hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4e260459749ff763cc29289bb61a3da0e6e6901 | 7,434 | py | Python | search_domain_list2csv.py | op7ic/investigatehunt | 79c3d4827bc2af372548f5f97ae356c905ce6dad | [
"MIT"
] | 6 | 2020-07-02T07:59:29.000Z | 2021-09-07T17:14:22.000Z | search_domain_list2csv.py | op7ic/investigatehunt | 79c3d4827bc2af372548f5f97ae356c905ce6dad | [
"MIT"
] | null | null | null | search_domain_list2csv.py | op7ic/investigatehunt | 79c3d4827bc2af372548f5f97ae356c905ce6dad | [
"MIT"
] | 4 | 2020-07-02T07:59:31.000Z | 2021-09-05T18:22:41.000Z | import sys
import requests
import configparser
import time
import json
import datetime
# Ignore insecure cert warnings (enable only if working with onsite-amp deployments)
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def checkAPITimeout(headers, request):
    """Back off when the Investigate API signals throttling or errors.

    Args:
        headers: response headers (currently unused; kept so existing
            call sites keep working).
        request: the requests.Response whose ``status_code`` decides how
            long to sleep.

    Returns:
        None. Sleeps as a side effect for any non-200 response.
    """
    # status_code is already an int in requests; no str() round-trip needed.
    status = request.status_code
    if status == 200:
        return
    if status == 404:
        # Not found: brief back-off before the next lookup
        time.sleep(45)
    else:
        # 503 (server started to block us) or any other error: long back-off
        time.sleep(90)
def isMalicious(value):
    """Translate an Investigate domain status code into a verdict string.

    Returns "Unclassified" for 0, "Malicious" for -1, "Benign" for 1,
    and None for any other value.
    """
    verdicts = {0: "Unclassified", -1: "Malicious", 1: "Benign"}
    return verdicts.get(value)
# Umbrella Investigate category IDs -> human-readable names.
# Built once at import time instead of on every match_category() call
# (the original rebuilt this ~100-entry dict per call, which is wasted
# work when scanning long domain lists).
INVESTIGATE_CATEGORIES = {
    '0': 'Adware', '1': 'Alcohol', '2': 'Auctions', '3': 'Blogs', '4': 'Chat', '5': 'Classifieds',
    '6': 'Dating', '7': 'Drugs', '8': 'Ecommerce/Shopping', '9': 'File Storage', '10': 'Gambling', '11': 'Games',
    '12': 'Hate/Discrimination', '13': 'Health and Fitness', '14': 'Humor', '15': 'Instant Messaging', '16':
    'Jobs/Employment', '17': 'Movies', '18': 'News/Media', '19': 'P2P/File sharing', '20': 'Photo Sharing',
    '21': 'Portals', '22': 'Radio', '23': 'Search Engines', '24': 'Social Networking', '25': 'Software/Technology',
    '26': 'Television', '28': 'Video Sharing', '29': 'Visual Search Engines', '30': 'Weapons', '31': 'Webmail', '32':
    'Business Services', '33': 'Educational Institutions', '34': 'Financial Institutions', '35': 'Government', '36':
    'Music', '37': 'Parked Domains', '38': 'Tobacco', '39': 'Sports', '40': 'Adult Themes', '41': 'Lingerie/Bikini',
    '42': 'Nudity', '43': 'Proxy/Anonymizer', '44': 'Pornography', '45': 'Sexuality', '46': 'Tasteless', '47': 'Academic Fraud',
    '48': 'Automotive', '49': 'Forums/Message boards', '50': 'Non-Profits', '51': 'Podcasts', '52': 'Politics', '53': 'Religious',
    '54': 'Research/Reference', '55': 'Travel', '57': 'Anime/Manga/Webcomic', '58': 'Web Spam', '59': 'Typo Squatting', '60':
    'Drive-by Downloads/Exploits', '61': 'Dynamic DNS', '62': 'Mobile Threats', '63': 'High Risk Sites and Locations', '64':
    'Command and Control', '65': 'Command and Control', '66': 'Malware', '67': 'Malware', '68': 'Phishing', '108': 'Newly Seen Domains',
    '109': 'Potentially Harmful', '110': 'DNS Tunneling VPN', '111': 'Arts', '112': 'Astrology', '113': 'Computer Security',
    '114': 'Digital Postcards', '115': 'Dining and Drinking', '116': 'Dynamic and Residential', '117': 'Fashion', '118':
    'File Transfer Services', '119': 'Freeware and Shareware', '120': 'Hacking', '121': 'Illegal Activities',
    '122': 'Illegal Downloads', '123': 'Infrastructure', '124': 'Internet Telephony', '125': 'Lotteries',
    '126': 'Mobile Phones', '127': 'Nature', '128': 'Online Trading', '129': 'Personal Sites', '130': 'Professional Networking',
    '131': 'Real Estate', '132': 'SaaS and B2B', '133': 'Safe for Kids', '134': 'Science and Technology', '135': 'Sex Education',
    '136': 'Social Science', '137': 'Society and Culture', '138': 'Software Updates', '139': 'Web Hosting', '140': 'Web Page Translation',
    '141': 'Organization Email', '142': 'Online Meetings', '143': 'Paranormal', '144': 'Personal VPN', '145': 'DIY Projects',
    '146': 'Hunting', '147': 'Military', '150': 'Cryptomining'}


def match_category(number):
    """Map an Umbrella Investigate category ID to its display name.

    Args:
        number: category identifier, as int or str.

    Returns:
        The category name, or None for an unknown ID.
    """
    return INVESTIGATE_CATEGORIES.get(str(number))
# Validate a command line parameter was provided
if len(sys.argv) < 3:
    sys.exit('Usage: <config file.txt> <domainlistfile.txt>\n %s' % sys.argv[0])
# Parse config to extract API keys
config = configparser.ConfigParser()
config.read(sys.argv[1])
api_key = config['settings']['investigate_api_key']
domain_list = sys.argv[2]
# Session object reused for every API call (auth header set once)
session = requests.Session()
session.headers.update({'Authorization': 'Bearer {}'.format(api_key)})
# Print CSV header.
# BUG FIX: the header previously had only 13 '{}' placeholders for 14 column
# names, so "Threat Type" was silently dropped and the header no longer lined
# up with the 14-column data rows printed below.
print("{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format("Date",
                                                         "Domain",
                                                         "Category",
                                                         "Security Category",
                                                         "Verdict",
                                                         "Risk Score",
                                                         "DGA Score",
                                                         "Perplexity",
                                                         "Domain Entropy",
                                                         "ASN Score",
                                                         "Popularity",
                                                         "Securerank2 Score",
                                                         "Attack",
                                                         "Threat Type"))
# 'with' guarantees the file is closed even if a lookup raises
# (replaces the previous try/finally + fp.close()).
with open(domain_list, 'r') as fp:
    for single_domain in fp:
        try:
            domain = single_domain.strip()
            # Content/security categorization for the domain
            URL_API = 'https://investigate.api.umbrella.com/domains/categorization/{}'.format(domain)
            responsecategorization = session.get(URL_API, verify=False)
            checkAPITimeout(responsecategorization.headers, responsecategorization)
            responsecategorization_json = responsecategorization.json()
            category_score = responsecategorization_json['{}'.format(domain)]['status']
            category_content_category = responsecategorization_json['{}'.format(domain)]['content_categories']
            category_content_security_category = responsecategorization_json['{}'.format(domain)]['security_categories']
            catList = list()
            secList = list()
            for cat in category_content_category:
                catList.append(match_category(cat))
            for sec_cat in category_content_security_category:
                secList.append(match_category(sec_cat))
            # Umbrella risk score for the domain
            URL_API = 'https://investigate.api.umbrella.com/domains/risk-score/{}'.format(domain)
            risk_score = session.get(URL_API, verify=False)
            checkAPITimeout(risk_score.headers, risk_score)
            risk_score_json = risk_score.json()
            # Security features (DGA score, perplexity, entropy, ...)
            URL_API_SEC = 'https://investigate.api.umbrella.com/security/name/{}'.format(domain)
            responsesecurity = session.get(URL_API_SEC, verify=False)
            checkAPITimeout(responsesecurity.headers, responsesecurity)
            responsesecurity_json = responsesecurity.json()
            dga_score = responsesecurity_json['dga_score']
            perplexity = responsesecurity_json['perplexity']
            asn_score = responsesecurity_json['asn_score']
            popularity = responsesecurity_json['popularity']
            domain_entropy = responsesecurity_json['entropy']
            securerank2 = responsesecurity_json['securerank2']
            attack = responsesecurity_json['attack']
            threat_type = responsesecurity_json['threat_type']
            # One CSV row per domain; dots are defanged so output is safe to share
            print("{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format(
                datetime.datetime.utcnow().isoformat(),
                str(domain).replace(".", "[.]"),
                "|".join(catList),
                "|".join(secList),
                isMalicious(category_score),
                risk_score_json['risk_score'],
                dga_score,
                perplexity,
                domain_entropy,
                asn_score,
                popularity,
                securerank2,
                attack,
                threat_type
            ))
            # We need to sleep so API won't kick up. 3 API queries are allowed per second in Tier 1
            time.sleep(1)
        except Exception:
            # BUG FIX: the failure row previously had 13 placeholders for 14
            # values, so failed rows had one column fewer than success rows.
            # Also narrowed the bare 'except:' to Exception so Ctrl-C can
            # still interrupt a long scan.
            print("{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format(
                datetime.datetime.utcnow().isoformat(),
                str(domain).replace(".", "[.]"),
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed",
                "Lookup Failed"
            ))
| 42.238636 | 136 | 0.636938 |
3134cb8d4d97e66ebd06717eeb67982ab6e0911a | 21,750 | py | Python | userinterface/flightPathDefinition.py | MarkHermreck/UAS-C2C2-2019 | 9eb04c080eb1e05da0e18616b7dd245d8afb4c75 | [
"MIT"
] | null | null | null | userinterface/flightPathDefinition.py | MarkHermreck/UAS-C2C2-2019 | 9eb04c080eb1e05da0e18616b7dd245d8afb4c75 | [
"MIT"
] | null | null | null | userinterface/flightPathDefinition.py | MarkHermreck/UAS-C2C2-2019 | 9eb04c080eb1e05da0e18616b7dd245d8afb4c75 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import sys
import os
import csv
import numbers
import re
from matplotlib.backends import qt_compat
use_pyside = qt_compat.QT_API == qt_compat.QT_API_PYSIDE
if use_pyside:
from PySide import QtWidgets, QtCore
else:
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QListWidget, QLineEdit, QLabel, QPushButton, QCheckBox, QInputDialog
from PyQt5.QtGui import QFont
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import communication
#progname = os.path.basename(sys.argv[0])
# Window title shown by the Qt main window.
progname = "GPS Coordinate Entry UI"
progversion = "0.1"
# Global waypoint storage shared by the canvas and the main window.
# format is [[x], [y], [x2], [y2], [x3], [y3]]
dataPoints = [[], [], [], [], [], []]
# Link to the vehicle; argv[1]/argv[2] are forwarded to the project-local
# ``communication`` module — presumably port name and baud/timeout, but that
# cannot be confirmed from this file.
com = communication.Communication(sys.argv[1], float(sys.argv[2]))
class MyMplCanvas(FigureCanvas):
    """Base matplotlib canvas embedded as a Qt widget.

    Creates a single-subplot figure with an expanding size policy.
    Subclasses draw their initial content by overriding
    :meth:`compute_initial_figure`.
    """

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        # Figure size is in inches; dpi controls on-screen resolution.
        fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = fig.add_subplot(111)
        self.compute_initial_figure()
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def compute_initial_figure(self):
        # Hook for subclasses; the base class draws nothing.
        pass
class MyDynamicMplCanvas(MyMplCanvas):
    """Interactive canvas: a mouse click adds a waypoint at the clicked
    coordinates, stores it in the module-level ``dataPoints`` lists and
    mirrors it into the list widget of the global main window ``aw``.
    """

    def __init__(self, *args, **kwargs):
        MyMplCanvas.__init__(self, *args, **kwargs)
        # timer = QtCore.QTimer(self)
        # timer.timeout.connect(self.update_figure)
        # timer.start(1000)
        cid = self.mpl_connect('button_press_event', self.onclick)

    def onclick(self, event):
        """Handle a mouse click: clamp the coordinates and add a waypoint."""
        aw.setFocus()
        print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single', event.button,
               event.x, event.y, event.xdata, event.ydata))
        # Round to 8 decimal places
        roundedX = round(event.xdata, 8)
        roundedY = round(event.ydata, 8)
        # BUG FIX: aw.altitudeEdit is commented out in
        # ApplicationWindow.__init__, so reading it unconditionally raised
        # AttributeError on every click. Fall back to the former default
        # altitude of 5 when the widget does not exist.
        if hasattr(aw, 'altitudeEdit'):
            roundedAlt = round(float(aw.altitudeEdit.text()), 8)
        else:
            roundedAlt = 5.0
        # Clamp to the supported coordinate range
        if roundedX > 250:
            roundedX = 250
        elif roundedX < -250:
            roundedX = -250
        if roundedY > 250:
            roundedY = 250
        elif roundedY < -250:
            roundedY = -250
        if roundedAlt < 1:
            roundedAlt = 1
        self.addPoint(roundedX, roundedY, roundedAlt)

    def addPoint(self, xPoint, yPoint, altitude):
        """Append (x, y, altitude) to dataPoints, refresh the list widget and redraw."""
        lenCoords = len(dataPoints[0])
        coordExists = False
        for i in range(0, lenCoords):
            if xPoint == dataPoints[0][i] and yPoint == dataPoints[1][i] and altitude == dataPoints[2][i]:
                coordExists = True
        # Duplicate-coordinate check is deliberately disabled; to restore it,
        # guard the appends below with 'if coordExists == False:'.
        dataPoints[0].append(xPoint)
        dataPoints[1].append(yPoint)
        dataPoints[2].append(altitude)
        print("added a point at (" + str(dataPoints[0][lenCoords]) + ", " + str(
            dataPoints[1][lenCoords]) + ") Alt: " + str(dataPoints[2][lenCoords]))
        aw.list.addItem(str(lenCoords + 1) + ". " + "(" + str(dataPoints[0][lenCoords]) + ", " + str(
            dataPoints[1][lenCoords]) + ") Alt: " + str(dataPoints[2][lenCoords]))
        self.axes.cla()
        self.axes.plot(dataPoints[0], dataPoints[1], c='c', linestyle='dashed', marker='o')
        # BUG FIX: the four calls below previously went through 'self.dc',
        # an attribute that exists on ApplicationWindow but not on this
        # canvas class, so addPoint crashed with AttributeError right after
        # plotting.
        self.axes.set_xlabel('Longitude in Decimal Degrees (m)')
        self.axes.set_ylabel('Latitude in Decimal Degrees (m)')
        self.axes.set_title('ISU & Ground Station Locations Entry')
        self.axes.set_xlim(28, 38)
        self.axes.set_ylim(82, 92)
        # Annotate optional altitude plus the 1-based point index
        for i in range(0, len(dataPoints[0])):
            if aw.altitudeCheckBox.isChecked():
                self.axes.annotate(str(dataPoints[2][i]) + ' m', (dataPoints[0][i] - 10, dataPoints[1][i] + 20))
            self.axes.annotate(str(i + 1), (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8), size=8)
        self.draw()

    def removePoint(self, indexToRemove):
        """Delete waypoint ``indexToRemove``, rebuild the list widget and redraw."""
        del dataPoints[0][indexToRemove]
        del dataPoints[1][indexToRemove]
        del dataPoints[2][indexToRemove]
        self.axes.cla()
        self.axes.plot(dataPoints[0], dataPoints[1], c='c', linestyle='dashed', marker='o')
        # BUG FIX: same 'self.dc' AttributeError as in addPoint (see above).
        self.axes.set_xlabel('Longitude in Decimal Degrees (m)')
        self.axes.set_ylabel('Latitude in Decimal Degrees (m)')
        self.axes.set_title('ISU & Ground Station Locations Entry')
        self.axes.set_xlim(28, 38)
        self.axes.set_ylim(82, 92)
        aw.list.clear()
        for i in range(0, len(dataPoints[0])):
            if aw.altitudeCheckBox.isChecked():
                self.axes.annotate(str(dataPoints[2][i]) + ' m', (dataPoints[0][i] - 10, dataPoints[1][i] + 20))
            self.axes.annotate(str(i + 1), (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8), size=8)
            aw.list.addItem(
                str(i + 1) + ". " + "(" + str(dataPoints[0][i]) + ", " + str(dataPoints[1][i]) + ") Alt: " + str(
                    dataPoints[2][i]))
        self.draw()

    def compute_initial_figure(self):
        """Draw the empty flight-path axes shown before any point is added."""
        self.axes.plot(dataPoints[0], dataPoints[1], c='c', linestyle='dashed', marker='o')
        # self.axes.plot(exampleData[0],exampleData[1],c='b',marker='o')
        self.axes.set_xlim(28, 38)
        self.axes.set_ylim(82, 92)
        self.axes.set_xlabel('Relative Position, West/East (m)')
        self.axes.set_ylabel('Relative Position, South/North (m)')
        self.axes.set_title('Flight Path Definition')
        # original values -270, 270
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window: coordinate-entry form, matplotlib canvas, waypoint list,
    and buttons to add points, load/save flightPath.csv, and start the flight.

    NOTE(review): some handlers (listItemDoubleClicked, beginButtonClicked)
    index ``dataPoints`` as if it were a flat [x1, y1, x2, y2, ...] sequence,
    while addPointButtonClicked and the canvas treat it as six parallel lists
    of coordinates. These two views are mutually inconsistent — confirm the
    intended layout before relying on either.
    """

    def __init__(self):
        # Window chrome, menus, and the central grid layout of widgets.
        QtWidgets.QMainWindow.__init__(self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("application main window")
        self.file_menu = QtWidgets.QMenu('&File', self)
        self.file_menu.addAction('&Quit', self.fileQuit,
                                 QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.menuBar().addMenu(self.file_menu)
        self.help_menu = QtWidgets.QMenu('&Help', self)
        self.menuBar().addSeparator()
        self.menuBar().addMenu(self.help_menu)
        self.help_menu.addAction('&About', self.about)
        self.main_widget = QtWidgets.QWidget(self)
        l = QtWidgets.QGridLayout(self.main_widget)
        # sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
        # The interactive plot canvas (click to add a point).
        self.dc = MyDynamicMplCanvas(self.main_widget, width=10, height=8, dpi=100)
        self.list = QListWidget(self)
        self.list.setFont(QFont('Courier'))
        self.list.doubleClicked.connect(self.listItemDoubleClicked)
        # l.addWidget(sc)
        # Coordinate entry fields: ISU #1, ISU #2, and ground station.
        self.xEdit = QLineEdit()
        self.yEdit = QLineEdit()
        self.xEdit2 = QLineEdit()
        self.yEdit2 = QLineEdit()
        self.xEdit3 = QLineEdit()
        self.yEdit3 = QLineEdit()
        #self.altitudeEdit = QLineEdit()
        self.xEdit.setText('0')
        self.yEdit.setText('0')
        self.xEdit2.setText('0')
        self.yEdit2.setText('0')
        self.xEdit3.setText('0')
        self.yEdit3.setText('0')
        #self.altitudeEdit.setText('5')
        self.xLabel = QLabel('ISU #1 Longitude')
        self.yLabel = QLabel('ISU #1 Latitude')
        self.xLabel2 = QLabel('ISU #2 Longitude')
        self.yLabel2 = QLabel('ISU #2 Latitude')
        self.xLabel3 = QLabel('Ground Station Longitude')
        self.yLabel3 = QLabel('Ground Station Latitude')
        #self.altitudeLabel = QLabel('Altitude in Feet')
        self.gpsLabel = QLabel(' Use GPS as position')
        self.addButton = QPushButton("Add Point")
        self.addButton.clicked.connect(self.addPointButtonClicked)
        self.startButton = QPushButton("Begin")
        self.startButton.clicked.connect(self.beginButtonClicked)
        self.loadCSVButton = QPushButton("Load from CSV")
        self.loadCSVButton.clicked.connect(self.loadCSVButtonClicked)
        self.gpsCheckBox = QCheckBox()
        self.altitudeCheckBox = QCheckBox()
        self.altitudeCheckBoxLabel = QLabel(' Show altitude annotations')
        self.altitudeCheckBox.stateChanged.connect(self.altitudeCheckBoxClicked)
        self.altitudeCheckBox.toggle()
        # Grid placement: row 0 labels, row 1 inputs/buttons, row 2 canvas+list.
        l.addWidget(self.xLabel, 0, 0, 1, 1)
        l.addWidget(self.yLabel, 0, 1, 1, 1)
        l.addWidget(self.xLabel2, 0, 2, 1, 1)
        l.addWidget(self.yLabel2, 0, 3, 1, 1)
        l.addWidget(self.xLabel3, 0, 4, 1, 1)
        l.addWidget(self.yLabel3, 0, 5, 1, 1)
        #l.addWidget(self.altitudeLabel, 0, 6, 1, 1)
        l.addWidget(self.gpsLabel, 1, 8, 1, 1)
        l.addWidget(self.xEdit, 1, 0, 1, 1)
        l.addWidget(self.yEdit, 1, 1, 1, 1)
        l.addWidget(self.xEdit2, 1, 2, 1, 1)
        l.addWidget(self.yEdit2, 1, 3, 1, 1)
        l.addWidget(self.xEdit3, 1, 4, 1, 1)
        l.addWidget(self.yEdit3, 1, 5, 1, 1)
        #l.addWidget(self.altitudeEdit, 1, 6, 1, 1)
        l.addWidget(self.addButton, 1, 7, 1, 1)
        l.addWidget(self.gpsCheckBox, 1, 8, 1, 1)
        l.addWidget(self.startButton, 3, 8, 1, 1)
        l.addWidget(self.loadCSVButton, 3, 7, 1, 1)
        #l.addWidget(self.altitudeCheckBoxLabel, 0, 8, 1, 1)
        #l.addWidget(self.altitudeCheckBox, 0, 8, 1, 1)
        l.addWidget(self.dc, 2, 0, 1, 7)
        l.addWidget(self.list, 2, 7, 1, 2)
        # self.addButton.clicked.connect(dc.addPoint(str(self.xEdit.text()), str(self.yEdit.text()), str(self.altitudeEdit.text())))
        # Populate the list widget with any pre-existing waypoints.
        lenCoords = len(dataPoints[0])
        for i in range(0, lenCoords):
            self.list.addItem(
                str(i + 1) + ". " + "(" + str(dataPoints[0][i]) + ", " + str(dataPoints[1][i]) + ") Alt: " + str(
                    dataPoints[2][i]))
        # lenCoords = len(exampleData[0])
        # for i in range(0,lenCoords):
        # self.list.addItem("(" + str(exampleData[0][i]) + ", " + str(exampleData[1][i]) + ") Altitude: " + str(exampleData[2][i]) + " Temp: " + str(exampleData[3][i]) + " °C")
        # self.list.addItem('{:16s} {:18s} {:18s}'.format("(" + str(exampleData[0][i]) + ", " + str(exampleData[1][i]) + ")", "Altitude: " + str(exampleData[2][i]), "Temp: " + str(exampleData[3][i]) + " °C"))
        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

    # def buttonClicked(self, dc):
    # dc.addPoint(self, str(self.xEdit.text()), str(self.yEdit.text()), str(self.altitudeEdit.text()))
    def fileQuit(self):
        # Close the window; WA_DeleteOnClose tears it down.
        self.close()

    def closeEvent(self, ce):
        # Route window-manager close events through fileQuit.
        self.fileQuit()

    def altitudeCheckBoxClicked(self):
        """Redraw the plot with or without altitude annotations."""
        self.dc.axes.cla()
        self.dc.axes.plot(dataPoints[0], dataPoints[1], c='c', linestyle='dashed', marker='o')
        self.dc.axes.set_xlabel('Longitude in Decimal Degrees')
        self.dc.axes.set_ylabel('Latitude in Decimal Degrees')
        self.dc.axes.set_title('ISU & Ground Station Locations')
        self.dc.axes.set_xlim(28, 38)
        self.dc.axes.set_ylim(82, 92)
        # tuscaloosa GPS coords roughly
        # 33.209561 lat, 33 deg 12 min 34.412 sec N
        # -87.567526 long, 87 deg 34 min 3.092 sec W
        if self.altitudeCheckBox.isChecked():
            for i in range(0, len(dataPoints[0])):
                self.dc.axes.annotate(str(dataPoints[2][i]) + ' m', (dataPoints[0][i] - 10, dataPoints[1][i] + 20))
                self.dc.axes.annotate(str(i + 1), (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8),
                                      size=8)
            self.dc.draw()
        else:
            for i in range(0, len(dataPoints[0])):
                self.dc.axes.annotate(str(i + 1), (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8),
                                      size=8)
            self.dc.draw()

    def listItemDoubleClicked(self):
        """Let the user edit the double-clicked waypoint via a text dialog.

        NOTE(review): this indexes dataPoints[editedRow*2] /
        dataPoints[2*editedRow+1], i.e. it treats dataPoints as a flat
        [x1, y1, x2, y2, x3, y3] sequence and *replaces list slots with
        floats* — inconsistent with addPointButtonClicked, which appends to
        six per-coordinate lists. Confirm the intended layout.
        """
        editedRow = self.list.currentRow()
        print(editedRow)
        s = str(dataPoints[editedRow*2]) + ', ' + str(dataPoints[2*editedRow+1])
        t, okPressed = QInputDialog.getText(self, "Edit waypoint", "Format: X.X, Y.Y without brackets", QLineEdit.Normal, s)
        if okPressed:
            # Accept only "float, float" shaped input.
            if re.match('[-+]?[0-9]*\.?[0-9]+, [-+]?[0-9]*\.?[0-9]+', t):
                editX, editY = t.split(', ')
                dataPoints[editedRow*2] = float(editX)
                dataPoints[editedRow*2+1] = float(editY)
                if(editedRow == 0):
                    self.list.currentItem().setText(
                        str("ISU #1: ") + "(" + str(dataPoints[editedRow*2]) + ", " + str(
                            dataPoints[editedRow*2+1]) + ")")
                elif(editedRow == 1):
                    self.list.currentItem().setText(
                        str("ISU #2: ") + "(" + str(dataPoints[editedRow * 2]) + ", " + str(
                            dataPoints[editedRow * 2 + 1]) + ")")
                elif(editedRow == 2):
                    self.list.currentItem().setText(
                        str("Ground Station: ") + "(" + str(dataPoints[editedRow * 2]) + ", " + str(
                            dataPoints[editedRow * 2 + 1]) + ")")
                else:
                    print("Some error with ")
                    return None;
            else:
                QtWidgets.QMessageBox.about(self, "Unsaved edit", "Invalid format")

    def addPointButtonClicked(self):
        """Read the six coordinate fields and append one ISU#1/ISU#2/ground
        station triple to dataPoints, then redraw plot and list."""
        lenCoords = len(dataPoints[0])
        newX = self.xEdit.text()
        newY = self.yEdit.text()
        newX2 = self.xEdit2.text()
        newY2 = self.yEdit2.text()
        newX3 = self.xEdit3.text()
        newY3 = self.yEdit3.text()
        #newAlt = self.altitudeEdit.text()
        # NOTE(review): float() already raises on bad input before the
        # isinstance checks run, so this guard only filters NaN-free floats;
        # malformed text raises ValueError out of this handler.
        if isinstance(float(newX), numbers.Number) and isinstance(float(newY), numbers.Number) and isinstance(
                float(newX2), numbers.Number) and isinstance(float(newY2), numbers.Number) and isinstance(
                float(newX3), numbers.Number) and isinstance(float(newY3), numbers.Number):
            dataPoints[0].append(float(newX))
            dataPoints[1].append(float(newY))
            #print("reached here")
            dataPoints[2].append(float(newX2))
            dataPoints[3].append(float(newY2))
            #print("reached here")
            dataPoints[4].append(float(newX3))
            dataPoints[5].append(float(newY3))
            #dataPoints[2].append(float(newAlt))
            print("added a point at (" + str(dataPoints[0][lenCoords]) + ", " + str(
                dataPoints[1][lenCoords]) + ") " )
            print("added a point at (" + str(dataPoints[2][lenCoords]) + ", " + str(
                dataPoints[3][lenCoords]) + ") ")
            print("added a point at (" + str(dataPoints[4][lenCoords]) + ", " + str(
                dataPoints[5][lenCoords]) + ") ")
            self.list.addItem("ISU #1: " + "(" + str(dataPoints[0][lenCoords]) + ", " + str(
                dataPoints[1][lenCoords]) + ") ")
            self.list.addItem("ISU #2: " + "(" + str(dataPoints[2][lenCoords]) + ", " + str(
                dataPoints[3][lenCoords]) + ") ")
            self.list.addItem("Ground Station: "+ "(" + str(dataPoints[4][lenCoords]) + ", " + str(
                dataPoints[5][lenCoords]) + ") ")
            self.dc.axes.cla()
            self.dc.axes.plot(dataPoints[0], dataPoints[1], c='c', linestyle='dashed', marker='o')
            self.dc.axes.plot(dataPoints[2], dataPoints[3], c='c', linestyle='dashed', marker='o')
            self.dc.axes.plot(dataPoints[4], dataPoints[5], c='c', linestyle='dashed', marker='o')
            self.dc.axes.set_xlabel('Longitude in Decimal Degrees')
            self.dc.axes.set_ylabel('Latitude in Decimal Degrees')
            self.dc.axes.set_title('ISU & Ground Station Locations')
            self.dc.axes.set_xlim(28, 38)
            self.dc.axes.set_ylim(82, 92)
            # NOTE(review): dataPoints[2] holds ISU #2 longitudes here, but the
            # annotation below labels it as altitude ('m') — leftover from the
            # older [x, y, alt] layout.
            if self.altitudeCheckBox.isChecked():
                for i in range(0, len(dataPoints[0])):
                    self.dc.axes.annotate(str(dataPoints[2][i]) + ' m', (dataPoints[0][i] - 10, dataPoints[1][i] + 20))
                    self.dc.axes.annotate(str(i + 1),
                                          (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8), size=8)
            else:
                for i in range(0, len(dataPoints[0])):
                    self.dc.axes.annotate(str(i + 1),
                                          (dataPoints[0][i] - 2.5 - 3 * len(str(i + 1)), dataPoints[1][i] - 8), size=8)
            self.dc.draw()

    def beginButtonClicked(self):
        """After confirmation, save the three positions to flightPath.csv and
        stream them over the communication link.

        NOTE(review): dataPoints[2*i] / dataPoints[2*i+1] are whole lists
        under the append-based layout, so the CSV rows and com.send calls
        emit list reprs rather than scalars — confirm intended layout.
        """
        msg = QtWidgets.QMessageBox()
        reply = msg.question(self, 'Begin Flight?',
                             "Flight path will be saved to flightPath.csv. Are you sure you want to begin flight?",
                             QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            with open('flightPath.csv', 'w') as csvfile:
                fieldnames = ['x', 'y']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                for i in range(0, 3):
                    writer.writerow({'x': str(dataPoints[2*i]), 'y': str(dataPoints[2*i+1])})
            dictList = []
            for i in range(0, 3):
                currentDict = {'x': dataPoints[2*i], 'y': dataPoints[2*i+1]}
                dictList.append(currentDict)
            #uncomment below when ready to start testing data, as well as uncommenting line 30
            #com.send(dictList)
            for i in range(0, 3):
                #send x
                com.send(dataPoints[2*i])
                #space for seperation
                com.send(" ")
                #send y
                com.send(dataPoints[2*i+1])
                com.send(" ")
            com.send("EndOfFile")
        # msg.exec_()

    def threeDButtonClicked(self):
        """Open a separate 3D scatter/line view of the waypoints."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(dataPoints[0], dataPoints[1], dataPoints[2], c='r', linestyle='dashed', marker='o')
        ax.plot(dataPoints[0], dataPoints[1], dataPoints[2], color='r')
        ax.set_xlabel('Relative Position, West/East (m)')
        ax.set_ylabel('Relative Position, South/North (m)')
        ax.set_zlabel('Relative Altitude (m)')
        ax.set_xlim(28, 38)
        ax.set_ylim(82, 92)
        ax.set_zlim(0, 25)
        plt.show()

    def loadCSVButtonClicked(self):
        """After confirmation, clear all waypoints and reload them from
        flightPath.csv (rows of three ints: x, y, altitude)."""
        msg = QtWidgets.QMessageBox()
        reply = msg.question(self, 'Load waypoints from CSV?',
                             "Are you sure? This will overwrite your existing waypoints.", QtWidgets.QMessageBox.Yes,
                             QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            del dataPoints[0][:]
            del dataPoints[1][:]
            del dataPoints[2][:]
            del dataPoints[3][:]
            del dataPoints[4][:]
            del dataPoints[5][:]
            aw.list.clear()
            with open('flightPath.csv', 'r') as csvfile:
                csvReader = csv.reader(csvfile)
                for row in csvReader:
                    if len(row) == 3:
                        try:
                            self.dc.addPoint(int(row[0]), int(row[1]), int(row[2]))
                            print(row)
                        except ValueError:
                            # Skip non-numeric rows (e.g. the header).
                            True

    # Make sure to check if any row is selected here to avoid crash
    def keyPressEvent(self, event):
        """Keyboard shortcuts: Delete removes the selected waypoint,
        Ctrl+Z removes the last one, Left/Right adjust the altitude field.

        NOTE(review): self.altitudeEdit is commented out in __init__, so the
        Left/Right branches below raise AttributeError when triggered.
        """
        if event.key() == QtCore.Qt.Key_Delete:
            deletedRow = self.list.currentRow()
            if deletedRow >= 0:
                print('deleted ' + str(deletedRow))
                self.list.takeItem(deletedRow)
                self.dc.removePoint(deletedRow)
        # Left and Right key to increase or decrease altitude
        if event.key() == QtCore.Qt.Key_Right:
            self.altitudeEdit.setText(str(int(self.altitudeEdit.text()) + 1))
        elif event.key() == QtCore.Qt.Key_Left:
            self.altitudeEdit.setText(str(int(self.altitudeEdit.text()) - 1))
        if event.modifiers() & QtCore.Qt.ControlModifier and event.key() == QtCore.Qt.Key_Z:
            rowToDelete = len(dataPoints[0]) - 1
            if rowToDelete != 0:
                print('deleted ' + str(rowToDelete))
                self.list.takeItem(rowToDelete)
                self.dc.removePoint(rowToDelete)

    def about(self):
        # Simple About dialog launched from the Help menu.
        QtWidgets.QMessageBox.about(self, "About",
                                    """A program to plot a flight path for an autonomous UAV."""
                                    )
# Qt application bootstrap: create the app, show the main window, and hand
# control to the Qt event loop; exit with the loop's return code.
qApp = QtWidgets.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
# qApp.exec_()
| 42.730845 | 209 | 0.550483 |
cab37bb57207ef46dacbfa766ab626e9357364a7 | 857 | py | Python | medium/73.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | medium/73.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | medium/73.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from typing import List
"""
73. Set Matrix Zeroes
https://leetcode.com/problems/set-matrix-zeroes/
"""
class Solution:
    """LeetCode 73: Set Matrix Zeroes."""

    def setZeroes(self, matrix: List[List[int]]) -> None:
        """Zero out, in place, every row and column that contains a 0.

        Do not return anything, modify matrix in-place instead.
        """
        n_cols = len(matrix[0]) if matrix else 0
        zero_rows = set()
        zero_cols = set()
        # First pass: record which rows/columns hold a zero.
        for r, row in enumerate(matrix):
            for c in range(n_cols):
                if row[c] == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        # Second pass: blank every cell in a marked row or column.
        for r, row in enumerate(matrix):
            for c in range(n_cols):
                if r in zero_rows or c in zero_cols:
                    row[c] = 0
def main():
    """Demo driver: run setZeroes on a sample matrix and print the result."""
    sol = Solution()
    # NOTE(review): this builds a *tuple* of three row lists, not a list of
    # lists; setZeroes still works because the inner rows are mutable.
    a = [1, 1, 1], [1, 0, 1], [1, 1, 1]
    # setZeroes mutates in place and returns None, so this prints 'None'.
    print(sol.setZeroes(a))
    print(a)
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
d6b5befe3b5464b1ec89518470702cc88b67fc65 | 9,527 | py | Python | pahelix/utils/splitters.py | agave233/PaddleHelix | e5578f72c2a203a27d9df7da111f1ced826c1429 | [
"Apache-2.0"
] | 454 | 2020-11-21T01:02:45.000Z | 2022-03-29T12:53:40.000Z | pahelix/utils/splitters.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 161 | 2020-12-12T06:35:54.000Z | 2022-03-27T11:31:13.000Z | pahelix/utils/splitters.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 108 | 2020-12-07T09:01:10.000Z | 2022-03-31T14:42:29.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
| Splitters
"""
import random
import numpy as np
from itertools import compress
from rdkit.Chem.Scaffolds import MurckoScaffold
from collections import defaultdict
from sklearn.model_selection import StratifiedKFold
# Public names exported by ``from ... import *``.
# NOTE(review): RandomScaffoldSplitter is exported here but is not defined in
# this portion of the file — presumably defined further below; verify.
__all__ = [
    'RandomSplitter',
    'IndexSplitter',
    'ScaffoldSplitter',
    'RandomScaffoldSplitter',
]
def generate_scaffold(smiles, include_chirality=False):
    """
    Obtain the Bemis-Murcko scaffold from a smiles string.

    Args:
        smiles: smiles sequence of the molecule.
        include_chirality: keep chirality in the scaffold. Default=False.

    Return:
        the scaffold of the given smiles.
    """
    return MurckoScaffold.MurckoScaffoldSmiles(
        smiles=smiles, includeChirality=include_chirality)
class Splitter(object):
    """
    Abstract base class for splitters that divide a dataset into
    train/valid/test subsets.
    """

    def __init__(self):
        super(Splitter, self).__init__()
class RandomSplitter(Splitter):
    """
    Random splitter: shuffles sample indices with a seeded RNG before
    slicing off the train/valid/test subsets.
    """

    def __init__(self):
        super(RandomSplitter, self).__init__()

    def split(self,
              dataset,
              frac_train=None,
              frac_valid=None,
              frac_test=None,
              seed=None):
        """
        Args:
            dataset(InMemoryDataset): the dataset to split.
            frac_train(float): the fraction of data to be used for the train split.
            frac_valid(float): the fraction of data to be used for the valid split.
            frac_test(float): the fraction of data to be used for the test split.
            seed(int|None): the random seed.
        """
        # The three fractions must add up to 1.
        np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)
        total = len(dataset)
        order = list(range(total))
        np.random.RandomState(seed).shuffle(order)
        n_train = int(frac_train * total)
        n_train_valid = int((frac_train + frac_valid) * total)
        train_dataset = dataset[order[:n_train]]
        valid_dataset = dataset[order[n_train:n_train_valid]]
        test_dataset = dataset[order[n_train_valid:]]
        return train_dataset, valid_dataset, test_dataset
class IndexSplitter(Splitter):
    """
    Split datasets that have already been ordered: the first `frac_train`
    proportion forms the train set, the next `frac_valid` the valid set,
    and the final `frac_test` the test set.
    """

    def __init__(self):
        super(IndexSplitter, self).__init__()

    def split(self,
              dataset,
              frac_train=None,
              frac_valid=None,
              frac_test=None):
        """
        Args:
            dataset(InMemoryDataset): the dataset to split.
            frac_train(float): the fraction of data to be used for the train split.
            frac_valid(float): the fraction of data to be used for the valid split.
            frac_test(float): the fraction of data to be used for the test split.
        """
        # The three fractions must add up to 1.
        np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)
        total = len(dataset)
        order = list(range(total))
        n_train = int(frac_train * total)
        n_train_valid = int((frac_train + frac_valid) * total)
        train_dataset = dataset[order[:n_train]]
        valid_dataset = dataset[order[n_train:n_train_valid]]
        test_dataset = dataset[order[n_train_valid:]]
        return train_dataset, valid_dataset, test_dataset
class ScaffoldSplitter(Splitter):
    """
    Adapted from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py
    Split dataset by Bemis-Murcko scaffolds
    """
    def __init__(self):
        super(ScaffoldSplitter, self).__init__()

    def split(self,
            dataset,
            frac_train=None,
            frac_valid=None,
            frac_test=None):
        """
        Deterministically split ``dataset`` so that molecules sharing a
        Bemis-Murcko scaffold never land in different splits.

        Args:
            dataset(InMemoryDataset): the dataset to split. Make sure each element in
                the dataset has key "smiles" which will be used to calculate the
                scaffold.
            frac_train(float): the fraction of data to be used for the train split.
            frac_valid(float): the fraction of data to be used for the valid split.
            frac_test(float): the fraction of data to be used for the test split.

        Returns:
            (train_dataset, valid_dataset, test_dataset) tuple of subsets.
        """
        np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)
        N = len(dataset)

        # Group example indices by scaffold: {scaffold_i: [idx1, idx2, ...]}
        all_scaffolds = {}
        for i in range(N):
            scaffold = generate_scaffold(dataset[i]['smiles'], include_chirality=True)
            if scaffold not in all_scaffolds:
                all_scaffolds[scaffold] = [i]
            else:
                all_scaffolds[scaffold].append(i)

        # Sort scaffold groups from largest to smallest (ties broken by the
        # smallest member index) so big groups are assigned to train first.
        all_scaffolds = {key: sorted(value) for key, value in all_scaffolds.items()}
        all_scaffold_sets = [
            scaffold_set for (scaffold, scaffold_set) in sorted(
                all_scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
        ]

        # Greedily fill train, then valid, then test.
        # NOTE(review): the original code ran this exact loop twice back to
        # back with identical inputs and identical results; the redundant
        # second pass has been removed (no behavior change).
        train_cutoff = frac_train * N
        valid_cutoff = (frac_train + frac_valid) * N
        train_idx, valid_idx, test_idx = [], [], []
        for scaffold_set in all_scaffold_sets:
            if len(train_idx) + len(scaffold_set) > train_cutoff:
                if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff:
                    test_idx.extend(scaffold_set)
                else:
                    valid_idx.extend(scaffold_set)
            else:
                train_idx.extend(scaffold_set)
        # Sanity check: splits must be disjoint.
        assert len(set(train_idx).intersection(set(valid_idx))) == 0
        assert len(set(test_idx).intersection(set(valid_idx))) == 0

        train_dataset = dataset[train_idx]
        valid_dataset = dataset[valid_idx]
        test_dataset = dataset[test_idx]
        return train_dataset, valid_dataset, test_dataset
class RandomScaffoldSplitter(Splitter):
    """
    Adapted from https://github.com/pfnet-research/chainer-chemistry/blob/master/chainer_chemistry/dataset/splitters/scaffold_splitter.py
    Split dataset by Bemis-Murcko scaffolds
    """
    def __init__(self):
        super(RandomScaffoldSplitter, self).__init__()
    def split(self,
            dataset,
            frac_train=None,
            frac_valid=None,
            frac_test=None,
            seed=None):
        """
        Split ``dataset`` by scaffold groups visited in a random order:
        each whole group goes to valid if it still fits, else to test if it
        still fits, else to train. Same-scaffold molecules stay together.

        Args:
            dataset(InMemoryDataset): the dataset to split. Make sure each element in
                the dataset has key "smiles" which will be used to calculate the
                scaffold.
            frac_train(float): the fraction of data to be used for the train split.
            frac_valid(float): the fraction of data to be used for the valid split.
            frac_test(float): the fraction of data to be used for the test split.
            seed(int|None): the random seed.
        """
        np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)
        N = len(dataset)
        rng = np.random.RandomState(seed)
        # Group example indices by scaffold string.
        scaffolds = defaultdict(list)
        for ind in range(N):
            scaffold = generate_scaffold(dataset[ind]['smiles'], include_chirality=True)
            scaffolds[scaffold].append(ind)
        # dtype=object keeps the ragged per-scaffold index lists intact
        # while rng.permutation shuffles the group order.
        scaffold_sets = rng.permutation(np.array(list(scaffolds.values()), dtype=object))
        # Target sizes for valid/test; train absorbs everything left over,
        # so it may end up slightly larger than frac_train * N.
        n_total_valid = int(np.floor(frac_valid * len(dataset)))
        n_total_test = int(np.floor(frac_test * len(dataset)))
        train_idx = []
        valid_idx = []
        test_idx = []
        for scaffold_set in scaffold_sets:
            if len(valid_idx) + len(scaffold_set) <= n_total_valid:
                valid_idx.extend(scaffold_set)
            elif len(test_idx) + len(scaffold_set) <= n_total_test:
                test_idx.extend(scaffold_set)
            else:
                train_idx.extend(scaffold_set)
        train_dataset = dataset[train_idx]
        valid_dataset = dataset[valid_idx]
        test_dataset = dataset[test_idx]
        return train_dataset, valid_dataset, test_dataset
| 35.950943 | 137 | 0.627375 |
7f4e427c98ebad68ae988246aeef63231ea76f3d | 2,039 | py | Python | pettingzoo/butterfly/prospector/constants.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | pettingzoo/butterfly/prospector/constants.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | pettingzoo/butterfly/prospector/constants.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | import math
# Window / rendering configuration.
SCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = (1280, 720)
BACKGROUND_COLOR = (217, 151, 106)
FPS = 15
# World geometry (pixels).
FENCE_WIDTH = 22
WATER_HEIGHT = 100
AGENT_RADIUS = 15
AGENT_DIAMETER = AGENT_RADIUS * 2
# The 3 is for RGB channels of the observation image.
PROSPEC_OBSERV_SIDE_LEN = 5 * AGENT_DIAMETER
PROSPEC_OBSERV_SHAPE = (PROSPEC_OBSERV_SIDE_LEN, PROSPEC_OBSERV_SIDE_LEN, 3)
# Slightly bigger for bankers - 2 integers bigger on each side
BANKER_OBSERV_SIDE_LEN = PROSPEC_OBSERV_SIDE_LEN + 4
BANKER_OBSERV_SHAPE = (BANKER_OBSERV_SIDE_LEN, BANKER_OBSERV_SIDE_LEN, 3)
# Sprite rotation is clamped to +/- 45 degrees per step.
MAX_SPRITE_ROTATION = math.pi / 4
# Agent counts and movement speeds.
NUM_PROSPECTORS = 4
NUM_BANKERS = 3
NUM_AGENTS = NUM_PROSPECTORS + NUM_BANKERS
PROSPECTOR_SPEED = 150
BANKER_SPEED = 100
# Max angular misalignment allowed for a prospector->banker handoff.
BANKER_HANDOFF_TOLERANCE = math.pi / 4
TWO_PI = math.pi * 2.0
FENCE_COLLISION_BUFFER = AGENT_DIAMETER
VERT_FENCE_HEIGHT = SCREEN_HEIGHT - WATER_HEIGHT
# For the left and right fences
FENCE_VERT_VERTICES = (
    (0, 0),
    (FENCE_WIDTH + FENCE_COLLISION_BUFFER, 0),
    (FENCE_WIDTH + FENCE_COLLISION_BUFFER, VERT_FENCE_HEIGHT),
    (0, VERT_FENCE_HEIGHT),
)
# For the top fence
FENCE_HORIZ_VERTICES = (
    (0, 0),
    (SCREEN_WIDTH, 0),
    (SCREEN_WIDTH, FENCE_WIDTH + FENCE_COLLISION_BUFFER),
    (0, FENCE_WIDTH + FENCE_COLLISION_BUFFER),
)
# (name, draw position, collision position, vertices) per fence.
FENCE_INFO = [
    ("left", [0, 0], [0, 0], FENCE_VERT_VERTICES), # left boundary
    ("top", [0, 0], [0, 0], FENCE_HORIZ_VERTICES), # top boundary
    (
        "right",
        [SCREEN_WIDTH - FENCE_WIDTH, 0],
        [SCREEN_WIDTH - (FENCE_WIDTH + FENCE_COLLISION_BUFFER), 0],
        FENCE_VERT_VERTICES,
    ),
]
# Bank buildings: three evenly spaced along the top of the screen.
BANK_SIZE = BANK_WIDTH, BANK_HEIGHT = 184, 100
BANK_VERTS = (
    (0, 0),
    (BANK_WIDTH, 0),
    (BANK_WIDTH, BANK_HEIGHT),
    (0, BANK_HEIGHT),
)
BANK_INFO = [
    ([184 * 1, 50], BANK_VERTS),
    ([184 * 3, 50], BANK_VERTS),
    ([184 * 5, 50], BANK_VERTS),
]
# Water strip along the bottom of the screen.
WATER_INFO = [
    (0, SCREEN_HEIGHT - WATER_HEIGHT), # position
    ( # vertices
        (0, 0),
        (SCREEN_WIDTH, 0),
        (SCREEN_WIDTH, WATER_HEIGHT),
        (0, WATER_HEIGHT),
    ),
]
| 24.27381 | 76 | 0.681707 |
0e762ee3740ea1054280a33487adbee08b43b750 | 747 | py | Python | wikilabels/util/tsv.py | aryan040501/wikilabels | ea110da2b969cc978a0f288c4da6250dc9d67e72 | [
"MIT"
] | 15 | 2015-07-16T17:56:43.000Z | 2018-08-20T14:59:16.000Z | wikilabels/util/tsv.py | aryan040501/wikilabels | ea110da2b969cc978a0f288c4da6250dc9d67e72 | [
"MIT"
] | 122 | 2015-06-10T15:58:11.000Z | 2018-08-16T14:56:23.000Z | wikilabels/util/tsv.py | aryan040501/wikilabels | ea110da2b969cc978a0f288c4da6250dc9d67e72 | [
"MIT"
] | 27 | 2015-07-15T22:12:35.000Z | 2018-08-06T23:10:28.000Z | import json
def read(f, header=False):
    """Yield decoded rows from an open TSV file object.

    When *header* is true, the first line is consumed as column names and
    each following row is yielded as a dict keyed by those names; otherwise
    rows are yielded as plain lists of decoded values.
    """
    headers = decode_row(f.readline()) if header else None
    for line in f:
        yield decode_row(line, headers=headers)
def encode(value):
    """Serialize *value* to its JSON text representation."""
    encoded = json.dumps(value)
    return encoded
def encode_row(values, headers=None):
    """Encode one row as tab-separated JSON values.

    With *headers*, *values* is treated as a mapping and fields are emitted
    in header order; otherwise *values* is an iterable of cell values.
    """
    if headers is None:
        fields = [encode(v) for v in values]
    else:
        fields = [encode(values[h]) for h in headers]
    return "\t".join(fields)
def decode(value):
    """Parse *value* as JSON, falling back to its plain string form."""
    try:
        parsed = json.loads(value)
    except ValueError:
        # Not valid JSON -- treat the raw cell as a string.
        return str(value)
    else:
        return parsed
def decode_row(line, headers=None):
    """Decode one TSV line into a list, or a dict keyed by *headers*."""
    if headers is not None:
        # Pair header names with the positionally decoded values.
        return dict(zip(headers, decode_row(line)))
    return [decode(cell) for cell in line.strip().split("\t")]
| 20.189189 | 64 | 0.613119 |
0ac27deddc6a6760037aee8469353b4c5e98ab8d | 1,291 | py | Python | qaqg/main.py | monatis/ai-aas | d8363dfa33ba73cc09f5ed47511b7bdccb6e3d33 | [
"Apache-2.0"
] | 7 | 2021-04-25T18:50:44.000Z | 2021-12-04T01:15:50.000Z | qaqg/main.py | monatis/ai-aas | d8363dfa33ba73cc09f5ed47511b7bdccb6e3d33 | [
"Apache-2.0"
] | null | null | null | qaqg/main.py | monatis/ai-aas | d8363dfa33ba73cc09f5ed47511b7bdccb6e3d33 | [
"Apache-2.0"
] | 1 | 2021-06-17T16:24:22.000Z | 2021-06-17T16:24:22.000Z | import asyncio
import logging
import aioredis
import ujson
from pipelines import pipeline # pipelines.py script in the cloned repo
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
async def task():
    """Consume QA/QG requests from the 'qaqg' redis list and publish results.

    Runs forever: each iteration pops up to 8 queued requests, runs the
    multitask QA/QG pipeline on each, and stores the JSON-encoded result
    under the request's id for the API side to pick up.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        "ozcangundes/mt5-multitask-qa-qg-turkish")
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "ozcangundes/mt5-multitask-qa-qg-turkish")
    multimodel = pipeline("multitask-qa-qg", tokenizer=tokenizer, model=model)
    queue = await aioredis.create_redis_pool("redis://redis:6379/0?encoding=utf-8")
    logging.warning("Connected to Redis")
    logging.warning("QAQG task is running asynchronously...")
    while True:
        # Grab a batch of up to 8 requests and trim them off the list in
        # one pipelined round trip.
        pipe = queue.pipeline()
        pipe.lrange("qaqg", 0, 7)
        pipe.ltrim("qaqg", 8, -1)
        requests, _ = await pipe.execute()
        for r in requests:
            r = ujson.loads(r)
            if r.get("question", None) is None:
                # No question supplied: generate questions/answers from text.
                results = multimodel(r["text"])
            else:
                # Question supplied: answer it against the given context.
                results = multimodel(
                    {"context": r["text"], "question": r["question"]})
            await queue.set(r["id"], ujson.dumps(results))
        # BUGFIX: asyncio.sleep() returns a coroutine and must be awaited;
        # the original bare call never slept, leaving a hot busy loop.
        await asyncio.sleep(0.1)
if __name__ == "__main__":
    # Entry point: run the infinite consumer loop on a fresh event loop.
    asyncio.run(task())
| 30.023256 | 83 | 0.627421 |
56fd20a56074d3642b499dabf03e660ee1a70dd1 | 1,144 | py | Python | RandomCameras/settings/test.py | Haizza1/RandomCameras-Backend | b679e2c685a9f9582f5f1a6b4c79c91c2328e326 | [
"MIT"
] | 1 | 2021-06-09T01:35:59.000Z | 2021-06-09T01:35:59.000Z | RandomCameras/settings/test.py | Haizza1/RandomCameras-Backend | b679e2c685a9f9582f5f1a6b4c79c91c2328e326 | [
"MIT"
] | null | null | null | RandomCameras/settings/test.py | Haizza1/RandomCameras-Backend | b679e2c685a9f9582f5f1a6b4c79c91c2328e326 | [
"MIT"
] | null | null | null | """Testing settings.
With these settings, tests run faster.
"""
from .base import *
from .base import env
# Base: test runs are never exposed, so a fixed fallback secret is acceptable.
DEBUG = False
SECRET_KEY = env('SECRET_KEY',
    default="7lEaACt4wsCj8JbXYgQLf4BmdG5QbuHTMYUGir2Gc1GHqqb2Pv8w9iXwwlIIviI2"
)
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Cache: local-memory backend avoids any external cache service during tests.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": ""
    }
}
# Passwords: MD5 is insecure but much faster, which is the point in tests.
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# Templates: cached loader skips re-parsing templates on every request.
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # NOQA
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # NOQA
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# Email: in-memory backend captures outbound mail so tests can assert on it.
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025 | 21.185185 | 78 | 0.663462 |
7cece22c01449d5fe8a8e8acd5517606f670a7a3 | 5,878 | py | Python | tests/sync_test_app.py | BiuroCo/mega | 31dbbe791b591b3d80f332ad782d9db76c9d388f | [
"BSD-2-Clause"
] | 1 | 2020-01-25T21:20:15.000Z | 2020-01-25T21:20:15.000Z | tests/sync_test_app.py | BiuroCo/mega | 31dbbe791b591b3d80f332ad782d9db76c9d388f | [
"BSD-2-Clause"
] | null | null | null | tests/sync_test_app.py | BiuroCo/mega | 31dbbe791b591b3d80f332ad782d9db76c9d388f | [
"BSD-2-Clause"
] | 1 | 2022-02-24T10:06:08.000Z | 2022-02-24T10:06:08.000Z | """
Application for testing syncing algorithm
(c) 2013-2014 by Mega Limited, Wellsford, New Zealand
This file is part of the MEGA SDK - Client Access Engine.
Applications using the MEGA API must present a valid application key
and comply with the the rules set forth in the Terms of Service.
The MEGA SDK is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@copyright Simplified (2-clause) BSD License.
You should have received a copy of the license along with this
program.
"""
import os
import time
import random
from sync_test_base import get_random_str
import shutil
import logging
import datetime
class SyncTestApp(object):
    """
    Test application base class (Python 2 syntax).
    Context manager that starts the app, creates matching in/out/work
    folders named with a random suffix, and cleans everything up on exit.
    Subclasses implement the virtual methods at the bottom.
    """
    def __init__(self, local_mount_in, local_mount_out, work_folder, delete_tmp_files=True, use_large_files=True):
        """
        local_mount_in: upsync mount point (files are created here)
        local_mount_out: downsync mount point (files are expected to appear here)
        work_folder: a temporary folder to place generated files
        delete_tmp_files: remove the generated folders on exit
        use_large_files: allow large test files (consumed by subclasses)
        """
        self.start_time = time.time()
        random.seed(time.time())
        self.local_mount_in = local_mount_in
        self.local_mount_out = local_mount_out
        # Shared random suffix keeps in/out/work folders paired.
        self.rnd_folder = get_random_str()
        self.local_folder_in = os.path.join(self.local_mount_in, self.rnd_folder)
        self.local_folder_out = os.path.join(self.local_mount_out, self.rnd_folder)
        self.work_folder = os.path.join(work_folder, self.rnd_folder)
        # Max attempts to wait for the out folder to sync into existence.
        self.nr_retries = 200
        self.delete_tmp_files = delete_tmp_files
        self.use_large_files = use_large_files
    def __enter__(self):
        # call subclass function
        res = self.start()
        if not res:
            self.stop()
            raise Exception('Failed to start app!')
        res = self.prepare_folders()
        if not res:
            self.stop()
            raise Exception('Failed to start app!')
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # remove tmp folders; each rmtree is best-effort on purpose
        if self.delete_tmp_files:
            try:
                logging.debug("Deleting %s" % self.local_folder_in)
                shutil.rmtree(self.local_folder_in)
            except OSError:
                pass
            try:
                logging.debug("Deleting %s" % self.local_folder_out)
                shutil.rmtree(self.local_folder_out)
            except OSError:
                pass
            try:
                logging.debug("Deleting %s" % self.work_folder)
                shutil.rmtree(self.work_folder)
            except OSError:
                pass
        # terminate apps
        self.stop()
        logging.info("Execution time: %s" % str(datetime.timedelta(seconds=time.time()-self.start_time)))
    @staticmethod
    def touch(path):
        """
        create an empty file (or update utime of an existing one)
        """
        with open(path, 'a'):
            os.utime(path, None)
    def prepare_folders(self):
        """
        prepare upsync, downsync and work directories.
        Returns True on success, False otherwise.
        """
        # create "in" folder
        logging.info("IN folder: %s" % self.local_folder_in)
        try:
            os.makedirs(self.local_folder_in)
        except OSError, e:
            logging.error("Failed to create directory: %s (%s)" % (self.local_folder_in, e))
            return False
        logging.info("OUT folder: %s" % self.local_folder_out)
        self.sync()
        # temporary workaround
        #tmp_fix_file = os.path.join(self.local_mount_out, "tmp_fix")
        success = False
        # poll until the "out" folder has been synced into existence
        for r in range(0, self.nr_retries):
            try:
                if os.path.isdir(self.local_folder_out):
                    success = True
                    break
                else:
                    # wait for a dir
                    logging.debug("Directory %s not found! Retrying [%d/%d] .." % (self.local_folder_out, r + 1, self.nr_retries))
                    #self.touch(tmp_fix_file)
                    self.sync()
            except OSError:
                # wait for a dir
                logging.debug("Directory %s not found! Retrying [%d/%d] .." % (self.local_folder_out, r + 1, self.nr_retries))
                #self.touch(tmp_fix_file)
                self.sync()
        if success is False:
            logging.error("Failed to access directory: %s" % self.local_folder_out)
            return False
        # create work folder
        logging.debug("Work folder: %s" % self.work_folder)
        try:
            os.makedirs(self.work_folder)
        except OSError, e:
            logging.error("Failed to create directory: %s (%s)" % (self.work_folder, e))
            return False
        return True
    def stop(self):
        """
        cleans directories and call finish
        """
        if self.delete_tmp_files:
            try:
                shutil.rmtree(self.local_folder_in)
            except OSError:
                pass
            self.sync()
        self.finish()
    # virtual methods
    def start(self):
        """
        start application
        """
        raise NotImplementedError("Not Implemented !")
    def finish(self):
        """
        stop application
        """
        raise NotImplementedError("Not Implemented !")
    def sync(self):
        """
        wait for full synchronization
        """
        raise NotImplementedError("Not Implemented !")
    def pause(self):
        """
        pause application
        """
        raise NotImplementedError("Not Implemented !")
    def unpause(self):
        """
        unpause application
        """
        raise NotImplementedError("Not Implemented !")
    def is_alive(self):
        """
        return True if application instance is running
        """
        raise NotImplementedError("Not Implemented !")
| 29.39 | 130 | 0.58166 |
354edf779bda3095a7c579484d998c0142ecfa74 | 13,855 | py | Python | core/admin/mailu/utils.py | Archipel/Mailu | 637c72f3fc08b7c8917da2d073aced945a10ebe4 | [
"MIT"
] | null | null | null | core/admin/mailu/utils.py | Archipel/Mailu | 637c72f3fc08b7c8917da2d073aced945a10ebe4 | [
"MIT"
] | null | null | null | core/admin/mailu/utils.py | Archipel/Mailu | 637c72f3fc08b7c8917da2d073aced945a10ebe4 | [
"MIT"
] | null | null | null | """ Mailu admin app utilities
"""
try:
import cPickle as pickle
except ImportError:
import pickle
import hmac
import secrets
import time
from multiprocessing import Value
from mailu import limiter
import flask
import flask_login
import flask_migrate
import flask_babel
import redis
from flask.sessions import SessionMixin, SessionInterface
from itsdangerous.encoding import want_bytes
from werkzeug.datastructures import CallbackDict
from werkzeug.contrib import fixers
# Login configuration
login = flask_login.LoginManager()
login.login_view = "ui.login"
@login.unauthorized_handler
def handle_needs_login():
    """ Redirect unauthorized requests to the login page, remembering the
    originally requested endpoint via the ``next`` parameter. """
    login_url = flask.url_for('ui.login', next=flask.request.endpoint)
    return flask.redirect(login_url)
# Rate limiter
limiter = limiter.LimitWraperFactory()
# Application translation
babel = flask_babel.Babel()
@babel.localeselector
def get_locale():
    """ selects locale for translation """
    # Expose the list of installed translations to templates via the session.
    translations = [str(translation) for translation in babel.list_translations()]
    flask.session['available_languages'] = translations
    try:
        # Prefer a language the user already picked this session.
        language = flask.session['language']
    except KeyError:
        # Otherwise negotiate from the Accept-Language header and remember it.
        language = flask.request.accept_languages.best_match(translations)
        flask.session['language'] = language
    return language
# Proxy fixer
class PrefixMiddleware(object):
    """ WSGI middleware honoring the X-Forwarded-Prefix proxy header. """

    def __init__(self):
        # Wrapped WSGI application; assigned by init_app().
        self.app = None

    def __call__(self, environ, start_response):
        # Copy a non-empty X-Forwarded-Prefix into SCRIPT_NAME so URL
        # generation accounts for the reverse-proxy mount point.
        forwarded_prefix = environ.get('HTTP_X_FORWARDED_PREFIX', '')
        if forwarded_prefix:
            environ['SCRIPT_NAME'] = forwarded_prefix
        return self.app(environ, start_response)

    def init_app(self, app):
        # Chain ProxyFix (trust one hop of X-Forwarded-For/-Proto) behind us
        # and install this middleware as the app's WSGI entry point.
        self.app = fixers.ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
        app.wsgi_app = self
proxy = PrefixMiddleware()
# Data migrate
migrate = flask_migrate.Migrate()
# session store (inspired by https://github.com/mbr/flask-kvsession)
class RedisStore:
    """ Stores session data in a redis db. """
    # Redis can expire keys natively, so TTLs are honored by put().
    has_ttl = True
    def __init__(self, redisstore):
        self.redis = redisstore
    def get(self, key):
        """ load item from store; raises KeyError to mirror dict semantics. """
        value = self.redis.get(key)
        if value is None:
            raise KeyError(key)
        return value
    def put(self, key, value, ttl=None):
        """ save item to store, expiring after ttl seconds when given. """
        if ttl:
            self.redis.setex(key, int(ttl), value)
        else:
            self.redis.set(key, value)
    def delete(self, key):
        """ delete item from store (no error if absent). """
        self.redis.delete(key)
    def list(self, prefix=None):
        """ return list of keys starting with prefix """
        # scan_iter takes a glob pattern, hence the appended '*'.
        if prefix:
            prefix += b'*'
        return list(self.redis.scan_iter(match=prefix))
class DictStore:
    """ In-memory session store backed by a plain python dict. """

    # A plain dict cannot expire entries on its own.
    has_ttl = False

    def __init__(self):
        self.dict = {}

    def get(self, key):
        """ load item from store; raises KeyError when absent. """
        return self.dict[key]

    def put(self, key, value, ttl_secs=None):
        """ save item to store (ttl_secs is accepted but ignored). """
        self.dict[key] = value

    def delete(self, key):
        """ delete item from store; missing keys are silently ignored. """
        self.dict.pop(key, None)

    def list(self, prefix=None):
        """ return list of keys starting with prefix """
        if prefix is None:
            return list(self.dict)
        return [key for key in self.dict if key.startswith(prefix)]
class MailuSession(CallbackDict, SessionMixin):
    """ Custom flask session storage.
    Session keys are uid|sid|created triples (see MailuSessionConfig);
    values are pickled dicts kept in app.session_store.
    """
    # default modified to false
    modified = False
    def __init__(self, key=None, app=None):
        self.app = app or flask.current_app
        initial = None
        key = want_bytes(key)
        # Only try the backend when the cookie value parses as a valid key.
        if parsed := self.app.session_config.parse_key(key, self.app):
            try:
                initial = pickle.loads(app.session_store.get(key))
            except (KeyError, EOFError, pickle.UnpicklingError):
                # either the cookie was manipulated or we did not find the
                # session in the backend or the pickled data is invalid.
                # => start new session
                pass
            else:
                (self._uid, self._sid, self._created) = parsed
                self._key = key
        if initial is None:
            # start new session
            self.new = True
            self._uid = None
            self._sid = None
            self._created = self.app.session_config.gen_created()
            self._key = None
        # Any mutation of the dict marks the session dirty via this callback.
        def _on_update(obj):
            obj.modified = True
        CallbackDict.__init__(self, initial, _on_update)
    @property
    def saved(self):
        """ this reflects if the session was saved. """
        return self._key is not None
    @property
    def sid(self):
        """ this reflects the session's id (uid + sid + created). """
        if self._sid is None or self._uid is None or self._created is None:
            return None
        return b''.join([self._uid, self._sid, self._created])
    def destroy(self):
        """ destroy session for security reasons. """
        self.delete()
        self._uid = None
        self._sid = None
        self._created = None
        self.clear()
        self.modified = True
        self.new = False
    def regenerate(self):
        """ generate new id for session to avoid `session fixation`. """
        self.delete()
        self._sid = None
        self._created = self.app.session_config.gen_created()
        self.modified = True
    def delete(self):
        """ Delete stored session. """
        if self.saved:
            self.app.session_store.delete(self._key)
            self._key = None
    def save(self):
        """ Save session to store.
        Returns True when the cookie needs to be (re)set on the response. """
        set_cookie = False
        # set uid from dict data
        if self._uid is None:
            self._uid = self.app.session_config.gen_uid(self.get('user_id', ''))
        # create new session id for new or regenerated sessions and force setting the cookie
        if self._sid is None:
            self._sid = self.app.session_config.gen_sid()
            set_cookie = True
        # get new session key
        key = self.sid
        # delete old session if key has changed
        if key != self._key:
            self.delete()
        # remember time to refresh (half of the configured lifetime)
        self['_refresh'] = int(time.time()) + self.app.permanent_session_lifetime.total_seconds()/2
        # save session
        self.app.session_store.put(
            key,
            pickle.dumps(dict(self)),
            self.app.permanent_session_lifetime.total_seconds()
        )
        self._key = key
        self.new = False
        self.modified = False
        return set_cookie
    def needs_refresh(self):
        """ Checks if server side session needs to be refreshed. """
        return int(time.time()) > self.get('_refresh', 0)
class MailuSessionConfig:
    """ Stores sessions crypto config.
    A session key is base64(hmac(uid)) + base64(random sid) + base64(created).
    """
    # default size of session key parts
    uid_bits = 64 # default if SESSION_KEY_BITS is not set in config
    sid_bits = 128 # for now. must be multiple of 8!
    time_bits = 32 # for now. must be multiple of 8!
    def __init__(self, app=None):
        if app is None:
            app = flask.current_app
        bits = app.config.get('SESSION_KEY_BITS', self.uid_bits)
        if not 64 <= bits <= 256:
            raise ValueError('SESSION_KEY_BITS must be between 64 and 256!')
        # Round the uid size up to whole bytes.
        uid_bytes = bits//8 + (bits%8>0)
        sid_bytes = self.sid_bits//8
        key = want_bytes(app.secret_key)
        # Derive a dedicated uid-hashing key from the app secret.
        self._hmac = hmac.new(hmac.digest(key, b'SESSION_UID_HASH', digest='sha256'), digestmod='sha256')
        self._uid_len = uid_bytes
        # Pre-compute the base64 lengths of each fixed-size key part.
        self._uid_b64 = len(self._encode(bytes(uid_bytes)))
        self._sid_len = sid_bytes
        self._sid_b64 = len(self._encode(bytes(sid_bytes)))
        self._key_min = self._uid_b64 + self._sid_b64
        self._key_max = self._key_min + len(self._encode(bytes(self.time_bits//8)))
    def gen_sid(self):
        """ Generate random session id. """
        return self._encode(secrets.token_bytes(self._sid_len))
    def gen_uid(self, uid):
        """ Generate hashed user id part of session key. """
        _hmac = self._hmac.copy()
        _hmac.update(want_bytes(uid))
        return self._encode(_hmac.digest()[:self._uid_len])
    def gen_created(self, now=None):
        """ Generate base64 representation of creation time. """
        return self._encode(int(now or time.time()).to_bytes(8, byteorder='big').lstrip(b'\0'))
    def parse_key(self, key, app=None, validate=False, now=None):
        """ Split key into sid, uid and creation time.
        Returns (uid, sid, created) bytes or None when the key is invalid. """
        if not (isinstance(key, bytes) and self._key_min <= len(key) <= self._key_max):
            return None
        uid = key[:self._uid_b64]
        sid = key[self._uid_b64:self._key_min]
        crt = key[self._key_min:]
        # validate if parts are decodeable
        created = self._decode(crt)
        if created is None or self._decode(uid) is None or self._decode(sid) is None:
            return None
        # validate creation time when requested or store does not support ttl
        if validate or not app.session_store.has_ttl:
            if now is None:
                now = int(time.time())
            created = int.from_bytes(created, byteorder='big')
            if not created < now < created + app.permanent_session_lifetime.total_seconds():
                return None
        return (uid, sid, crt)
    def _encode(self, value):
        # Unpadded urlsafe base64 keeps keys cookie- and redis-safe.
        return secrets.base64.urlsafe_b64encode(value).rstrip(b'=')
    def _decode(self, value):
        try:
            # Re-add the padding stripped by _encode before decoding.
            return secrets.base64.urlsafe_b64decode(value + b'='*(4-len(value)%4))
        except secrets.binascii.Error:
            return None
class MailuSessionInterface(SessionInterface):
    """ Custom flask session interface backed by MailuSession. """
    def open_session(self, app, request):
        """ Load or create session from the session cookie. """
        return MailuSession(request.cookies.get(app.config['SESSION_COOKIE_NAME'], None), app)
    def save_session(self, app, session, response):
        """ Save modified session and manage the session cookie. """
        # If the session is modified to be empty, remove the cookie.
        # If the session is empty, return without setting the cookie.
        if not session:
            if session.modified:
                session.delete()
                response.delete_cookie(
                    app.session_cookie_name,
                    domain=self.get_cookie_domain(app),
                    path=self.get_cookie_path(app),
                )
            return
        # Add a "Vary: Cookie" header if the session was accessed
        if session.accessed:
            response.vary.add('Cookie')
        set_cookie = session.permanent and app.config['SESSION_REFRESH_EACH_REQUEST']
        need_refresh = session.needs_refresh()
        # save modified session or refresh unmodified session
        if session.modified or need_refresh:
            set_cookie |= session.save()
        # set cookie on refreshed permanent sessions
        if need_refresh and session.permanent:
            set_cookie = True
        # set or update cookie if necessary
        if set_cookie:
            response.set_cookie(
                app.session_cookie_name,
                session.sid,
                expires=self.get_expiration_time(app, session),
                httponly=self.get_cookie_httponly(app),
                domain=self.get_cookie_domain(app),
                path=self.get_cookie_path(app),
                secure=self.get_cookie_secure(app),
                samesite=self.get_cookie_samesite(app)
            )
class MailuSessionExtension:
    """ Server side session handling """
    @staticmethod
    def cleanup_sessions(app=None):
        """ Remove invalid or expired sessions. Returns the number removed. """
        app = app or flask.current_app
        now = int(time.time())
        count = 0
        for key in app.session_store.list():
            # parse_key with validate=True also checks the creation timestamp.
            if not app.session_config.parse_key(key, app, validate=True, now=now):
                app.session_store.delete(key)
                count += 1
        return count
    @staticmethod
    def prune_sessions(uid=None, keep=None, app=None):
        """ Remove sessions. Returns the number removed.
            uid: remove all sessions (NONE) or sessions belonging to a specific user
            keep: keep listed sessions
        """
        keep = keep or set()
        app = app or flask.current_app
        # A uid prefix limits deletion to that user's sessions.
        prefix = None if uid is None else app.session_config.gen_uid(uid)
        count = 0
        for key in app.session_store.list(prefix):
            if key not in keep:
                app.session_store.delete(key)
                count += 1
        return count
    def init_app(self, app):
        """ Replace session management of application. """
        if app.config.get('MEMORY_SESSIONS'):
            # in-memory session store for use in development
            app.session_store = DictStore()
        else:
            # redis-based session store for use in production
            app.session_store = RedisStore(
                redis.StrictRedis().from_url(app.config['SESSION_STORAGE_URL'])
            )
        # clean expired sessions once on first use in case lifetime was changed;
        # the multiprocessing lock ensures only one worker does the cleanup
        def cleaner():
            with cleaned.get_lock():
                if not cleaned.value:
                    cleaned.value = True
                    flask.current_app.logger.error('cleaning')
                    MailuSessionExtension.cleanup_sessions(app)
        app.before_first_request(cleaner)
        app.session_config = MailuSessionConfig(app)
        app.session_interface = MailuSessionInterface()
# Process-shared flag used by init_app's one-shot session cleanup.
cleaned = Value('i', False)
# Singleton extension instance imported by the application factory.
session = MailuSessionExtension()
| 29.924406 | 108 | 0.605485 |
8adc2a9d885c68814ac38d0bcc885b9a092cc2bd | 46,762 | py | Python | django/db/models/base.py | Rao-Muneeb/django-nonrel | 875bb88f07178c591eef69cdf2482a3d595a093c | [
"BSD-3-Clause"
] | null | null | null | django/db/models/base.py | Rao-Muneeb/django-nonrel | 875bb88f07178c591eef69cdf2482a3d595a093c | [
"BSD-3-Clause"
] | null | null | null | django/db/models/base.py | Rao-Muneeb/django-nonrel | 875bb88f07178c591eef69cdf2482a3d595a093c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
from django.utils.six.moves import zip
import django.db.models.manager # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
def subclass_exception(name, parents, module, attached_to=None):
    """
    Create exception subclass. Used by ModelBase below.

    If 'attached_to' is supplied, the exception will be created in a way that
    allows it to be pickled, assuming the returned exception class will be added
    as an attribute to the 'attached_to' class.
    """
    attrs = {'__module__': module}
    if attached_to is not None:
        # Exceptions are special: their state lives in self.args rather than
        # self.__dict__, so pickling round-trips through a module-level
        # factory that re-resolves the inner class on 'attached_to'.
        def __reduce__(self):
            return (unpickle_inner_exception, (attached_to, name), self.args)

        def __setstate__(self, args):
            self.args = args

        attrs['__reduce__'] = __reduce__
        attrs['__setstate__'] = __setstate__
    return type(name, parents, attrs)
class ModelBase(type):
    """
    Metaclass for all models.

    Builds the ``_meta`` Options object, creates the per-model exception
    classes, wires up fields and managers inherited from parent models,
    and registers the finished class with the app cache.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(ModelBase, cls).__new__
        # six.with_metaclass() inserts an extra class called 'NewBase' in the
        # inheritance tree: Model -> NewBase -> object. But the initialization
        # should be executed only once for a given model class.
        # attrs will never be empty for classes declared in the standard way
        # (ie. with the `class` keyword). This is quite robust.
        if name == 'NewBase' and attrs == {}:
            return super_new(cls, name, bases, attrs)
        # Also ensure initialization is only performed for subclasses of Model
        # (excluding Model class itself).
        parents = [b for b in bases if isinstance(b, ModelBase) and
                not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
        if not parents:
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_attrs = {'__module__': module}
        # __classcell__ is set by the compiler when the class body uses
        # zero-argument super(); it must be forwarded to type.__new__.
        classcell = attrs.pop('__classcell__', None)
        if classcell is not None:
            new_attrs['__classcell__'] = classcell
        new_class = super_new(cls, name, bases, new_attrs)
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            # No inner Meta declared locally; fall back to an inherited one.
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        if getattr(meta, 'app_label', None) is None:
            # Figure out the app_label by looking one level up.
            # For 'django.contrib.sites.models', this would be 'sites'.
            model_module = sys.modules[new_class.__module__]
            kwargs = {"app_label": model_module.__name__.split('.')[-2]}
        else:
            kwargs = {}
        new_class.add_to_class('_meta', Options(meta, **kwargs))
        if not abstract:
            # Concrete models get their own DoesNotExist/MultipleObjectsReturned
            # classes, derived from the parents' versions when available.
            new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'),
                    tuple(x.DoesNotExist
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (ObjectDoesNotExist,),
                    module, attached_to=new_class))
            new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'),
                    tuple(x.MultipleObjectsReturned
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (MultipleObjectsReturned,),
                    module, attached_to=new_class))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        # If the model is a proxy, ensure that the base class
        # hasn't been swapped out.
        if is_proxy and base_meta and base_meta.swapped:
            raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Bail out early if we have already created this class.
        m = get_model(new_class._meta.app_label, name,
                      seed_cache=False, only_installed=False)
        if m is not None:
            return m
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = new_class._meta.local_fields + \
                     new_class._meta.local_many_to_many + \
                     new_class._meta.virtual_fields
        field_names = set([f.name for f in new_fields])
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            if (new_class._meta.local_fields or
                    new_class._meta.local_many_to_many):
                raise FieldError("Proxy model '%s' contains model fields." % name)
            new_class._meta.setup_proxy(base)
            new_class._meta.concrete_model = base._meta.concrete_model
        else:
            new_class._meta.concrete_model = new_class
        # Do the appropriate setup for any model parents.
        o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
                        if isinstance(f, OneToOneField)])
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '
                                     'with field of similar name from '
                                     'base class %r' %
                                        (field.name, name, base.__name__))
            if not base._meta.abstract:
                # Concrete classes...
                base = base._meta.concrete_model
                if base in o2o_map:
                    # An explicit OneToOneField to the parent acts as the link.
                    field = o2o_map[base]
                elif not is_proxy:
                    # Otherwise, synthesize the implicit <parent>_ptr link.
                    attr_name = '%s_ptr' % base._meta.model_name
                    field = OneToOneField(base, name=attr_name,
                            auto_created=True, parent_link=True)
                    new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_class.add_to_class(field.name, copy.deepcopy(field))
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)
            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)
            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)
            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '\
                                     'with field of similar name from '\
                                     'abstract base class %r' % \
                                        (field.name, name, base.__name__))
                new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        register_models(new_class._meta.app_label, new_class)
        # Because of the way imports happen (recursively), we may or may not be
        # the first time this model tries to register with the framework. There
        # should only be one class for each model, so we always return the
        # registered version.
        return get_model(new_class._meta.app_label, name,
                seed_cache=False, only_installed=False)
    def copy_managers(cls, base_managers):
        # Copy managers inherited from base classes onto this class, skipping
        # any manager name the class already (re)defines itself.
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        # Install `value` on the class, letting objects that know how to
        # contribute to a model class (fields, managers, Meta/Options) do so
        # via their contribute_to_class() hook.
        if hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(field, model, cls):
                setattr(
                    field.rel.to,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.rel.to,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            add_lazy_relation(
                cls,
                opts.order_with_respect_to,
                opts.order_with_respect_to.rel.to,
                make_foreign_order_accessors
            )
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
        if hasattr(cls, 'get_absolute_url'):
            # Wrap get_absolute_url so settings.ABSOLUTE_URL_OVERRIDES applies.
            cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
                                                  cls.get_absolute_url)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    Per-instance bookkeeping about an object's persistence state.
    """
    def __init__(self, db=None):
        # ``db`` records which database alias the instance was loaded from
        # (or last saved to).  ``adding`` stays True until the instance has
        # been written: it makes uniqueness validation treat the object as
        # new/unsaved (important for explicit, non-auto PKs), and, on
        # backends that cannot distinguish INSERT from UPDATE, it marks the
        # instance as newly created.
        self.db, self.adding = db, True
class Model(six.with_metaclass(ModelBase)):
    """Base class for all Django models."""
    # True on the dynamically-created subclasses used for deferred field
    # loading (see __reduce__); False on ordinary, module-level models.
    _deferred = False
    def __init__(self, *args, **kwargs):
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
        # Set up the storage for instance state
        self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.concrete_fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")
        if not kwargs:
            fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matter - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(self._meta.fields)
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.
        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                     or field.column is None)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)
        if kwargs:
            # Remaining keywords may target plain properties on the class;
            # anything else is an error.
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
        # Snapshot the PK at construction time: backends that cannot tell
        # INSERT from UPDATE compare it in _save_table() to decide whether
        # the row already exists (django-nonrel addition).
        self._original_pk = self._get_pk_val()
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
    def __repr__(self):
        """Return '<ClassName: str(self)>', tolerating bad Unicode."""
        try:
            u = six.text_type(self)
        except (UnicodeEncodeError, UnicodeDecodeError):
            u = '[Bad Unicode data]'
        return force_str('<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        """Default string form; subclasses usually override (__unicode__ on PY2)."""
        if six.PY2 and hasattr(self, '__unicode__'):
            return force_text(self).encode('utf-8')
        return '%s object' % self.__class__.__name__
    def __eq__(self, other):
        # Equal when `other` is an instance of this class and primary keys
        # match. Note: two unsaved instances (pk is None) compare equal.
        return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Hash on the PK so saved instances behave consistently with __eq__.
        return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provides pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        if not self._deferred:
            class_id = self._meta.app_label, self._meta.object_name
            return model_unpickle, (class_id, [], simple_class_factory), data
        defers = []
        for field in self._meta.fields:
            if isinstance(self.__class__.__dict__.get(field.attname),
                          DeferredAttribute):
                defers.append(field.attname)
        model = self._meta.proxy_for_model
        class_id = model._meta.app_label, model._meta.object_name
        return (model_unpickle, (class_id, defers, deferred_class_factory), data)
    def _get_pk_val(self, meta=None):
        # Read the primary key value, optionally via a parent class's meta.
        if not meta:
            meta = self._meta
        return getattr(self, meta.pk.attname)
    def _set_pk_val(self, value):
        return setattr(self, self._meta.pk.attname, value)
    pk = property(_get_pk_val, _set_pk_val)
    def serializable_value(self, field_name):
        """
        Returns the value of the field name for this instance. If the field is
        a foreign key, returns the id value, instead of the object. If there's
        no Field object with this name on the model, the model attribute's
        value is returned directly.
        Used to serialize a field's value (in the serializer, or form output,
        for example). Normally, you would just access the attribute directly
        and not use this method.
        """
        try:
            field = self._meta.get_field_by_name(field_name)[0]
        except FieldDoesNotExist:
            return getattr(self, field_name)
        return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        if force_insert and (force_update or update_fields):
            raise ValueError("Cannot force both insert and updating in model saving.")
        if update_fields is not None:
            # If update_fields is empty, skip the save. We do also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
            if len(update_fields) == 0:
                return
            update_fields = frozenset(update_fields)
            field_names = set()
            for field in self._meta.fields:
                if not field.primary_key:
                    field_names.add(field.name)
                    if field.name != field.attname:
                        field_names.add(field.attname)
            non_model_fields = update_fields.difference(field_names)
            if non_model_fields:
                raise ValueError("The following fields do not exist in this "
                                 "model or are m2m fields: %s"
                                 % ', '.join(non_model_fields))
        # If saving to the same database, and this model is deferred, then
        # automatically do a "update_fields" save on the loaded fields.
        elif not force_insert and self._deferred and using == self._state.db:
            field_names = set()
            for field in self._meta.concrete_fields:
                if not field.primary_key and not hasattr(field, 'through'):
                    field_names.add(field.attname)
            deferred_fields = [
                f.attname for f in self._meta.fields
                if f.attname not in self.__dict__
                   and isinstance(self.__class__.__dict__[f.attname],
                                  DeferredAttribute)]
            loaded_fields = field_names.difference(deferred_fields)
            if loaded_fields:
                update_fields = frozenset(loaded_fields)
        self.save_base(using=using, force_insert=force_insert,
                       force_update=force_update, update_fields=update_fields)
    save.alters_data = True
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.
        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Refresh the insert/update detection snapshot (django-nonrel).
        self._original_pk = self._get_pk_val(meta)
        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
    save_base.alters_data = True
    def _save_parents(self, cls, using, update_fields):
        """
        Saves all the parents of cls using values from self.
        """
        meta = cls._meta
        for parent, field in meta.parents.items():
            # Make sure the link fields are synced between parent and self.
            if (field and getattr(self, parent._meta.pk.attname) is None
                    and getattr(self, field.attname) is not None):
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            self._save_parents(cls=parent, using=using, update_fields=update_fields)
            self._save_table(cls=parent, using=using, update_fields=update_fields)
            # Set the parent's PK value to self.
            if field:
                setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy set
                # attname directly, bypassing the descriptor. Invalidate
                # the related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
                cache_name = field.get_cache_name()
                if hasattr(self, cache_name):
                    delattr(self, cache_name)
    def _save_table(self, raw=False, cls=None, force_insert=False,
                    force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Updates or inserts the data
        for a single table.

        Returns True when an existing row was updated, False when a new row
        was inserted.
        """
        meta = cls._meta
        non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
        if update_fields:
            non_pks = [f for f in non_pks
                       if f.name in update_fields or f.attname in update_fields]
        pk_val = self._get_pk_val(meta)
        pk_set = pk_val is not None
        if not pk_set and (force_update or update_fields):
            raise ValueError("Cannot force an update in save() with no primary key.")
        # TODO/NONREL: Some backends could emulate force_insert/_update
        # with an optimistic transaction, but since it's costly we should
        # only do it when the user explicitly wants it.
        # By adding support for an optimistic locking transaction
        # in Django (SQL: SELECT ... FOR UPDATE) we could even make that
        # part fully reusable on all backends (the current .exists()
        # check below isn't really safe if you have lots of concurrent
        # requests. BTW, and neither is QuerySet.get_or_create).
        connection = connections[using]
        # Backends that can't distinguish INSERT from UPDATE fall back to
        # comparing the PK remembered at load time (django-nonrel).
        try_update = connection.features.distinguishes_insert_from_update
        entity_exists = bool(not self._state.adding and self._original_pk == pk_val)
        updated = False
        # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
        if try_update and pk_set and not force_insert:
            base_qs = cls._base_manager.using(using)
            values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
                      for f in non_pks]
            forced_update = update_fields or force_update
            updated = self._do_update(base_qs, using, pk_val, values, update_fields,
                                      forced_update)
            if force_update and not updated:
                raise DatabaseError("Forced update did not affect any rows.")
            if update_fields and not updated:
                raise DatabaseError("Save with update_fields did not affect any rows.")
        if not updated:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to
                # autopopulate the _order field
                field = meta.order_with_respect_to
                order_value = cls._base_manager.using(using).filter(
                    **{field.name: getattr(self, field.attname)}).count()
                self._order = order_value
            fields = meta.local_concrete_fields
            if not pk_set:
                fields = [f for f in fields if not isinstance(f, AutoField)]
            update_pk = bool(meta.has_auto_field and not pk_set)
            result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
            if update_pk:
                setattr(self, meta.pk.attname, result)
            if not try_update:
                updated = entity_exists
        return updated
    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        This method will try to update the model. If the model was updated (in
        the sense that an update query was done and a matching row was found
        from the DB) the method will return True.
        """
        filtered = base_qs.filter(pk=pk_val)
        if not values:
            # We can end up here when saving a model in inheritance chain where
            # update_fields doesn't target any field in current model. In that
            # case we just say the update succeeded. Another case ending up here
            # is a model with just PK - in that case check that the PK still
            # exists.
            return update_fields is not None or filtered.exists()
        if self._meta.select_on_save and not forced_update:
            if filtered.exists():
                filtered._update(values)
                return True
            else:
                return False
        return filtered._update(values) > 0
    def _do_insert(self, manager, using, fields, update_pk, raw):
        """
        Do an INSERT. If update_pk is defined then this method should return
        the new pk for the model.
        """
        return manager._insert([self], fields=fields, return_id=update_pk,
                               using=using, raw=raw)
    def delete(self, using=None):
        """Delete this instance (and anything cascading from it) from the DB."""
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
        collector = Collector(using=using)
        collector.collect([self])
        collector.delete()
        # Reset insert/update bookkeeping: with _original_pk cleared, a later
        # save() on a backend without insert/update distinction will treat
        # the row as absent (django-nonrel addition).
        self._state.adding = False
        self._original_pk = None
    delete.alters_data = True
    def _get_FIELD_display(self, field):
        # Curried as get_<field>_display(): map the stored value through the
        # field's choices, falling back to the raw value.
        value = getattr(self, field.attname)
        return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        # Curried as get_next_by_<field>() / get_previous_by_<field>(); ties
        # on the field value are broken by pk to give a total ordering.
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = 'gt' if is_next else 'lt'
        order = '' if is_next else '-'
        param = force_text(getattr(self, field.attname))
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        # Curried as get_next_in_order() / get_previous_in_order() for models
        # using Meta.order_with_respect_to; result is cached per direction.
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = 'gt' if is_next else 'lt'
            order = '_order' if is_next else '-_order'
            order_field = self._meta.order_with_respect_to
            obj = self._default_manager.filter(**{
                order_field.name: getattr(self, order_field.attname)
            }).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            setattr(self, cachename, obj)
        return getattr(self, cachename)
    def prepare_database_save(self, unused):
        # Called when this instance is used as a query value: substitute the pk.
        if self.pk is None:
            raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
        return self.pk
    def clean(self):
        """
        Hook for doing any extra model-wide validation after clean() has been
        called on every field by self.clean_fields. Any ValidationError raised
        by this method will not be associated with a particular field; it will
        have a special-case association with the field defined by NON_FIELD_ERRORS.
        """
        pass
    def validate_unique(self, exclude=None):
        """
        Checks unique constraints on the model and raises ``ValidationError``
        if any failed.
        """
        unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
        errors = self._perform_unique_checks(unique_checks)
        date_errors = self._perform_date_checks(date_checks)
        for k, v in date_errors.items():
            errors.setdefault(k, []).extend(v)
        if errors:
            raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.parents.keys():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                else:
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.parents.keys():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        # Run each (model_class, field-name-tuple) check against the DB and
        # collect failures into an error dict keyed by field (or
        # NON_FIELD_ERRORS for multi-field unique_together checks).
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        # Enforce unique_for_date / unique_for_year / unique_for_month by
        # querying for a clashing row in the same date bucket.
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
    def date_error_message(self, lookup_type, field, unique_for):
        # Build the localized message for a unique_for_<date/...> violation.
        opts = self._meta
        return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
            'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
            'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
            'lookup': lookup_type,
        }
    def unique_error_message(self, model_class, unique_check):
        # Build the localized message for a unique / unique_together violation.
        opts = model_class._meta
        model_name = capfirst(opts.verbose_name)
        # A unique field
        if len(unique_check) == 1:
            field_name = unique_check[0]
            field = opts.get_field(field_name)
            field_label = capfirst(field.verbose_name)
            # Insert the error into the error dict, very sneaky
            return field.error_messages['unique'] % {
                'model_name': six.text_type(model_name),
                'field_label': six.text_type(field_label)
            }
        # unique_together
        else:
            field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
            field_labels = get_text_list(field_labels, _('and'))
            return _("%(model_name)s with this %(field_label)s already exists.") % {
                'model_name': six.text_type(model_name),
                'field_label': six.text_type(field_labels)
            }
    def full_clean(self, exclude=None, validate_unique=True):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.
        """
        errors = {}
        if exclude is None:
            exclude = []
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        if validate_unique:
            for name in errors.keys():
                if name != NON_FIELD_ERRORS and name not in exclude:
                    exclude.append(name)
            try:
                self.validate_unique(exclude=exclude)
            except ValidationError as e:
                errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
    def clean_fields(self, exclude=None):
        """
        Cleans all fields and raises a ValidationError containing message_dict
        of all validation errors if any occur.
        """
        if exclude is None:
            exclude = []
        errors = {}
        for f in self._meta.fields:
            if f.name in exclude:
                continue
            # Skip validation for empty fields with blank=True. The developer
            # is responsible for making sure they have a valid value.
            raw_value = getattr(self, f.attname)
            if f.blank and raw_value in f.empty_values:
                continue
            try:
                setattr(self, f.attname, f.clean(raw_value, self))
            except ValidationError as e:
                errors[f.name] = e.error_list
        if errors:
            raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """
    Curried onto the related model as ``set_<model>_order()``: persist the
    ordering given by *id_list* for the rows of *ordered_obj* that are
    attached to ``self`` via Meta.order_with_respect_to.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    related_value = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_field_name = ordered_obj._meta.order_with_respect_to.name
    # FIXME: an "update many" form of update() would avoid issuing one
    # query per row here.
    with transaction.commit_on_success_unless_managed(using=using):
        for position, obj_pk in enumerate(id_list):
            ordered_obj.objects.filter(**{'pk': obj_pk, order_field_name: related_value}).update(_order=position)
def method_get_order(ordered_obj, self):
    """Return the primary keys of rows related to *self* in their stored order."""
    with_respect_to = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, with_respect_to.rel.field_name)
    pk_name = ordered_obj._meta.pk.name
    rows = ordered_obj.objects.filter(**{with_respect_to.name: rel_val}).values(pk_name)
    return [row[pk_name] for row in rows]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
    """Curried get_absolute_url that honours settings.ABSOLUTE_URL_OVERRIDES.

    Looks up "app_label.model_name" in the overrides mapping and calls the
    override if present, otherwise the model's original *func*.
    """
    override_key = '%s.%s' % (opts.app_label, opts.model_name)
    url_callable = settings.ABSOLUTE_URL_OVERRIDES.get(override_key, func)
    return url_callable(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
    # Bare placeholder class; presumably used as a base for dynamically built
    # classes elsewhere in this module (usage not visible in this chunk).
    pass
def simple_class_factory(model, attrs):
    """
    Needed for dynamic classes.
    """
    # Identity factory: returns the model class unchanged; `attrs` is ignored.
    # Used as the default `factory` argument for model_unpickle below.
    return model
def model_unpickle(model_id, attrs, factory):
    """Used to unpickle Model subclasses with deferred fields.

    *model_id* is either an (app_label, model_name) tuple or — for backwards
    compatibility with older pickles — the model class itself.  The class
    produced by *factory* is allocated without running __init__; pickle
    restores the instance state afterwards.
    """
    if isinstance(model_id, tuple):
        base_model = get_model(*model_id)
    else:
        # Backwards compat - the model was cached directly in earlier versions.
        base_model = model_id
    deferred_class = factory(base_model, attrs)
    return deferred_class.__new__(deferred_class)


model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
    """Re-create an exception class that lives as an attribute of *klass*.

    The exception class is looked up on its owning class, then an instance is
    allocated without running __init__ (pickle restores its state afterwards).
    """
    inner_cls = getattr(klass, exception_name)
    return inner_cls.__new__(inner_cls)
| 43.743686 | 166 | 0.601129 |
96b1672a84cf397bee7d47bf721602133931339b | 3,577 | py | Python | mxfold2/fold/mix.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 46 | 2020-09-17T04:50:22.000Z | 2022-03-22T08:14:15.000Z | mxfold2/fold/mix.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 7 | 2021-02-09T10:09:03.000Z | 2022-01-14T21:19:02.000Z | mxfold2/fold/mix.py | n-mikamo/mxfold2 | 8c195c77f824bdd5899d3d01d6a096de95cd0e9b | [
"MIT"
] | 20 | 2020-10-15T09:03:59.000Z | 2022-03-09T07:16:20.000Z | import torch
from .. import interface
from .fold import AbstractFold
from .rnafold import RNAFold
from .zuker import ZukerFold
class MixedFold(AbstractFold):
    """RNA secondary-structure model mixing Turner thermodynamic parameters
    (RNAFold) with positional parameters from a Zuker-style network.

    Prediction and partition-function computation are delegated to the
    compiled `interface` extension; gradients are re-attached afterwards
    via calculate_differentiable_score (defined on the base class).
    """

    def __init__(self, init_param=None, model_type='M', max_helix_length=30, **kwargs):
        super(MixedFold, self).__init__(interface.predict_mxfold, interface.partfunc_mxfold)
        # Thermodynamic (Turner) parameter model.
        self.turner = RNAFold(init_param=init_param)
        # Learned positional parameter model.
        self.zuker = ZukerFold(model_type=model_type, max_helix_length=max_helix_length, **kwargs)
        self.max_helix_length = max_helix_length

    def forward(self, seq, return_param=False, param=None, return_partfunc=False,
                max_internal_length=30, constraint=None, reference=None,
                loss_pos_paired=0.0, loss_neg_paired=0.0, loss_pos_unpaired=0.0, loss_neg_unpaired=0.0):
        """Predict structures for a batch of sequences.

        Returns (scores, predictions, pairs) by default; also the parameter
        dicts when return_param is True, or per-sequence partition functions
        and base-pair probabilities when return_partfunc is True.
        """
        param = self.make_param(seq) if param is None else param # reuse param or not
        ss = []
        preds = []
        pairs = []
        pfs = []
        bpps = []
        for i in range(len(seq)):
            # The compiled predictor expects CPU tensors; move both parameter
            # sets off-device before the call.
            param_on_cpu = {
                'turner': {k: v.to("cpu") for k, v in param[i]['turner'].items()},
                'positional': {k: v.to("cpu") for k, v in param[i]['positional'].items()}
            }
            param_on_cpu = {k: self.clear_count(v) for k, v in param_on_cpu.items()}
            # The C++ call itself is non-differentiable; run it without grad.
            with torch.no_grad():
                v, pred, pair = interface.predict_mxfold(seq[i], param_on_cpu,
                    max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
                    max_helix_length=self.max_helix_length,
                    constraint=constraint[i].tolist() if constraint is not None else None,
                    reference=reference[i].tolist() if reference is not None else None,
                    loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
                    loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
            if return_partfunc:
                pf, bpp = interface.partfunc_mxfold(seq[i], param_on_cpu,
                    max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
                    max_helix_length=self.max_helix_length,
                    constraint=constraint[i].tolist() if constraint is not None else None,
                    reference=reference[i].tolist() if reference is not None else None,
                    loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
                    loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
                pfs.append(pf)
                bpps.append(bpp)
            if torch.is_grad_enabled():
                # Rebuild a differentiable score from the positional parameters
                # so backprop can flow despite the no_grad prediction above.
                v = self.calculate_differentiable_score(v, param[i]['positional'], param_on_cpu['positional'])
            ss.append(v)
            preds.append(pred)
            pairs.append(pair)
        device = next(iter(param[0]['positional'].values())).device
        ss = torch.stack(ss) if torch.is_grad_enabled() else torch.tensor(ss, device=device)
        if return_param:
            return ss, preds, pairs, param
        elif return_partfunc:
            return ss, preds, pairs, pfs, bpps
        else:
            return ss, preds, pairs

    def make_param(self, seq):
        """Build per-sequence parameter dicts from both scoring models."""
        ts = self.turner.make_param(seq)
        ps = self.zuker.make_param(seq)
        return [{'turner': t, 'positional': p} for t, p in zip(ts, ps)]
| 51.84058 | 124 | 0.600224 |
a2b26118af1b6bb84bacd8a6d43c7c863c284681 | 911 | py | Python | ooobuild/dyn/script/document_script_library_container.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/script/document_script_library_container.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/script/document_script_library_container.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.script
from ...lo.script.document_script_library_container import DocumentScriptLibraryContainer as DocumentScriptLibraryContainer
__all__ = ['DocumentScriptLibraryContainer']
| 35.038462 | 123 | 0.77607 |
3a2243a8d7d0a1143c54d1046ece5b0c1820011a | 3,362 | py | Python | diskcache/memo.py | allaudet/python-diskcache | 2774689c60bac3ebd06246943bca2014779ee2c6 | [
"Apache-2.0"
] | null | null | null | diskcache/memo.py | allaudet/python-diskcache | 2774689c60bac3ebd06246943bca2014779ee2c6 | [
"Apache-2.0"
] | null | null | null | diskcache/memo.py | allaudet/python-diskcache | 2774689c60bac3ebd06246943bca2014779ee2c6 | [
"Apache-2.0"
] | null | null | null | """Memoization utilities.
"""
from functools import wraps
from .core import ENOVAL
def memoize(cache, name=None, typed=False, expire=None, tag=None):
    """Memoizing cache decorator.

    Wrap a callable so that repeated calls with the same arguments look the
    result up in *cache* instead of re-evaluating the function.

    When *name* is None (the default), the callable's module-qualified name
    is used to namespace the cache keys.  When *typed* is True, arguments of
    different types are cached separately, so f(3) and f(3.0) are distinct
    calls with distinct results.  The undecorated callable remains reachable
    through ``__wrapped__`` — useful for introspection, bypassing the cache,
    or rewrapping with a different cache.

    Remember to *call* memoize when decorating (``@cache.memoize()``);
    forgetting the parentheses raises TypeError.

    :param cache: cache to store callable arguments and return values
    :param str name: name given for callable (default None, automatic)
    :param bool typed: cache different types separately (default False)
    :param float expire: seconds until arguments expire
        (default None, no expiry)
    :param str tag: text to associate with arguments (default None)
    :return: callable decorator
    """
    # A callable here almost certainly means `@memoize` was used without
    # parentheses.
    if callable(name):
        raise TypeError('name cannot be callable')

    def decorator(function):
        "Decorator created by memoize call for callable."
        if name is not None:
            base = name
        else:
            try:
                base = function.__qualname__
            except AttributeError:
                base = function.__name__
            base = function.__module__ + base
        key_prefix = (base,)

        @wraps(function)
        def wrapper(*args, **kwargs):
            "Wrapper for callable to cache arguments and return values."
            key = key_prefix + args
            if kwargs:
                # ENOVAL separates positional from keyword arguments in the key.
                key += (ENOVAL,)
                sorted_items = sorted(kwargs.items())
                for pair in sorted_items:
                    key += pair
            if typed:
                key += tuple(type(value) for value in args)
                if kwargs:
                    key += tuple(type(value) for _, value in sorted_items)
            result = cache.get(key, default=ENOVAL, retry=True)
            if result is ENOVAL:
                result = function(*args, **kwargs)
                cache.set(key, result, expire=expire, tag=tag, retry=True)
            return result

        return wrapper

    return decorator
| 31.716981 | 79 | 0.620761 |
87d9ae38cc93180705bba7d94ad39daa2a32049b | 1,482 | py | Python | blogger/users/views.py | reallyusefulengine/django_blogger | b9f6444318e7b2a2643f25cd8974d11a8de6cfea | [
"MIT"
] | null | null | null | blogger/users/views.py | reallyusefulengine/django_blogger | b9f6444318e7b2a2643f25cd8974d11a8de6cfea | [
"MIT"
] | 7 | 2020-02-12T01:17:07.000Z | 2022-02-10T12:13:24.000Z | blogger/users/views.py | reallyusefulengine/django_blogger | b9f6444318e7b2a2643f25cd8974d11a8de6cfea | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from . forms import UserRegistrationForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Sign-up view: create the user on a valid POST, then redirect to login.

    GET (or any non-POST) renders an empty registration form; an invalid POST
    falls through and re-renders the bound form so its errors are displayed.
    """
    if request.method == 'POST':
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            # Dead `username` local removed: it was read from cleaned_data but
            # never used.  The message has no placeholders, so no f-string.
            messages.success(request, 'Your account has been created. You can now login')
            return redirect('login')
    else:
        form = UserRegistrationForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile details.

    On a valid POST both the User and the related Profile are saved and the
    browser is redirected back here; otherwise the (possibly bound, with
    errors) forms are rendered.
    """
    if request.method == 'POST':
        user_form = UserUpdateForm(request.POST, instance=request.user)
        profile_form = ProfileUpdateForm(request.POST, request.FILES,
                                         instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, f'Your account has been updated.')
            return redirect('profile')
    else:
        user_form = UserUpdateForm(instance=request.user)
        profile_form = ProfileUpdateForm(instance=request.user.profile)
    context = {'u_form': user_form, 'p_form': profile_form}
    return render(request, 'users/profile.html', context)
| 35.285714 | 94 | 0.65857 |
9835e711858b1376a0815b79900875854fc9273a | 15,750 | py | Python | spanner_orm/tests/query_test.py | jtoss/python-spanner-orm | d06b2a594b79268de80cbd8332829f41f9361985 | [
"Apache-2.0"
] | 2 | 2020-07-12T00:43:08.000Z | 2021-01-30T03:03:32.000Z | spanner_orm/tests/query_test.py | maroux/python-spanner-orm | a42b39cb1a1aadd945c12563cc732aa96da28656 | [
"Apache-2.0"
] | 20 | 2020-07-20T21:55:52.000Z | 2021-02-03T21:53:00.000Z | spanner_orm/tests/query_test.py | maroux/python-spanner-orm | a42b39cb1a1aadd945c12563cc732aa96da28656 | [
"Apache-2.0"
] | 3 | 2020-07-20T21:13:45.000Z | 2021-01-29T18:41:20.000Z | # python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import unittest
from unittest import mock
from absl.testing import parameterized
from spanner_orm import condition
from spanner_orm import error
from spanner_orm import field
from spanner_orm import query
from spanner_orm.tests import models
from google.cloud.spanner_v1.proto import type_pb2
def now():
    """Return the current time as a timezone-aware UTC datetime."""
    utc = datetime.timezone.utc
    return datetime.datetime.now(tz=utc)
class QueryTest(parameterized.TestCase):
    """Tests for the SQL-building query classes (SelectQuery / CountQuery)
    and their condition handling, parameter binding and result processing."""

    @mock.patch("spanner_orm.table_apis.sql_query")
    def test_where(self, sql_query):
        sql_query.return_value = []
        models.UnittestModel.where_equal(True, int_=3)
        # Inspect the SQL, bind parameters and types passed to the API layer.
        (_, sql, parameters, types), _ = sql_query.call_args
        expected_sql = "SELECT .* FROM table WHERE table.int_ = @int_0"
        self.assertRegex(sql, expected_sql)
        self.assertEqual(parameters, {"int_0": 3})
        self.assertEqual(types, {"int_0": field.Integer.grpc_type()})

    @mock.patch("spanner_orm.table_apis.sql_query")
    def test_count(self, sql_query):
        sql_query.return_value = [[0]]
        column, value = "int_", 3
        models.UnittestModel.count_equal(True, int_=3)
        (_, sql, parameters, types), _ = sql_query.call_args
        column_key = "{}0".format(column)
        expected_sql = r"SELECT COUNT\(\*\) FROM table WHERE table.{} = @{}".format(
            column, column_key
        )
        self.assertRegex(sql, expected_sql)
        self.assertEqual({column_key: value}, parameters)
        self.assertEqual(types, {column_key: field.Integer.grpc_type()})

    def test_count_allows_force_index(self):
        force_index = condition.force_index("test_index")
        count_query = query.CountQuery(models.UnittestModel, [force_index])
        sql = count_query.sql()
        expected_sql = "SELECT COUNT(*) FROM table@{FORCE_INDEX=test_index}"
        self.assertEqual(expected_sql, sql)

    @parameterized.parameters(
        condition.limit(1), condition.order_by(("int_", condition.OrderType.DESC))
    )
    def test_count_only_allows_where_and_from_segment_conditions(self, condition):
        with self.assertRaises(error.SpannerError):
            query.CountQuery(models.UnittestModel, [condition])

    def select(self, *conditions):
        # Helper: build a SelectQuery over the main test model.
        return query.SelectQuery(models.UnittestModel, list(conditions))

    def test_query_limit(self):
        key, value = "limit0", 2
        select_query = self.select(condition.limit(value))
        self.assertEndsWith(select_query.sql(), " LIMIT @{}".format(key))
        self.assertEqual(select_query.parameters(), {key: value})
        self.assertEqual(select_query.types(), {key: field.Integer.grpc_type()})
        # Without a limit condition, no LIMIT clause is emitted.
        select_query = self.select()
        self.assertNotRegex(select_query.sql(), "LIMIT")

    def test_query_limit_offset(self):
        limit_key, limit = "limit0", 2
        offset_key, offset = "offset0", 5
        select_query = self.select(condition.limit(limit, offset=offset))
        self.assertEndsWith(
            select_query.sql(), " LIMIT @{} OFFSET @{}".format(limit_key, offset_key)
        )
        self.assertEqual(
            select_query.parameters(), {limit_key: limit, offset_key: offset}
        )
        self.assertEqual(
            select_query.types(),
            {
                limit_key: field.Integer.grpc_type(),
                offset_key: field.Integer.grpc_type(),
            },
        )

    def test_query_order_by(self):
        order = ("int_", condition.OrderType.DESC)
        select_query = self.select(condition.order_by(order))
        self.assertEndsWith(select_query.sql(), " ORDER BY table.int_ DESC")
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())
        select_query = self.select()
        self.assertNotRegex(select_query.sql(), "ORDER BY")

    def test_query_order_by_with_object(self):
        # Same as above, but passing the field object instead of its name.
        order = (models.UnittestModel.int_, condition.OrderType.DESC)
        select_query = self.select(condition.order_by(order))
        self.assertEndsWith(select_query.sql(), " ORDER BY table.int_ DESC")
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())
        select_query = self.select()
        self.assertNotRegex(select_query.sql(), "ORDER BY")

    def test_query_select_fields(self):
        select_query = self.select(
            condition.select_columns([models.UnittestModel.int_])
        )
        self.assertEqual(select_query.sql(), "SELECT table.int_ FROM table")
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())
        # Column names and field objects produce identical SQL.
        select_query2 = self.select(condition.select_columns(["int_"]))
        self.assertEqual(select_query.sql(), select_query2.sql())
        select_query = self.select()
        self.assertRegex(select_query.sql(), "table.int_2")

    @parameterized.parameters(
        ("int_", 5, field.Integer.grpc_type()),
        ("string", "foo", field.String.grpc_type()),
        ("timestamp", now(), field.Timestamp.grpc_type()),
    )
    def test_query_where_comparison(self, column, value, grpc_type):
        # Every scalar comparison operator should bind one typed parameter.
        condition_generators = [
            condition.greater_than,
            condition.not_less_than,
            condition.less_than,
            condition.not_greater_than,
            condition.equal_to,
            condition.not_equal_to,
        ]
        for condition_generator in condition_generators:
            current_condition = condition_generator(column, value)
            select_query = self.select(current_condition)
            column_key = "{}0".format(column)
            expected_where = " WHERE table.{} {} @{}".format(
                column, current_condition.operator, column_key
            )
            self.assertEndsWith(select_query.sql(), expected_where)
            self.assertEqual(select_query.parameters(), {column_key: value})
            self.assertEqual(select_query.types(), {column_key: grpc_type})

    @parameterized.parameters(
        (models.UnittestModel.int_, 5, field.Integer.grpc_type()),
        (models.UnittestModel.string, "foo", field.String.grpc_type()),
        (models.UnittestModel.timestamp, now(), field.Timestamp.grpc_type()),
    )
    def test_query_where_comparison_with_object(self, column, value, grpc_type):
        # Same as test_query_where_comparison, using field objects.
        condition_generators = [
            condition.greater_than,
            condition.not_less_than,
            condition.less_than,
            condition.not_greater_than,
            condition.equal_to,
            condition.not_equal_to,
        ]
        for condition_generator in condition_generators:
            current_condition = condition_generator(column, value)
            select_query = self.select(current_condition)
            column_key = "{}0".format(column.name)
            expected_where = " WHERE table.{} {} @{}".format(
                column.name, current_condition.operator, column_key
            )
            self.assertEndsWith(select_query.sql(), expected_where)
            self.assertEqual(select_query.parameters(), {column_key: value})
            self.assertEqual(select_query.types(), {column_key: grpc_type})

    @parameterized.parameters(
        ("int_", [1, 2, 3], field.Integer.grpc_type()),
        ("string", ["a", "b", "c"], field.String.grpc_type()),
        ("timestamp", [now()], field.Timestamp.grpc_type()),
    )
    def test_query_where_list_comparison(self, column, values, grpc_type):
        # List membership conditions bind an ARRAY-typed parameter via UNNEST.
        condition_generators = [condition.in_list, condition.not_in_list]
        for condition_generator in condition_generators:
            current_condition = condition_generator(column, values)
            select_query = self.select(current_condition)
            column_key = "{}0".format(column)
            expected_sql = " WHERE table.{} {} UNNEST(@{})".format(
                column, current_condition.operator, column_key
            )
            list_type = type_pb2.Type(code=type_pb2.ARRAY, array_element_type=grpc_type)
            self.assertEndsWith(select_query.sql(), expected_sql)
            self.assertEqual(select_query.parameters(), {column_key: values})
            self.assertEqual(select_query.types(), {column_key: list_type})

    def test_query_combines_properly(self):
        select_query = self.select(
            condition.equal_to("int_", 5),
            condition.not_equal_to("string_array", ["foo", "bar"]),
            condition.limit(2),
            condition.order_by(("string", condition.OrderType.DESC)),
        )
        expected_sql = (
            "WHERE table.int_ = @int_0 AND table.string_array != "
            "@string_array1 ORDER BY table.string DESC LIMIT @limit2"
        )
        self.assertEndsWith(select_query.sql(), expected_sql)

    def test_only_one_limit_allowed(self):
        with self.assertRaises(error.SpannerError):
            self.select(condition.limit(2), condition.limit(2))

    def test_force_index(self):
        select_query = self.select(condition.force_index("test_index"))
        expected_sql = "FROM table@{FORCE_INDEX=test_index}"
        self.assertEndsWith(select_query.sql(), expected_sql)

    def test_force_index_with_object(self):
        select_query = self.select(
            condition.force_index(models.UnittestModel.test_index)
        )
        expected_sql = "FROM table@{FORCE_INDEX=test_index}"
        self.assertEndsWith(select_query.sql(), expected_sql)

    def includes(self, relation, *conditions):
        # Helper: build a SelectQuery with an includes() relation condition.
        include_condition = condition.includes(relation, list(conditions))
        return query.SelectQuery(models.RelationshipTestModel, [include_condition])

    def test_includes(self):
        select_query = self.includes("parent")
        # The column order varies between test runs
        expected_sql = (
            r"SELECT RelationshipTestModel\S* RelationshipTestModel\S* "
            r"ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* "
            r"SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = "
            r"RelationshipTestModel.parent_key\)"
        )
        self.assertRegex(select_query.sql(), expected_sql)
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())

    def test_includes_with_object(self):
        select_query = self.includes(models.RelationshipTestModel.parent)
        # The column order varies between test runs
        expected_sql = (
            r"SELECT RelationshipTestModel\S* RelationshipTestModel\S* "
            r"ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* "
            r"SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = "
            r"RelationshipTestModel.parent_key\)"
        )
        self.assertRegex(select_query.sql(), expected_sql)
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())

    def test_includes_subconditions_query(self):
        select_query = self.includes("parents", condition.equal_to("key", "value"))
        expected_sql = (
            "WHERE SmallTestModel.key = RelationshipTestModel.parent_key "
            "AND SmallTestModel.key = @key0"
        )
        self.assertRegex(select_query.sql(), expected_sql)

    def includes_result(self, related=1):
        # Helper: fabricate a raw result row with `related` joined parents.
        child = {"parent_key": "parent_key", "child_key": "child"}
        result = [child[name] for name in models.RelationshipTestModel.columns]
        parent = {"key": "key", "value_1": "value_1", "value_2": None}
        parents = []
        for _ in range(related):
            parents.append([parent[name] for name in models.SmallTestModel.columns])
        result.append(parents)
        return child, parent, [result]

    def test_includes_single_related_object_result(self):
        select_query = self.includes("parent")
        child_values, parent_values, rows = self.includes_result(related=1)
        result = select_query.process_results(rows)[0]
        self.assertIsInstance(result.parent, models.SmallTestModel)
        for name, value in child_values.items():
            self.assertEqual(getattr(result, name), value)
        for name, value in parent_values.items():
            self.assertEqual(getattr(result.parent, name), value)

    def test_includes_single_no_related_object_result(self):
        select_query = self.includes("parent")
        child_values, _, rows = self.includes_result(related=0)
        result = select_query.process_results(rows)[0]
        self.assertIsNone(result.parent)
        for name, value in child_values.items():
            self.assertEqual(getattr(result, name), value)

    def test_includes_subcondition_result(self):
        select_query = self.includes("parents", condition.equal_to("key", "value"))
        child_values, parent_values, rows = self.includes_result(related=2)
        result = select_query.process_results(rows)[0]
        self.assertLen(result.parents, 2)
        for name, value in child_values.items():
            self.assertEqual(getattr(result, name), value)
        for name, value in parent_values.items():
            self.assertEqual(getattr(result.parents[0], name), value)

    def test_includes_error_on_multiple_results_for_single(self):
        # A single-object relation with two joined rows is an error.
        select_query = self.includes("parent")
        _, _, rows = self.includes_result(related=2)
        with self.assertRaises(error.SpannerError):
            _ = select_query.process_results(rows)

    def test_includes_error_on_invalid_relation(self):
        with self.assertRaises(error.ValidationError):
            self.includes("bad_relation")

    @parameterized.parameters(
        ("bad_column", 0), ("child_key", "good value"), ("key", ["bad value"])
    )
    def test_includes_error_on_invalid_subconditions(self, column, value):
        with self.assertRaises(error.ValidationError):
            self.includes("parent", condition.equal_to(column, value))

    def test_or(self):
        condition_1 = condition.equal_to("int_", 1)
        condition_2 = condition.equal_to("int_", 2)
        select_query = self.select(condition.or_([condition_1], [condition_2]))
        expected_sql = "((table.int_ = @int_0) OR (table.int_ = @int_1))"
        self.assertEndsWith(select_query.sql(), expected_sql)
        self.assertEqual(select_query.parameters(), {"int_0": 1, "int_1": 2})
        self.assertEqual(
            select_query.types(),
            {"int_0": field.Integer.grpc_type(), "int_1": field.Integer.grpc_type()},
        )

    def test_raw(self):
        select_query = self.select(condition.raw_field("COS(float_)", "cosine"))
        expected_sql = r"COS\(float_\) AS cosine"
        self.assertRegex(select_query.sql(), expected_sql)
        select_query = self.select(
            condition.select_columns([models.UnittestModel.int_]),
            condition.raw_field("CAST(MOD(int_, 2) AS BOOL)", "is_odd"),
        )
        self.assertEqual(
            select_query.sql(),
            "SELECT table.int_, CAST(MOD(int_, 2) AS BOOL) AS is_odd FROM table",
        )
        self.assertEmpty(select_query.parameters())
        self.assertEmpty(select_query.types())
if __name__ == "__main__":
    # Configure basic logging before handing control to the unittest runner.
    logging.basicConfig()
    unittest.main()
| 41.122715 | 88 | 0.660635 |
24e4b67c65f904e0aed6a9d78b2f58d656aad33e | 2,700 | py | Python | oi/Contest/self/IOI-Test-Round/puzzle/data/checker.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | 3 | 2018-08-30T09:43:20.000Z | 2019-12-03T04:53:43.000Z | oi/Contest/self/IOI-Test-Round/puzzle/data/checker.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | oi/Contest/self/IOI-Test-Round/puzzle/data/checker.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2017 riteme
#
from sys import argv, version
from os.path import *
# Python 3 removed xrange; alias it so the rest of the script can use
# xrange under both Python 2 and Python 3.
if version[0] == '3':
    xrange = range
# Print usage and exit when invoked without a puzzle ID or with a help flag.
if len(argv) == 1 or "--help" in argv or "-h" in argv:
    print("Participate answer checker & grader.")
    print("Usage: %s [ID] [--no-limit] [--help/-h]" % argv[0])
    print("\t--no-limit: Ignore the attempt limit (Set the limit to 1,000,000,000).")
    print("\t--help / -h: Show this message.")
    exit(0)
def ASSERT(expr, message):
    """Terminate the checker with status -1, printing *message*, when *expr* is falsy."""
    if expr:
        return
    print(message)
    exit(-1)
# Puzzle ID selects which input/output file pair to grade.
idx = int(argv[1])
INPUT_FILE = "puzzle{}.in".format(idx)
OUTPUT_FILE = "puzzle{}.out".format(idx)
ASSERT(exists(INPUT_FILE), "'{}' not found.".format(INPUT_FILE))
ASSERT(exists(OUTPUT_FILE), "'{}' not found.".format(OUTPUT_FILE))
inp = open(INPUT_FILE)
out = open(OUTPUT_FILE)
# Header: T gates, an n x m board, and at most LIMIT rotations overall.
T, n, m, LIMIT = map(int, inp.readline().split())
if "--no-limit" in argv:
    LIMIT = 10**9
# Cell markers for the orientation of the domino occupying a cell.
DOWN = 1
RIGHT = 2
def read_graph():
    """Read the next n-line board from the input file.

    Returns an (n+1) x (m+1) grid with 1-based indexing.  Each cell is 0
    (empty), DOWN or RIGHT.  '$' appears to mark vertical dominoes and '#'
    horizontal ones; the neighbour checks ensure only the first cell of each
    two-cell piece is marked (second cell stays 0) — TODO confirm against the
    generator's format.
    """
    G = [[0] * (m + 1) for i in xrange(n + 1)]
    x = 0
    while x < n:
        line = inp.readline().strip()
        if len(line) == 0:
            # Skip blank separator lines between boards.
            continue
        x += 1
        for y in xrange(1, m + 1):
            if line[y - 1] == '$' and G[x - 1][y] != DOWN:
                G[x][y] = DOWN
            elif line[y - 1] == '#' and G[x][y - 1] != RIGHT:
                G[x][y] = RIGHT
    return G
# Current board state; mutated in place by rotate() as moves are replayed.
last = read_graph()
def rotate(x, y, line):
    """Rotate the 2x2 block whose top-left corner is (x, y) on the global board.

    Two vertically adjacent RIGHT (horizontal) pieces become DOWN (vertical)
    pieces and vice versa.  Any other configuration is an illegal move and
    aborts the checker; *line* is only used in the error message.
    """
    if x < n and last[x][y] == RIGHT and last[x + 1][y] == RIGHT:
        last[x][y] = last[x][y + 1] = DOWN
        last[x + 1][y] = 0
    elif y < m and last[x][y] == DOWN and last[x][y + 1] == DOWN:
        last[x][y] = last[x + 1][y] = RIGHT
        last[x][y + 1] = 0
    else:
        ASSERT(False, "Can't rotate at ({}, {}) (at line {}).".format(x, y, line))
# Replay the contestant's moves gate by gate.  `score` is the index of the
# last gate successfully opened, `line` tracks the answer-file line for error
# messages, and `cnt` counts the remaining allowed rotations.
score = line = 0
cnt = LIMIT
for i in xrange(1, T + 1):
    # Target configuration that must be reached to open gate i.
    cur = read_graph()
    try:
        k = int(out.readline())
        line += 1
    except:
        ASSERT(False, "Can't read integer 'k' at gate {}.".format(i))
    reported = False
    for j in xrange(k):
        if cnt <= 0:
            print("No opportunities left.")
            reported = True
            break
        cnt -= 1
        try:
            x, y = map(int, out.readline().split())
            line += 1
        except:
            ASSERT(False, "Can't read integer 'x' and 'y' at gate {}.".format(i))
        rotate(x, y, line)
    # After the declared moves, the board must match the gate's target.
    if last != cur:
        print("Can't open the gate {}.".format(i))
        break
    score = i
    last = cur
    if cnt <= 0:
        if not reported:
            print("No opportunities left after gate {}.".format(i))
        break
print("Score: {}\nTried {} times.".format(score, LIMIT - cnt))
| 23.275862 | 85 | 0.510741 |
7ca29ef138bfacee2476cfcc44f9aa99263e8e23 | 6,430 | py | Python | mappyfile/validator.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | 1 | 2018-08-08T06:48:05.000Z | 2018-08-08T06:48:05.000Z | mappyfile/validator.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | null | null | null | mappyfile/validator.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | null | null | null | import json
import os
import sys
from collections import OrderedDict
import logging
import jsonschema
import jsonref
import mappyfile as utils
# Module-level logger shared by the validator.
log = logging.getLogger("mappyfile")
PY2 = sys.version_info[0] < 3
if PY2:
    # On Python 2, treat unicode as the canonical string type.
    str = unicode # NOQA
class Validator(object):
def __init__(self):
self.schemas = {}
self.expanded_schemas = {}
def get_schema_path(self, schemas_folder):
"""
Return a file protocol URI e.g. file:///D:/mappyfile/mappyfile/schemas/ on Windows
and file:////home/user/mappyfile/mappyfile/schemas/ on Linux
"""
# replace any Windows path back slashes with forward slashes
schemas_folder = schemas_folder.replace("\\", "/")
# HACK Python 2.7 on Linux seems to remove the root slash
# so add this back in
if schemas_folder.startswith("/"):
schemas_folder = "/" + schemas_folder
host = ""
root_schema_path = "file://{}/{}".format(host, schemas_folder) + "/"
return root_schema_path
def get_schemas_folder(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "schemas")
def get_schema_file(self, schema_name):
schema_name += ".json"
schemas_folder = self.get_schemas_folder()
schema_file = os.path.join(schemas_folder, schema_name)
if not os.path.isfile(schema_file):
raise IOError("The file %s does not exist" % schema_file)
return schema_file
def get_schema_validator(self, schema_name):
"""
Had to remove the id property from map.json or it uses URLs for validation
See various issues at https://github.com/Julian/jsonschema/pull/306
"""
if schema_name not in self.schemas:
schema_file = self.get_schema_file(schema_name)
with open(schema_file) as f:
try:
jsn_schema = json.load(f)
except ValueError as ex:
log.error("Could not load %s", schema_file)
raise ex
schemas_folder = self.get_schemas_folder()
root_schema_path = self.get_schema_path(schemas_folder)
resolver = jsonschema.RefResolver(root_schema_path, None)
# cache the schema for future use
self.schemas[schema_name] = (jsn_schema, resolver)
else:
jsn_schema, resolver = self.schemas[schema_name]
validator = jsonschema.Draft4Validator(schema=jsn_schema, resolver=resolver)
# validator.check_schema(jsn_schema) # check schema is valid
return validator
def convert_lowercase(self, x):
if isinstance(x, list):
return [self.convert_lowercase(v) for v in x]
elif isinstance(x, dict):
return OrderedDict((k.lower(), self.convert_lowercase(v)) for k, v in x.items())
else:
if isinstance(x, (str, bytes)):
x = x.lower()
return x
def create_message(self, rootdict, path, error, add_comments):
"""
Add a validation comment to the dictionary
path is the path to the error object, it can be empty if the error is in the root object
http://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError.absolute_path
It can also reference an object in a list e.g. [u'layers', 0]
"""
if not path:
# error applies to the root type
d = rootdict
key = d["__type__"]
elif isinstance(path[-1], int):
# the error is on an object in a list
d = utils.findkey(rootdict, *path)
key = d["__type__"]
else:
key = path[-1]
d = utils.findkey(rootdict, *path[:-1])
error_message = "ERROR: Invalid value for {}".format(key.upper())
# add a comment to the dict structure
if add_comments:
if "__comments__" not in d:
d["__comments__"] = OrderedDict()
d["__comments__"][key] = "# {}".format(error_message)
error_message = {"error": error.message,
"message": error_message}
# add in details of the error line, when Mapfile was parsed to
# include position details
if "__position__" in d:
pd = d["__position__"][key]
error_message["line"] = pd.get("line")
error_message["column"] = pd.get("column")
return error_message
def get_error_messages(self, d, errors, add_comments):
error_messages = []
for error in errors:
pth = error.absolute_path
pth = list(pth) # convert deque to list
em = self.create_message(d, pth, error, add_comments)
error_messages.append(em)
return error_messages
def _validate(self, d, validator, add_comments, schema_name):
lowercase_dict = self.convert_lowercase(d)
jsn = json.loads(json.dumps(lowercase_dict), object_pairs_hook=OrderedDict)
errors = list(validator.iter_errors(jsn))
error_messages = self.get_error_messages(d, errors, add_comments)
return error_messages
def validate(self, value, add_comments=False, schema_name="map"):
"""
verbose - also return the jsonschema error details
"""
validator = self.get_schema_validator(schema_name)
error_messages = []
if isinstance(value, list):
for d in value:
error_messages += self._validate(d, validator, add_comments, schema_name)
else:
error_messages = self._validate(value, validator, add_comments, schema_name)
return error_messages
    def get_expanded_schema(self, schema_name):
        """
        Return a schema file with all $ref properties expanded
        """
        # expanded schemas are cached per name to avoid re-reading from disk
        if schema_name not in self.expanded_schemas:
            fn = self.get_schema_file(schema_name)
            schemas_folder = self.get_schemas_folder()
            # base_uri lets jsonref resolve relative $ref paths
            base_uri = self.get_schema_path(schemas_folder)

            with open(fn) as f:
                jsn_schema = jsonref.load(f, base_uri=base_uri)

                # cache the schema for future use
                self.expanded_schemas[schema_name] = jsn_schema
        else:
            jsn_schema = self.expanded_schemas[schema_name]

        return jsn_schema
| 33.14433 | 117 | 0.611975 |
569507451c36635dbb108eecc62b18de68082398 | 385 | py | Python | day-10/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | day-10/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | day-10/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from collections import defaultdict
# Advent of Code day 10 part 2: count distinct adapter chains.
# The wall outlet is joltage 0; the device is 3 above the highest adapter.
adapters = sorted([0] + [int(line.strip()) for line in open(sys.argv[1], "r").readlines()])
device_joltage = adapters[-1] + 3
adapters.append(device_joltage)

# ways[j] = number of distinct chains reaching joltage j; each adapter can
# follow any adapter 1, 2 or 3 jolts below it.
ways = defaultdict(int)
ways[0] = 1
for joltage in adapters:
    ways[joltage] += ways[joltage - 1] + ways[joltage - 2] + ways[joltage - 3]
print(ways[max(ways)])
| 21.388889 | 89 | 0.675325 |
93320a72708991d2e7b088eda08192301a1f4c19 | 1,258 | py | Python | merfi/tests/test_util.py | alfredodeza/merfi | 899ca4cea60a335c6574a00696fd552b4460db28 | [
"MIT"
] | 2 | 2017-12-02T23:54:45.000Z | 2021-04-29T15:45:13.000Z | merfi/tests/test_util.py | alfredodeza/merfi | 899ca4cea60a335c6574a00696fd552b4460db28 | [
"MIT"
] | 35 | 2015-09-18T00:30:42.000Z | 2019-01-24T22:46:12.000Z | merfi/tests/test_util.py | alfredodeza/merfi | 899ca4cea60a335c6574a00696fd552b4460db28 | [
"MIT"
] | 2 | 2015-09-16T20:16:12.000Z | 2017-12-02T23:54:16.000Z | from pytest import raises
from merfi import util
class TestInferPath(object):
def test_no_last_argument(self):
args = ['gpg', '--output', 'signed']
with raises(RuntimeError) as error:
util.infer_path(args)
assert 'is not a valid path' in str(error.value)
def test_last_argument(self):
args = ['gpg', '--output', 'signed', '/']
result = util.infer_path(args)
assert result == '/'
def test_no_arguments(self):
# the parser engine pops the current command so we can
# certainly end up with an empty argv list
result = util.infer_path([])
assert result.startswith('/')
class TestDependencyCheck(object):
    """Tests for util.check_dependency (executable-on-PATH checks)."""

    def test_silent_does_not_raise(self):
        # 'ls' exists everywhere we run tests; silent success returns None
        result = util.check_dependency('ls', silent=True)
        assert result is None

    def test_silent_does_not_output(self, capsys):
        # silent mode must not write to stdout or stderr
        util.check_dependency('ls', silent=True)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''

    def test_not_silent_prints_when_erroring(self, capsys):
        # a missing executable raises AND reports on stdout when not silent
        with raises(RuntimeError):
            util.check_dependency('ffffffffffffff')
        out, err = capsys.readouterr()
        assert 'could not find' in out
a8adcfc23d39e8953a157abec8786d4b13bb7c66 | 3,524 | py | Python | ironic/api/controllers/v1/volume.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 2 | 2019-06-17T21:37:53.000Z | 2020-07-11T03:58:39.000Z | ironic/api/controllers/v1/volume.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | ironic/api/controllers/v1/volume.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 6 | 2019-06-13T12:49:33.000Z | 2021-04-17T16:33:19.000Z | # Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
from six.moves import http_client
import wsme
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
from ironic.common import exception
from ironic.common import policy
class Volume(base.APIBase):
    """API representation of a volume root.

    This class exists as a root class for the volume connectors and volume
    targets controllers.
    """

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated volume links"""

    connectors = wsme.wsattr([link.Link], readonly=True)
    """Links to the volume connectors resource"""

    targets = wsme.wsattr([link.Link], readonly=True)
    """Links to the volume targets resource"""

    @staticmethod
    def convert(node_ident=None):
        # Builds the link set either for the global /volume resource or,
        # when node_ident is given, for a node-scoped /nodes/<id>/volume/.
        url = pecan.request.public_url
        volume = Volume()
        if node_ident:
            resource = 'nodes'
            args = '%s/volume/' % node_ident
        else:
            resource = 'volume'
            args = ''

        volume.links = [
            link.Link.make_link('self', url, resource, args),
            link.Link.make_link('bookmark', url, resource, args,
                                bookmark=True)]
        volume.connectors = [
            link.Link.make_link('self', url, resource, args + 'connectors'),
            link.Link.make_link('bookmark', url, resource, args + 'connectors',
                                bookmark=True)]
        volume.targets = [
            link.Link.make_link('self', url, resource, args + 'targets'),
            link.Link.make_link('bookmark', url, resource, args + 'targets',
                                bookmark=True)]
        return volume
class VolumeController(rest.RestController):
    """REST controller for volume root"""

    # Maps URL sub-resource names to their controller classes.
    _subcontroller_map = {
        'connectors': volume_connector.VolumeConnectorsController,
        'targets': volume_target.VolumeTargetsController
    }

    def __init__(self, node_ident=None):
        super(VolumeController, self).__init__()
        # When set, this controller serves a node-scoped /volume resource.
        self.parent_node_ident = node_ident

    @expose.expose(Volume)
    def get(self):
        # allow_volume() gates on the API microversion that introduced /volume
        if not api_utils.allow_volume():
            raise exception.NotFound()

        cdict = pecan.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)

        return Volume.convert(self.parent_node_ident)

    @pecan.expose()
    def _lookup(self, subres, *remainder):
        # Route /volume/connectors and /volume/targets to sub-controllers;
        # unknown sub-resources fall through (pecan returns 404).
        if not api_utils.allow_volume():
            pecan.abort(http_client.NOT_FOUND)
        subcontroller = self._subcontroller_map.get(subres)
        if subcontroller:
            return subcontroller(node_ident=self.parent_node_ident), remainder
6f2f016788262e42295074a1229da84c64492b12 | 1,092 | py | Python | python_exercises_folder/Exercises/cnpj.py | Kaique425/python_exercises | c203384d372d9ef7193300d6ce35857879d91839 | [
"MIT"
] | null | null | null | python_exercises_folder/Exercises/cnpj.py | Kaique425/python_exercises | c203384d372d9ef7193300d6ce35857879d91839 | [
"MIT"
] | null | null | null | python_exercises_folder/Exercises/cnpj.py | Kaique425/python_exercises | c203384d372d9ef7193300d6ce35857879d91839 | [
"MIT"
] | null | null | null | import re
# Modulo-11 weights for the second CNPJ check digit; the first digit uses
# the same sequence minus the leading 6 (regressives_numbers[1:]).
regressives_numbers = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]


def validate_cnpj(cnpj):
    """Return True if *cnpj* is a valid Brazilian CNPJ number.

    Accepts formatted ("11.222.333/0001-81") or bare ("11222333000181")
    input. A CNPJ is valid when it contains exactly 14 digits, is not a
    single repeated digit, and BOTH check digits match the values computed
    by the standard modulo-11 algorithm.
    """
    cnpj = clean_cnpj(cnpj)
    # A CNPJ always has exactly 14 numeric digits.
    if len(cnpj) != 14 or not cnpj.isdigit():
        return False
    # Repeated-digit sequences (e.g. "11111111111111") satisfy the check
    # digit math but are not valid CNPJs.
    if is_sequence(cnpj):
        return False
    # Bug fix: the original computed the first check digit and immediately
    # discarded it (the variable was overwritten by the second computation),
    # so a CNPJ with a wrong first check digit was accepted. Validate both.
    first = calculate_digit(cnpj=cnpj, cnpj_digit=1)   # 12 digits + d1
    second = calculate_digit(cnpj=cnpj, cnpj_digit=2)  # 13 digits + d2
    return first == cnpj[:13] and second == cnpj


def clean_cnpj(cnpj):
    """Strip every non-digit character (dots, slash, dash, spaces)."""
    # Bug fix: the original pattern [^\w\s] kept letters and whitespace,
    # letting malformed input through to the digit arithmetic.
    return re.sub(r'\D', '', cnpj)


def calculate_digit(cnpj, cnpj_digit):
    """Return the CNPJ prefix with check digit *cnpj_digit* (1 or 2) appended.

    For digit 1 the first 12 digits are weighted with 12 multipliers; for
    digit 2 the first 13 digits are weighted with all 13 multipliers. Any
    other *cnpj_digit* yields no weighting (sum 0), matching the original.
    """
    if cnpj_digit == 1:
        regressives = regressives_numbers[1:]
        cnpj = cnpj[:-2]
    elif cnpj_digit == 2:
        regressives = regressives_numbers
        cnpj = cnpj[:-1]
    else:
        regressives = []
    total = sum(int(digit) * weight
                for digit, weight in zip(cnpj, regressives))
    digit = 11 - (total % 11)
    # Remainders 0 and 1 produce 11 and 10, which map to check digit 0.
    digit = digit if digit <= 9 else 0
    return f'{cnpj}{digit}'


def is_sequence(cnpj):
    """Return True when *cnpj* is one digit repeated for its whole length."""
    sequence = cnpj[0] * len(cnpj)
    if sequence == cnpj:
        return True
    else:
        return False
2fdb1776715872660cb429417ee01b2205a6ea7a | 11,802 | py | Python | ASR2.py | mn270/Recognition-phenomena-TIMIT- | a549e6e9b813555bde5d6f2803e4bb5f7dc1ea52 | [
"MIT"
] | null | null | null | ASR2.py | mn270/Recognition-phenomena-TIMIT- | a549e6e9b813555bde5d6f2803e4bb5f7dc1ea52 | [
"MIT"
] | null | null | null | ASR2.py | mn270/Recognition-phenomena-TIMIT- | a549e6e9b813555bde5d6f2803e4bb5f7dc1ea52 | [
"MIT"
] | null | null | null | import python_speech_features as psf
import scipy.io.wavfile as sciwav
import os
import glob
import numpy as np
import numpy
import scipy.io.wavfile
from scipy.fftpack import dct
from sklearn.preprocessing import StandardScaler
import random
import tensorflow as tf
import tensorflow.python.keras.backend as K
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.activations import relu, sigmoid, softmax
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import MaxPool2D
from tensorflow.python.keras.layers import AveragePooling2D
from tensorflow.python.keras.layers import Bidirectional
from tensorflow.python.keras.layers import Lambda
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras import regularizers
# ensuring repeatability of results
random.seed(1)
np.random.seed(1)
tf.random.set_seed(1)

# Root directory of the local TIMIT corpus; adjust for your machine.
PATH = "/home/marcin/Pobrane/TIMIT"
class TimitDatase():
    """Loads the TIMIT corpus and prepares log mel filter-bank features plus
    padded phoneme label sequences for CTC training.

    After construction the instance exposes x_train/x_test (feature arrays),
    y_train/y_test (phoneme index sequences), their padded versions, and the
    per-utterance input/label lengths required by Keras' CTC loss.
    """

    # The 61 TIMIT phoneme symbols plus a trailing 'BLANK' entry; list index
    # doubles as the integer class label.
    phonemes = ['h#', 'sh', 'ix', 'hv', 'eh', 'dcl', 'jh', 'ih', 'd', 'ah',
                'kcl', 'k', 's', 'ux', 'q', 'en', 'gcl', 'g', 'r', 'w',
                'ao', 'epi', 'dx', 'axr', 'l', 'y', 'uh', 'n', 'ae', 'm',
                'oy', 'ax', 'dh', 'tcl', 'iy', 'v', 'f', 't', 'pcl', 'ow',
                'hh', 'ch', 'bcl', 'b', 'aa', 'em', 'ng', 'ay', 'th', 'ax-h',
                'ey', 'p', 'aw', 'er', 'nx', 'z', 'el', 'uw', 'pau', 'zh',
                'eng', 'BLANK']

    def __init__(self, timit_root):
        # longest phoneme sequence seen so far; updated while loading
        self.max_label_len = 0
        # load the dataset
        training_root = os.path.join(timit_root, 'TRAIN')
        test_root = os.path.join(timit_root, 'TEST')
        self.ph_org_train, self.train_input_length, self.train_label_length, self.x_train, self.y_train = self.load_split_timit_data(
            training_root)
        self.ph_org_test, self.test_input_length, self.test_label_length, self.x_test, self.y_test = self.load_split_timit_data(
            test_root)
        self.normalize_xs()
        # pad label sequences with the index just past 'BLANK' so every
        # utterance has max_label_len labels (required for batched CTC)
        self.train_padded_ph = pad_sequences(self.y_train, maxlen=self.max_label_len, padding='post',
                                             value=len(self.phonemes))
        self.test_padded_ph = pad_sequences(self.y_test, maxlen=self.max_label_len, padding='post',
                                            value=len(self.phonemes))

    def num_classes(self):
        # number of phoneme classes (includes the BLANK entry)
        return len(self.phonemes)

    def normalize_xs(self):
        """Standardize the 2-D feature arrays in place.

        The scaler is fitted on the second half of each training window
        (the zero-padded tail is in the first half less often) and then
        applied to both the training and test sets.
        """
        cut = int(self.x_train.shape[1] / 2)
        longX = self.x_train[:, -cut:, :]
        # flatten windows
        longX = longX.reshape((longX.shape[0] * longX.shape[1], longX.shape[2]))
        # flatten train and test
        flatTrainX = self.x_train.reshape((self.x_train.shape[0] * self.x_train.shape[1], self.x_train.shape[2]))
        flatTestX = self.x_test.reshape((self.x_test.shape[0] * self.x_test.shape[1], self.x_test.shape[2]))
        # standardize
        s = StandardScaler()
        # fit on training data
        s.fit(longX)
        print("MEAN:")
        print(s.mean_)
        print("------------------------------------------")
        print("VAR:")
        print(s.var_)
        print("------------------------------------------")
        print("STD:")
        print(s.scale_)
        print(s.get_params(True))
        # apply to training and test data
        longX = s.transform(longX)
        flatTrainX = s.transform(flatTrainX)
        flatTestX = s.transform(flatTestX)
        # reshape
        self.x_train = flatTrainX.reshape((self.x_train.shape))
        self.x_test = flatTestX.reshape((self.x_test.shape))

    def filter_banks(self, signal, sample_rate):
        """Compute log mel filter-bank features for one utterance.

        Returns a fixed-size (num_cep, nfilt) array: the first frames hold
        the features, the remainder is zero padding so every utterance has
        the same shape. If you would like to use MFCC, apply a DCT to the
        filter-bank output.
        """
        pre_emphasis = 0.97
        frame_size = 0.025
        frame_stride = 0.01
        NFFT = 512
        nfilt = 40
        num_cep = 500
        # pre-emphasis boosts high frequencies before framing
        emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])
        frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate  # Convert from seconds to samples
        signal_length = len(emphasized_signal)
        frame_length = int(round(frame_length))
        frame_step = int(round(frame_step))
        num_frames = int(numpy.ceil(float(numpy.abs(signal_length - frame_length)) / frame_step))

        pad_signal_length = num_frames * frame_step + frame_length
        z = numpy.zeros((pad_signal_length - signal_length))
        pad_signal = numpy.append(emphasized_signal, z)  # Pad Signal

        # index matrix selecting overlapping frames from the padded signal
        indices = numpy.tile(numpy.arange(0, frame_length), (num_frames, 1)) + numpy.tile(
            numpy.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
        frames = pad_signal[indices.astype(numpy.int32, copy=False)]
        frames *= numpy.hamming(frame_length)

        mag_frames = numpy.absolute(numpy.fft.rfft(frames, NFFT))  # Magnitude of the FFT
        pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))  # Power Spectrum

        # build the triangular mel filter bank
        low_freq_mel = 0
        high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700))  # Convert Hz to Mel
        mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
        hz_points = (700 * (10 ** (mel_points / 2595) - 1))  # Convert Mel to Hz
        bin = numpy.floor((NFFT + 1) * hz_points / sample_rate)

        fbank = numpy.zeros((nfilt, int(numpy.floor(NFFT / 2 + 1))))
        for m in range(1, nfilt + 1):
            f_m_minus = int(bin[m - 1])  # left
            f_m = int(bin[m])  # center
            f_m_plus = int(bin[m + 1])  # right

            for k in range(f_m_minus, f_m):
                fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
            for k in range(f_m, f_m_plus):
                fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
        filter_banks = numpy.dot(pow_frames, fbank.T)
        filter_banks = numpy.where(filter_banks == 0, numpy.finfo(float).eps, filter_banks)  # Numerical Stability
        filter_banks = 20 * numpy.log10(filter_banks)  # dB
        # mean normalization per filter
        filter_banks -= (numpy.mean(filter_banks, axis=0) + 1e-8)
        # zero-pad (or truncate) to exactly num_cep frames
        padding = np.zeros((num_cep, nfilt))
        padding[:filter_banks.shape[0], :filter_banks.shape[1]] = filter_banks[:num_cep, :]
        return padding

    def load_split_timit_data(self, root_dir):
        """Load and prepare TIMIT data under *root_dir* for the ASR model.

        Returns (phoneme strings, input lengths, label lengths, features,
        label index sequences), each as a numpy array over utterances.
        """
        wav_glob = os.path.join(root_dir, '**/SA1.wav')  # SA1 -> * (to read all DATA)
        x_list = []
        y_list = []
        ph_list = []
        label_length = []
        input_length = []
        for wav_filename in glob.glob(wav_glob, recursive=True):
            y_list_local = []
            ph_list_local = []
            sample_rate, wav = sciwav.read(wav_filename)
            filtered_data = self.filter_banks(wav, sample_rate)
            x_list.append(filtered_data)
            # parse the text file with phonemes
            phn_filename = wav_filename[:-3] + 'PHN'  # fragile, i know
            with open(phn_filename) as f:
                lines = f.readlines()
                phonemes = [line.split() for line in lines]
                # each line is "<start-sample> <end-sample> <phoneme>"
                for l, r, ph in phonemes:
                    if len(x_list) % 100 == 0:
                        print('Added {} pairs.'.format(len(x_list)))
                    phonem_idx = self.phonemes.index(ph)
                    ph_list_local.append(ph)
                    y_list_local.append(phonem_idx)
            y_list.append(y_list_local)
            ph_list.append((ph_list_local))
            label_length.append(len(y_list_local))
            # NOTE(review): input length is hard-coded to 100 time steps,
            # presumably matching the model's output length — confirm.
            input_length.append(100)
            if len(y_list_local) > self.max_label_len:
                self.max_label_len = len(y_list_local)

        x = np.array(x_list)
        y = np.array(y_list)
        ph_org_train = np.array(ph_list)
        train_input_length = np.array(input_length)
        train_label_length = np.array(label_length)
        return ph_org_train, train_input_length, train_label_length, x, y
class ASR():
    """CNN + bidirectional-LSTM acoustic model with a CTC training head.

    Builds two Keras models: act_model (inference: features -> per-frame
    phoneme probabilities) and model (training: adds the CTC loss layer).
    """

    def __init__(self):
        """
        Create the model
        """
        # input: 500 frames x 40 filter banks x 1 channel
        inputs = Input(shape=(500, 40, 1))
        conv_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
        pool_1 = AveragePooling2D(pool_size=(2, 2))(conv_1)
        conv_2 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool_1)
        pool_2 = AveragePooling2D(pool_size=(2, 2))(conv_2)
        conv_3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool_2)
        batch_norm_3 = BatchNormalization()(conv_3)
        pool_3 = AveragePooling2D(pool_size=(1, 2))(batch_norm_3)
        conv_4 = Conv2D(256, (2, 2), activation='relu', padding='same')(pool_3)
        batch_norm_4 = BatchNormalization()(conv_4)
        pool_4 = AveragePooling2D(pool_size=(1, 5))(batch_norm_4)
        # drop the collapsed frequency axis so LSTMs see (time, features)
        lamb = Lambda(lambda x: K.squeeze(x, 2))(pool_4)
        blstm_1 = Bidirectional(LSTM(128, return_sequences=True, dropout=0.5))(lamb)
        blstm_2 = Bidirectional(LSTM(128, return_sequences=True, dropout=0.5))(blstm_1)
        # 62 classes: 61 phonemes + BLANK
        outputs = Dense(62, activation='softmax')(blstm_2)
        # model to be used at test time
        self.act_model = Model(inputs, outputs)
        self.act_model.summary()
        # NOTE(review): relies on the module-level global `data`
        # (a TimitDatase) existing before ASR() is constructed.
        labels = Input(name='the_labels', shape=[data.max_label_len], dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        loss_out = Lambda(self.ctc_lambda_func, output_shape=(1,), name='ctc')(
            [outputs, labels, input_length, label_length])
        # model to be used at training time
        self.model = Model(inputs=[inputs, labels, input_length, label_length], outputs=loss_out)

    def ctc_lambda_func(args, x):
        """
        Create cost function (CTC)
        """
        # NOTE(review): unconventional signature — because this is called as
        # a bound method, `args` receives the instance (i.e. plays the role
        # of `self`) and `x` is the Lambda layer's input list.
        y_pred, labels, input_length, label_length = x
        return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
if __name__ == "__main__":
    """
    Main function
    """
    # Build dataset and model; ASR() reads the global `data` internally.
    data = TimitDatase(PATH)
    asr = ASR()
    # The Lambda layer already computes the CTC loss, so the compiled loss
    # simply passes y_pred through.
    asr.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam')
    filepath = "best_model.hdf5"
    checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
    callbacks_list = [checkpoint]
    batch_size = 64
    epochs = 100
    # y is a dummy array — the real targets travel through the extra inputs.
    asr.model.fit(x=[data.x_train, data.train_padded_ph, data.train_input_length, data.train_label_length],
                  y=np.zeros(len(data.x_train)), batch_size=batch_size, validation_split=0.1, epochs=epochs, verbose=1,
                  callbacks=callbacks_list, shuffle=True)

    # load the saved best model weights
    asr.act_model.load_weights('best_model.hdf5')

    # predict outputs on validation images
    prediction = asr.act_model.predict(data.x_test[:10])

    # use CTC decoder (greedy best-path); -1 entries are padding
    out = K.get_value(K.ctc_decode(prediction, input_length=np.ones(prediction.shape[0]) * prediction.shape[1],
                                   greedy=True)[0][0])

    # see the results
    i = 0
    for x in out:
        print("original_text = ", data.ph_org_test[i])
        print("predicted text = ", end='')
        for p in x:
            if int(p) != -1:
                print("'" + data.phonemes[int(p)] + "', ", end='')
        print('\n')
        i += 1
95cfad0340085f5af167901584fc5e6120b08369 | 1,366 | py | Python | LineAlpha/LineApi/LineTracer.py | AnzPo/Des12 | 844df70cd8ab10fcb8108371b8d7e50b166810d0 | [
"Apache-2.0"
] | null | null | null | LineAlpha/LineApi/LineTracer.py | AnzPo/Des12 | 844df70cd8ab10fcb8108371b8d7e50b166810d0 | [
"Apache-2.0"
] | null | null | null | LineAlpha/LineApi/LineTracer.py | AnzPo/Des12 | 844df70cd8ab10fcb8108371b8d7e50b166810d0 | [
"Apache-2.0"
] | 1 | 2019-06-02T15:35:25.000Z | 2019-06-02T15:35:25.000Z | # -*- coding: utf-8 -*-
from .LineClient import LineClient
from types import *
from ..LineThrift.ttypes import OpType
from .LineServer import url
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class LineTracer(object):
    """Polls Line operations and dispatches them to registered callbacks."""

    # Class-level defaults kept for backward compatibility; __init__ copies
    # OpInterrupt so instances no longer share one mutable dict.
    OpInterrupt = {}
    client = None

    def __init__(self, client):
        if type(client) is not LineClient:
            raise Exception(
                "You need to set LineClient instance to initialize LineTracer")
        # Bug fix: the original registered callbacks into the shared
        # class-level dict, so every LineTracer instance saw (and could
        # overwrite) every other instance's handlers.
        self.OpInterrupt = dict(type(self).OpInterrupt)
        self.client = client
        self.client.endPoint(url.LINE_POLL_QUERY_PATH_FIR)

    def addOpInterruptWithDict(self, OpInterruptDict):
        """To add Operation with Callback function {Optype.NOTIFIED_INTO_GROUP: func}"""
        self.OpInterrupt.update(OpInterruptDict)

    def addOpInterrupt(self, OperationType, DisposeFunc):
        """Register DisposeFunc as the handler for a single operation type."""
        self.OpInterrupt[OperationType] = DisposeFunc

    def execute(self):
        """Fetch pending operations once and invoke registered handlers.

        Network/parse failures are deliberately swallowed (the caller polls
        in a loop); Ctrl-C exits the process.
        """
        try:
            operations = self.client.fetchOperation(self.client.revision, 1)
        except EOFError:
            return
        except KeyboardInterrupt:
            exit()
        except:
            return
        for op in operations:
            if op.type in self.OpInterrupt.keys():
                self.OpInterrupt[op.type](op)
            # track the highest revision seen so the next poll resumes there
            self.client.revision = max(op.revision, self.client.revision)
33bc24d79d26b766bea32bc4d06086f2d084543e | 275 | py | Python | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_12/ar_12/test_artificial_32_Difference_ConstantTrend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_12/ar_12/test_artificial_32_Difference_ConstantTrend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_12/ar_12/test_artificial_32_Difference_ConstantTrend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generate a 32-sample daily artificial series (constant trend, cycle length
# 12, AR order 12, Difference transform, 20 exogenous variables, no noise)
# and run the pyaf artificial-dataset benchmark on it.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
45f3ee3eaf6c8bd8d60f9f4c18b74561db4382d7 | 2,200 | py | Python | tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A TestCase wrapper for TF Probability, inspired in part by XLATestCase."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl import flags
import tensorflow as tf
from tensorflow_probability.python.experimental.auto_batching import xla
from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import
# Command-line configuration for the XLA test harness below.
flags.DEFINE_string('test_device', None,
                    'TensorFlow device on which to place operators under test')
flags.DEFINE_string('tf_xla_flags', None,
                    'Value to set the TF_XLA_FLAGS environment variable to')
FLAGS = flags.FLAGS
class TFPXLATestCase(tf.test.TestCase):
  """TFP+XLA test harness."""

  def __init__(self, method_name='runTest'):
    super(TFPXLATestCase, self).__init__(method_name)
    # Device under test and optional TF_XLA_FLAGS come from command line.
    self.device = FLAGS.test_device
    if FLAGS.tf_xla_flags is not None:
      os.environ['TF_XLA_FLAGS'] = FLAGS.tf_xla_flags

  def setUp(self):
    # Save the global flag so tearDown can restore it for other tests.
    self._orig_cfv2 = control_flow_util.ENABLE_CONTROL_FLOW_V2
    # We require control flow v2 for XLA CPU.
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
    super(TFPXLATestCase, self).setUp()

  def tearDown(self):
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = self._orig_cfv2
    super(TFPXLATestCase, self).tearDown()

  def wrap_fn(self, f):
    # Compile f with the TPU rewriter on TPU devices, otherwise with the
    # generic experimental XLA compiler.
    return xla.compile_nested_output(
        f, (tf.compat.v1.tpu.rewrite if 'TPU' in self.device
            else tf.xla.experimental.compile))
8558cc12c1aa5ff3a51d33dce9ca63ca239f58db | 2,168 | py | Python | __init__.py | ThakeeNathees/localhost | 098a9eab05d5645b32e5709e315f49830b4e978d | [
"MIT"
] | 1 | 2021-05-23T21:37:13.000Z | 2021-05-23T21:37:13.000Z | __init__.py | ThakeeNathees/localhost | 098a9eab05d5645b32e5709e315f49830b4e978d | [
"MIT"
] | null | null | null | __init__.py | ThakeeNathees/localhost | 098a9eab05d5645b32e5709e315f49830b4e978d | [
"MIT"
] | 1 | 2021-04-29T06:03:50.000Z | 2021-04-29T06:03:50.000Z | import sys, os
from http.server import HTTPServer
from .handler import (
Handler, _home_page_handler,
)
from .response import (
HttpResponse, JsonResponse, Http404,
render, _get_404_context, redirect, media_response,
_static_handler
)
from . import auth
from .auth import admin_page
from .urls import Path
from . import utils
try:
from server_data import settings
except ImportError:
raise Exception('did you initialize with "python localhost init [path]" ?')
class Server:
    """Minimal HTTP server that routes URL patterns to handler callables."""

    def __init__(self, port=8000, server_class=HTTPServer):
        utils._type_check(
            (port, int)
        )
        self.port = port
        self.urlpatterns = []
        self._handler_class = Handler
        # The handler class resolves requests against this server's patterns.
        self._handler_class.localserver = self
        self._server_class = server_class

    def add_default_paths(self):  ## urls must ends with /, path is file_path
        """Register built-in routes: debug home page, static files and
        (unless disabled in settings) the admin pages."""
        # Debug-only default home page when no routes were registered.
        if len(self.urlpatterns) == 0 and settings.DEBUG:
            self.urlpatterns.append(Path('', _home_page_handler))
        self.urlpatterns += [
            Path(settings.STATIC_URL, _static_handler),
        ]

        ## admin pages
        USE_ADMIN_PAGE = getattr(settings, 'USE_ADMIN_PAGE', True)
        if USE_ADMIN_PAGE:
            # Bug fix: the original read settings.__dict__['ADMIN_ULR'] (typo)
            # after checking hasattr(settings, 'ADMIN_URL'), so configuring
            # ADMIN_URL raised KeyError. getattr with a default is also safer
            # than poking __dict__ directly.
            ADMIN_URL = getattr(settings, 'ADMIN_URL', '/admin/')
            self.urlpatterns += [
                Path(ADMIN_URL, admin_page._handle_admin_home_page, name='admin-home'),
                Path(ADMIN_URL + 'login/', admin_page._handle_admin_login_page, name='admin-login'),
                Path(ADMIN_URL + 'logout/', admin_page._handle_admin_logout_page, name='admin-logout'),
                Path(ADMIN_URL + '<app_name>/<table_name>', admin_page._handle_admin_table_page),
            ]

    def run(self):
        """Register default routes and serve requests forever on self.port."""
        self.add_default_paths()
        server_address = ('', self.port)
        httpd = self._server_class(server_address, self._handler_class)
        print('running server at http://localhost:%s/' % self.port)
        httpd.serve_forever()
6232726565431ff968e0b5f9cc941a888cb9075f | 448 | py | Python | mezzanine/galleries/migrations/0002_auto_20141227_0224.py | interrogator/mezzanine | 5011534749a2319891c35457dda61152407a6dde | [
"BSD-2-Clause"
] | null | null | null | mezzanine/galleries/migrations/0002_auto_20141227_0224.py | interrogator/mezzanine | 5011534749a2319891c35457dda61152407a6dde | [
"BSD-2-Clause"
] | null | null | null | mezzanine/galleries/migrations/0002_auto_20141227_0224.py | interrogator/mezzanine | 5011534749a2319891c35457dda61152407a6dde | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('galleries', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='galleryimage',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
preserve_default=True,
),
]
| 22.4 | 84 | 0.602679 |
88e0ee4b5126da566502ba255355d25909374956 | 4,361 | py | Python | tb_rest_client/models/models_pe/customer_id.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | [
"Apache-2.0"
] | 30 | 2020-06-19T06:42:50.000Z | 2021-08-23T21:16:36.000Z | tb_rest_client/models/models_pe/customer_id.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | [
"Apache-2.0"
] | 25 | 2021-08-30T01:17:27.000Z | 2022-03-16T14:10:14.000Z | tb_rest_client/models/models_pe/customer_id.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | [
"Apache-2.0"
] | 23 | 2020-07-06T13:41:54.000Z | 2021-08-23T21:04:50.000Z | # coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomerId(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Fix: the generated class docstring had a stray import statement fused
    # into it; the attributes description now lives in the real docstring.
    swagger_types = {
        'id': 'str',
        'entity_type': 'str'
    }

    attribute_map = {
        'id': 'id',
        'entity_type': 'entityType'
    }

    def __init__(self, id=None, entity_type=None):  # noqa: E501
        """CustomerId - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._entity_type = None
        self.discriminator = None
        self.id = id
        self.entity_type = entity_type

    @property
    def id(self):
        """Gets the id of this CustomerId.  # noqa: E501

        ID of the entity, time-based UUID v1  # noqa: E501

        :return: The id of this CustomerId.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this CustomerId.

        ID of the entity, time-based UUID v1  # noqa: E501

        :param id: The id of this CustomerId.  # noqa: E501
        :type: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

        self._id = id

    @property
    def entity_type(self):
        """Gets the entity_type of this CustomerId.  # noqa: E501

        string  # noqa: E501

        :return: The entity_type of this CustomerId.  # noqa: E501
        :rtype: str
        """
        return self._entity_type

    @entity_type.setter
    def entity_type(self, entity_type):
        """Sets the entity_type of this CustomerId.

        string  # noqa: E501

        :param entity_type: The entity_type of this CustomerId.  # noqa: E501
        :type: str
        """
        if entity_type is None:
            raise ValueError("Invalid value for `entity_type`, must not be `None`")  # noqa: E501
        allowed_values = ["CUSTOMER"]  # noqa: E501
        if entity_type not in allowed_values:
            raise ValueError(
                "Invalid value for `entity_type` ({0}), must be one of {1}"  # noqa: E501
                .format(entity_type, allowed_values)
            )

        self._entity_type = entity_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Plain dict.items() replaces six.iteritems(): equivalent on both
        # Python 2 and 3, and removes the block's only six dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(CustomerId, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CustomerId):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
4fcf8c5bef23383b384ac39574be95089bfa7d16 | 9,369 | py | Python | src/cake/test/task.py | anlongfei/cake | 8ede70a58516d2cf3117fcef050bc14046be91c9 | [
"MIT"
] | 14 | 2017-06-16T11:56:24.000Z | 2022-01-27T15:09:22.000Z | src/cake/test/task.py | anlongfei/cake | 8ede70a58516d2cf3117fcef050bc14046be91c9 | [
"MIT"
] | 19 | 2015-01-18T10:45:50.000Z | 2019-12-19T06:02:24.000Z | src/cake/test/task.py | anlongfei/cake | 8ede70a58516d2cf3117fcef050bc14046be91c9 | [
"MIT"
] | 6 | 2017-06-07T04:14:12.000Z | 2021-03-24T14:25:53.000Z | """Task Unit Tests.
"""
import unittest
import threading
import sys
import cake.task
class TaskTests(unittest.TestCase):
    """Unit tests for cake.task.Task scheduling, dependencies and results.

    NOTE(review): each test synchronises via threading.Event callbacks and
    waits up to 0.5s for completion -- presumably tasks execute on worker
    threads; confirm against cake.task's threading model.
    """

    def testTaskFunctionExecutedExactlyOnce(self):
        # A started task runs its function once and reports success.
        result = []
        def f():
            result.append(None)
        e = threading.Event()
        t = cake.task.Task(f)
        t.addCallback(e.set)
        self.assertFalse(t.started)
        self.assertFalse(t.completed)
        self.assertFalse(t.succeeded)
        self.assertFalse(t.failed)
        t.start()
        self.assertTrue(t.started)
        e.wait(0.5)
        self.assertTrue(t.completed)
        self.assertTrue(t.started)
        self.assertTrue(t.succeeded)
        self.assertFalse(t.failed)
        self.assertEqual(len(result), 1)

    def testFailingTask(self):
        # An exception inside the task function marks the task failed.
        def f():
            raise RuntimeError()
        e = threading.Event()
        t = cake.task.Task(f)
        t.addCallback(e.set)
        t.start()
        e.wait(0.5)
        self.assertTrue(t.completed)
        self.assertTrue(t.started)
        self.assertFalse(t.succeeded)
        self.assertTrue(t.failed)

    def testStartAfter(self):
        # startAfter defers execution until the predecessor completes.
        result = []
        def a():
            result.append("a")
        def b():
            result.append("b")
        eb = threading.Event()
        ta = cake.task.Task(a)
        tb = cake.task.Task(b)
        tb.addCallback(eb.set)
        tb.startAfter(ta)
        self.assertTrue(tb.started)
        self.assertFalse(ta.started)
        ta.start()
        self.assertTrue(ta.started)
        eb.wait(0.5)
        self.assertTrue(tb.completed)
        self.assertTrue(ta.succeeded)
        self.assertTrue(tb.started)
        self.assertTrue(tb.succeeded)
        self.assertEqual(result, ["a", "b"])

    def testStartAfterCompletedTask(self):
        # startAfter on an already-completed task runs immediately.
        result = []
        def a():
            result.append("a")
        def b():
            result.append("b")
        ea = threading.Event()
        eb = threading.Event()
        ta = cake.task.Task(a)
        tb = cake.task.Task(b)
        ta.addCallback(ea.set)
        tb.addCallback(eb.set)
        ta.start()
        self.assertTrue(ta.started)
        ea.wait(0.5)
        self.assertTrue(ta.completed)
        tb.startAfter(ta)
        self.assertTrue(tb.started)
        eb.wait(0.5)
        self.assertTrue(tb.completed)
        self.assertTrue(ta.succeeded)
        self.assertTrue(tb.started)
        self.assertTrue(tb.succeeded)
        self.assertEqual(result, ["a", "b"])

    def testStartAfterFailedTask(self):
        # A failed predecessor propagates failure; the successor never runs.
        result = []
        def a():
            result.append("a")
            raise RuntimeError()
        def b():
            result.append("b")
        eb = threading.Event()
        ta = cake.task.Task(a)
        tb = cake.task.Task(b)
        tb.addCallback(eb.set)
        tb.startAfter(ta)
        self.assertTrue(tb.started)
        self.assertFalse(ta.started)
        ta.start()
        self.assertTrue(ta.started)
        eb.wait(0.5)
        self.assertTrue(tb.completed)
        self.assertTrue(tb.failed)
        self.assertTrue(tb.started)
        self.assertTrue(tb.failed)
        self.assertEqual(result, ["a"])

    def testCompleteAfter(self):
        # completeAfter keeps the current task open until the child finishes,
        # so "b" is ordered before the dependent "c".
        result = []
        def a():
            result.append("a")
            def b():
                result.append("b")
            t = cake.task.Task(b)
            t.start()
            cake.task.Task.getCurrent().completeAfter(t)
        def c():
            result.append("c")
        ec = threading.Event()
        ta = cake.task.Task(a)
        tc = cake.task.Task(c)
        tc.addCallback(ec.set)
        tc.startAfter(ta)
        ta.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertEqual(result, ["a", "b", "c"])

    def testStartAfterMultiple(self):
        # startAfter with a list waits for every predecessor.
        result = []
        def a():
            result.append("a")
        def b():
            result.append("b")
        def c():
            result.append("c")
        ec = threading.Event()
        ta = cake.task.Task(a)
        tb = cake.task.Task(b)
        tc = cake.task.Task(c)
        tc.addCallback(ec.set)
        tc.startAfter([ta, tb])
        self.assertTrue(tc.started)
        self.assertFalse(ta.started)
        self.assertFalse(tb.started)
        ta.start()
        self.assertFalse(tc.completed)
        tb.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertTrue(ta.succeeded)
        self.assertTrue(tb.succeeded)
        self.assertTrue(tc.succeeded)
        self.assertTrue(result in [["a", "b", "c"], ["b", "a", "c"]])

    def testStartAfterMultipleSomeFail(self):
        # One failed predecessor out of several fails the dependent task.
        result = []
        def a():
            raise Exception()
        def b():
            result.append("b")
        def c():
            result.append("c")
        eb = threading.Event()
        ec = threading.Event()
        ta = cake.task.Task(a)
        tb = cake.task.Task(b)
        tc = cake.task.Task(c)
        tb.addCallback(eb.set)
        tc.addCallback(ec.set)
        tc.startAfter([ta, tb])
        self.assertTrue(tc.started)
        self.assertFalse(ta.started)
        self.assertFalse(tb.started)
        tb.start()
        eb.wait(0.5)
        self.assertTrue(tb.completed)
        self.assertTrue(tb.succeeded)
        self.assertFalse(tc.completed)
        ta.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertTrue(ta.failed)
        self.assertTrue(tb.succeeded)
        self.assertTrue(tc.failed)
        self.assertEqual(result, ["b"])

    def testMultipleSubTasks(self):
        # Sub-tasks started inside a task each see themselves as current,
        # and completeAfter holds the parent open for both.
        result = []
        def a():
            result.append("a")
            t = cake.task.Task.getCurrent()
            def b1():
                self.assertTrue(cake.task.Task.getCurrent() is t1)
                result.append("b1")
            def b2():
                self.assertTrue(cake.task.Task.getCurrent() is t2)
                result.append("b2")
            t1 = cake.task.Task(b1)
            t1.start()
            t2 = cake.task.Task(b2)
            t2.start()
            self.assertTrue(t1 is not t)
            self.assertTrue(t2 is not t)
            self.assertTrue(t1 is not t2)
            t.completeAfter([t1, t2])
        def c():
            result.append("c")
        ec = threading.Event()
        ta = cake.task.Task(a)
        tc = cake.task.Task(c)
        tc.addCallback(ec.set)
        tc.startAfter(ta)
        ta.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertTrue(tc.succeeded)
        self.assertTrue(result in [
            ["a", "b1", "b2", "c"],
            ["a", "b2", "b1", "c"],
        ])

    def testFailedSubTasksFailsParent(self):
        # A failing sub-task the parent completes after fails the parent too.
        result = []
        def a():
            result.append("a")
            def b():
                result.append("b")
                raise RuntimeError()
            t = cake.task.Task(b)
            t.parent.completeAfter(t)
            t.start()
        def c():
            result.append("c")
        ec = threading.Event()
        ta = cake.task.Task(a)
        tc = cake.task.Task(c)
        tc.addCallback(ec.set)
        tc.startAfter(ta)
        ta.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertTrue(ta.failed)
        self.assertTrue(tc.failed)
        self.assertEqual(result, ["a", "b"])

    def testCompleteAfterMultipleSomeFail(self):
        # completeAfter with several tasks fails if any of them fails.
        result = []
        def a():
            result.append("a")
        def b1():
            raise Exception()
        def b2():
            result.append("b2")
        def c():
            result.append("c")
        tb1 = cake.task.Task(b1)
        tb2 = cake.task.Task(b2)
        ta = cake.task.Task(a)
        ta.completeAfter([tb1, tb2])
        ec = threading.Event()
        tc = cake.task.Task(c)
        tc.addCallback(ec.set)
        tc.startAfter(ta)
        ta.start()
        self.assertFalse(tc.completed)
        self.assertFalse(ta.completed)
        tb2.start()
        self.assertFalse(tc.completed)
        self.assertFalse(ta.completed)
        tb1.start()
        ec.wait(0.5)
        self.assertTrue(tc.completed)
        self.assertTrue(ta.failed)
        self.assertTrue(tb1.failed)
        self.assertTrue(tb2.succeeded)
        self.assertTrue(tc.failed)
        self.assertTrue(result in [["a", "b2"], ["b2", "a"]])

    def testCancelBeforeStart(self):
        # Cancelling an unstarted task marks it started, completed and failed.
        def a():
            pass
        ta = cake.task.Task(a)
        ta.cancel()
        self.assertTrue(ta.started)
        self.assertTrue(ta.completed)
        self.assertFalse(ta.succeeded)
        self.assertTrue(ta.failed)

    def testCancelAfterCompleteThrows(self):
        # Cancelling a completed task raises TaskError.
        def a():
            pass
        ea = threading.Event()
        ta = cake.task.Task(a)
        ta.addCallback(ea.set)
        ta.start()
        ea.wait(0.5)
        self.assertTrue(ta.completed)
        self.assertRaises(cake.task.TaskError, ta.cancel)

    def testCancelWhileExecutingFailsTask(self):
        # A task cancelling itself mid-run ends up failed.
        def a():
            cake.task.Task.getCurrent().cancel()
        ea = threading.Event()
        ta = cake.task.Task(a)
        ta.addCallback(ea.set)
        ta.start()
        ea.wait(0.5)
        self.assertTrue(ta.completed)
        self.assertTrue(ta.started)
        self.assertTrue(ta.completed)
        self.assertFalse(ta.succeeded)
        self.assertTrue(ta.failed)

    def testTaskResult(self):
        # The function's return value becomes the task result.
        def a():
            return "a"
        e = threading.Event()
        t = cake.task.Task(a)
        t.addCallback(e.set)
        t.start()
        e.wait(0.5)
        self.assertTrue(t.completed)
        self.assertEqual(t.result, "a")

    def testNestedTaskResult(self):
        # Returning a Task unwraps to that task's own result.
        def a():
            tb = cake.task.Task(b)
            tb.start()
            return tb
        def b():
            return "b"
        e = threading.Event()
        ta = cake.task.Task(a)
        ta.addCallback(e.set)
        ta.start()
        e.wait(0.5)
        self.assertTrue(ta.succeeded)
        self.assertEqual(ta.result, "b")
if __name__ == "__main__":
    # Run the task test suite verbosely; exit status 0 only on full success.
    tests = unittest.TestLoader().loadTestsFromTestCase(TaskTests)
    outcome = unittest.TextTestRunner(verbosity=2).run(tests)
    sys.exit(0 if outcome.wasSuccessful() else 1)
| 20.10515 | 65 | 0.57829 |
366c39818dee161e42d0eea8cc6b21722923eb06 | 4,052 | py | Python | collaboration/telepathyclient.py | hafizyunus/TortugadeMexico | ed303715f77f79e697e33b3d0df082843ffc1492 | [
"MIT"
] | 1 | 2020-05-27T19:05:13.000Z | 2020-05-27T19:05:13.000Z | collaboration/telepathyclient.py | hafizyunus/TortugadeMexico | ed303715f77f79e697e33b3d0df082843ffc1492 | [
"MIT"
] | 4 | 2018-02-14T01:29:55.000Z | 2020-01-23T02:18:29.000Z | collaboration/telepathyclient.py | hafizyunus/TortugadeMexico | ed303715f77f79e697e33b3d0df082843ffc1492 | [
"MIT"
] | 4 | 2018-02-04T15:18:31.000Z | 2020-05-27T16:21:56.000Z | # Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import dbus
from dbus import PROPERTIES_IFACE
from telepathy.interfaces import CLIENT, \
CLIENT_APPROVER, \
CLIENT_HANDLER, \
CLIENT_INTERFACE_REQUESTS
from telepathy.server import DBusProperties
import dispatch
SUGAR_CLIENT_SERVICE = 'org.freedesktop.Telepathy.Client.Sugar'
SUGAR_CLIENT_PATH = '/org/freedesktop/Telepathy/Client/Sugar'
_instance = None
class TelepathyClient(dbus.service.Object, DBusProperties):
    """Telepathy Client exported on D-Bus under the Sugar client name.

    Implements the Client, Handler, Approver and Requests interfaces and
    forwards incoming channels / dispatch operations to the ``got_channel``
    and ``got_dispatch_operation`` dispatch signals.
    """

    def __init__(self):
        # Interfaces advertised through the Client.Interfaces property.
        self._interfaces = set([CLIENT, CLIENT_HANDLER,
                                CLIENT_INTERFACE_REQUESTS, PROPERTIES_IFACE,
                                CLIENT_APPROVER])

        bus = dbus.Bus()
        bus_name = dbus.service.BusName(SUGAR_CLIENT_SERVICE, bus=bus)
        dbus.service.Object.__init__(self, bus_name, SUGAR_CLIENT_PATH)
        DBusProperties.__init__(self)

        self._implement_property_get(CLIENT, {
            'Interfaces': lambda: list(self._interfaces), })
        self._implement_property_get(CLIENT_HANDLER, {
            'HandlerChannelFilter': self.__get_filters_cb, })
        self._implement_property_get(CLIENT_APPROVER, {
            'ApproverChannelFilter': self.__get_filters_cb, })

        # Signals consumers subscribe to in order to receive channels and
        # channel dispatch operations.
        self.got_channel = dispatch.Signal()
        self.got_dispatch_operation = dispatch.Signal()

    def __get_filters_cb(self):
        # An empty filter dict matches every channel, so this client is
        # offered all channels/dispatch operations.
        logging.debug('__get_filters_cb')
        filter_dict = dbus.Dictionary({}, signature='sv')
        return dbus.Array([filter_dict], signature='a{sv}')

    @dbus.service.method(dbus_interface=CLIENT_HANDLER,
                         in_signature='ooa(oa{sv})aota{sv}', out_signature='')
    def HandleChannels(self, account, connection, channels, requests_satisfied,
                       user_action_time, handler_info):
        """Forward each handed channel through the got_channel signal."""
        logging.debug('HandleChannels\n%r\n%r\n%r\n%r\n%r\n%r\n', account,
                      connection, channels, requests_satisfied,
                      user_action_time, handler_info)
        for channel in channels:
            self.got_channel.send(self, account=account,
                                  connection=connection, channel=channel)

    @dbus.service.method(dbus_interface=CLIENT_INTERFACE_REQUESTS,
                         in_signature='oa{sv}', out_signature='')
    def AddRequest(self, request, properties):
        """Log channel requests; no further processing is done here."""
        logging.debug('AddRequest\n%r\n%r', request, properties)

    @dbus.service.method(dbus_interface=CLIENT_APPROVER,
                         in_signature='a(oa{sv})oa{sv}', out_signature='',
                         async_callbacks=('success_cb', 'error_cb_'))
    def AddDispatchOperation(self, channels, dispatch_operation_path,
                             properties, success_cb, error_cb_):
        """Approve the dispatch operation, then notify listeners.

        success_cb() is invoked first so the bus gets an immediate answer;
        listener errors are logged rather than propagated back over D-Bus.
        """
        success_cb()
        try:
            logging.debug('AddDispatchOperation\n%r\n%r\n%r', channels,
                          dispatch_operation_path, properties)
            self.got_dispatch_operation.send(
                self,
                channels=channels,
                dispatch_operation_path=dispatch_operation_path,
                properties=properties)
        # Fixed: `except Exception, e:` is Python-2-only syntax and a syntax
        # error on Python 3; `as` works on Python 2.6+ as well.
        except Exception as e:
            logging.exception(e)
def get_instance():
    """Return the process-wide TelepathyClient, creating it on first call."""
    global _instance
    if not _instance:
        _instance = TelepathyClient()
    return _instance
| 38.590476 | 79 | 0.663623 |
6db4cb3e60af234c64ad1e3d4646448e58378dfd | 6,852 | py | Python | reamber/sm/SMMapMeta.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 10 | 2020-06-28T11:16:36.000Z | 2021-08-09T21:41:43.000Z | reamber/sm/SMMapMeta.py | Eve-ning/reamberPy | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 35 | 2020-06-18T13:05:50.000Z | 2022-02-18T10:13:35.000Z | reamber/sm/SMMapMeta.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 2 | 2021-05-26T17:05:06.000Z | 2021-06-12T18:42:13.000Z | from dataclasses import dataclass
from dataclasses import field
from typing import List
class SMMapDifficulty:
    """Canonical StepMania difficulty names as used in the #NOTES header."""

    BEGINNER: str = "Beginner"
    EASY: str = "Easy"
    MEDIUM: str = "Medium"
    HARD: str = "Hard"
    CHALLENGE: str = "Challenge"
    EDIT: str = "Edit"
class SMMapChartTypes:
    """StepMania chart-type identifiers plus key-count helpers.

    ``get_keys``/``get_type`` used to be long if/elif chains; they are now
    simple table lookups with identical results, which is easier to audit
    and to extend with new chart types.
    """

    # Full Description in CHART_TYPES
    DANCE_SINGLE: str = "dance-single"  # Your normal 4 panel dance mode.
    DANCE_DOUBLE: str = "dance-double"  # Both P1 & P2 pads are used for one player.
    DANCE_SOLO: str = "dance-solo"  # 4-panel, except with additional top-left and top-right columns.
    DANCE_COUPLE: str = "dance-couple"  # One chart, but P1 & P2 have different steps.
    DANCE_THREEPANEL: str = "dance-threepanel"  # Like Single, but the down arrow isn't used.
    DANCE_ROUTINE: str = "dance-routine"  # It's like Couple in that it's for two players
    PUMP_SINGLE: str = "pump-single"  # Single, 5-panel pad.
    PUMP_HALFDOUBLE: str = "pump-halfdouble"  # Uses only six panels in the middle of the pad
    PUMP_DOUBLE: str = "pump-double"  # Same as Dance.
    PUMP_COUPLE: str = "pump-couple"  # Same as Dance.
    PUMP_ROUTINE: str = "pump-routine"  # Same as Dance.
    KB7_SINGLE: str = "kb7-single"  # Standard kb7 layout
    KICKBOX_HUMAN: str = "kickbox-human"  # 4key
    KICKBOX_QUADARM: str = "kickbox-quadarm"  # 4key
    KICKBOX_INSECT: str = "kickbox-insect"  # 6key
    KICKBOX_ARACHNID: str = "kickbox-arachnid"  # 8key
    PARA_SINGLE: str = "para-single"  # 5key.
    BM_SINGLE5: str = "bm-single5"  # 5+1key game mode
    BM_VERSUS5: str = "bm-versus5"  # Unknown, might be the beat equivalent to Couple?
    BM_DOUBLE5: str = "bm-double5"  # Both sides are used.
    BM_SINGLE7: str = "bm-single7"  # 7+1key game mode
    BM_DOUBLE7: str = "bm-double7"  # Both sides are used.
    BM_VERSUS7: str = "bm-versus7"  # Unknown (see versus5)
    EZ2_SINGLE: str = "ez2-single"  # 1 pad
    EZ2_DOUBLE: str = "ez2-double"  # 2 pad
    EZ2_REAL: str = "ez2-real"  # Divides the hand sensors into upper and lower halves.
    PNM_FIVE: str = "pnm-five"  # 5key game mode.
    PNM_NINE: str = "pnm-nine"  # 9key game mode.
    TECHNO_SINGLE4: str = "techno-single4"  # Identical to dance_single
    TECHNO_SINGLE5: str = "techno-single5"  # Identical to pump_single
    TECHNO_SINGLE8: str = "techno-single8"  # eight panels are used: ⬅⬇⬆➡↙↘↗↖
    TECHNO_DOUBLE4: str = "techno-double4"  # Identical to dance_double
    TECHNO_DOUBLE5: str = "techno-double5"  # Identical to pump_double
    TECHNO_DOUBLE8: str = "techno-double8"  # 16 panels (doubles)
    DS3DDX_SINGLE: str = "ds3ddx-single"  # 4key + 4hand...
    MANIAX_SINGLE: str = "maniax-single"  # 4key
    MANIAX_DOUBLE: str = "maniax-double"  # 8key

    # Known key counts per chart type. Types absent from this table have no
    # recorded key count, so get_keys() yields None for them -- exactly what
    # the old if/elif chain did (explicit `return None` or fall-through).
    _KEY_COUNTS = {
        DANCE_SINGLE: 4,
        DANCE_DOUBLE: 8,
        DANCE_SOLO: 6,
        DANCE_COUPLE: 4,
        DANCE_THREEPANEL: 3,
        DANCE_ROUTINE: 8,
        KB7_SINGLE: 7,
    }

    # Preferred chart type for a given key count; mirrors the preferences the
    # old chain encoded (e.g. 4 keys -> dance-single, not dance-couple).
    _TYPE_BY_KEYS = {
        4: DANCE_SINGLE,
        8: DANCE_DOUBLE,
        6: DANCE_SOLO,
        3: DANCE_THREEPANEL,
        7: KB7_SINGLE,
    }

    @staticmethod
    def get_keys(chart: str) -> int or None:
        """Return the key count for ``chart``, or None when unknown."""
        return SMMapChartTypes._KEY_COUNTS.get(chart)

    @staticmethod
    def get_type(keys: int) -> str or None:
        """ Attempts to find the most suitable chart Type

        Due to multiple types having the same keys, it may not be ideal
        every time. Returns "" when no chart type matches ``keys``.
        """
        return SMMapChartTypes._TYPE_BY_KEYS.get(keys, "")
@dataclass
class SMMapMeta:
    """Metadata for a single SM chart, parsed from its #NOTES header."""

    chart_type: str = SMMapChartTypes.DANCE_SINGLE
    description: str = ""
    difficulty: str = SMMapDifficulty.EASY
    difficulty_val: int = 1
    groove_radar: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0, 0.0, 0.0])

    def _read_note_metadata(self, metadata: List[str]):
        """Populate this object from the first five #NOTES header entries."""
        self.chart_type = metadata[0].strip()
        self.description = metadata[1].strip()
        self.difficulty = metadata[2].strip()
        self.difficulty_val = int(metadata[3].strip())
        # The groove radar arrives as a comma-separated list of floats.
        self.groove_radar = [float(value) for value in metadata[4].strip().split(",")]

    def _read_bpms(self, lines: List[str]):
        # Not implemented yet.
        pass

    def _read_stops(self, lines: List[str]):
        # Not implemented yet.
        pass
| 52.707692 | 113 | 0.638646 |
614fedc5d118cc5a4bf210fe6551afac33c5370a | 8,356 | py | Python | osmnx/geocoder.py | Picajoluna/osmnx | eb4fab8789abfa0c99569627d4d9d6318497ad9d | [
"MIT"
] | null | null | null | osmnx/geocoder.py | Picajoluna/osmnx | eb4fab8789abfa0c99569627d4d9d6318497ad9d | [
"MIT"
] | null | null | null | osmnx/geocoder.py | Picajoluna/osmnx | eb4fab8789abfa0c99569627d4d9d6318497ad9d | [
"MIT"
] | null | null | null | """Geocode queries and create GeoDataFrames of place boundaries."""
import logging as lg
from collections import OrderedDict

import geopandas as gpd
import pandas as pd

from . import downloader
from . import projection
from . import settings
from . import utils
def geocode(query):
    """
    Geocode a query string to (lat, lng) with the Nominatim geocoder.

    Parameters
    ----------
    query : string
        the query string to geocode

    Returns
    -------
    point : tuple
        the (lat, lng) coordinates returned by the geocoder
    """
    # assemble the Nominatim request parameters
    params = OrderedDict()
    params["format"] = "json"
    params["limit"] = 1
    params["dedupe"] = 0  # prevent deduping to get precise number of results
    params["q"] = query
    response_json = downloader.nominatim_request(params=params)

    # bail out unless the geocoder returned a usable first result
    if not response_json or "lat" not in response_json[0] or "lon" not in response_json[0]:
        raise ValueError(f'Nominatim could not geocode query "{query}"')

    point = (float(response_json[0]["lat"]), float(response_json[0]["lon"]))
    utils.log(f'Geocoded "{query}" to {point}')
    return point
def geocode_to_gdf(query, which_result=None, by_osmid=False, buffer_dist=None):
    """
    Retrieve place(s) by name or ID from the Nominatim API as a GeoDataFrame.

    You can query by place name or OSM ID. If querying by place name, the
    query argument can be a string or structured dict, or a list of such
    strings/dicts to send to geocoder. You can instead query by OSM ID by
    setting `by_osmid=True`. In this case, geocode_to_gdf treats the query
    argument as an OSM ID (or list of OSM IDs) for Nominatim lookup rather
    than text search. OSM IDs must be prepended with their types: node (N),
    way (W), or relation (R), in accordance with the Nominatim format. For
    example, `query=["R2192363", "N240109189", "W427818536"]`.

    If query argument is a list, then which_result should be either a single
    value or a list with the same length as query. The queries you provide
    must be resolvable to places in the Nominatim database. The resulting
    GeoDataFrame's geometry column contains place boundaries if they exist in
    OpenStreetMap.

    Parameters
    ----------
    query : string or dict or list
        query string(s) or structured dict(s) to geocode
    which_result : int
        which geocoding result to use. if None, auto-select the first
        (Multi)Polygon or raise an error if OSM doesn't return one. to get
        the top match regardless of geometry type, set which_result=1
    by_osmid : bool
        if True, handle query as an OSM ID for lookup rather than text search
    buffer_dist : float
        distance to buffer around the place geometry, in meters

    Returns
    -------
    gdf : geopandas.GeoDataFrame
        a GeoDataFrame with one row for each query
    """
    if not isinstance(query, (str, dict, list)):  # pragma: no cover
        raise ValueError("query must be a string or dict or list")

    # if caller passed a list of queries but a scalar which_result value, then
    # turn which_result into a list with same length as query list
    if isinstance(query, list) and (isinstance(which_result, int) or which_result is None):
        which_result = [which_result] * len(query)

    # turn query and which_result into lists if they're not already
    if not isinstance(query, list):
        query = [query]
    if not isinstance(which_result, list):
        which_result = [which_result]

    # ensure same length
    if len(query) != len(which_result):  # pragma: no cover
        raise ValueError("which_result length must equal query length")

    # ensure query type of each item
    for q in query:
        if not isinstance(q, (str, dict)):  # pragma: no cover
            raise ValueError("each query must be a dict or a string")

    # geocode each query, then combine the results into one GeoDataFrame.
    # NOTE: this used to call gdf.append() per query, which is quadratic and
    # was removed from pandas in 2.0; a single concat is the replacement.
    results = [_geocode_query_to_gdf(q, wr, by_osmid) for q, wr in zip(query, which_result)]
    if results:
        gdf = pd.concat(results, ignore_index=True)
    else:
        gdf = gpd.GeoDataFrame()

    # set the GeoDataFrame's CRS
    gdf.crs = settings.default_crs

    # if buffer_dist was passed in, project the geometry to UTM, buffer it in
    # meters, then project it back to lat-lng
    if buffer_dist is not None and len(gdf) > 0:
        gdf_utm = projection.project_gdf(gdf)
        gdf_utm["geometry"] = gdf_utm["geometry"].buffer(buffer_dist)
        gdf = projection.project_gdf(gdf_utm, to_latlong=True)
        utils.log(f"Buffered GeoDataFrame to {buffer_dist} meters")

    utils.log(f"Created GeoDataFrame with {len(gdf)} rows from {len(query)} queries")
    return gdf
def _geocode_query_to_gdf(query, which_result, by_osmid):
    """
    Geocode a single place query to a GeoDataFrame.

    Parameters
    ----------
    query : string or dict
        query string or structured dict to geocode
    which_result : int
        which geocoding result to use. if None, auto-select the first
        (Multi)Polygon or raise an error if OSM doesn't return one. to get
        the top match regardless of geometry type, set which_result=1
    by_osmid : bool
        if True, handle query as an OSM ID for lookup rather than text search
    Returns
    -------
    gdf : geopandas.GeoDataFrame
        a GeoDataFrame with one row containing the result of geocoding
    """
    # when auto-selecting a polygon we need a pool of candidates, so ask for
    # up to 50 results; otherwise only fetch as many as which_result needs
    if which_result is None:
        limit = 50
    else:
        limit = which_result
    results = downloader._osm_place_download(query, by_osmid=by_osmid, limit=limit)
    # choose the right result from the JSON response
    if not results:
        # if no results were returned, raise error
        raise ValueError(f'Nominatim geocoder returned 0 results for query "{query}"')
    elif by_osmid:
        # if searching by OSM ID, always take the first (ie, only) result
        result = results[0]
    elif which_result is None:
        # else, if which_result=None, auto-select the first (Multi)Polygon
        result = _get_first_polygon(results, query)
    elif len(results) >= which_result:
        # else, if we got at least which_result results, choose that one
        # (which_result is 1-indexed from the caller's perspective)
        result = results[which_result - 1]
    else:  # pragma: no cover
        # else, we got fewer results than which_result, raise error
        msg = f'Nominatim geocoder only returned {len(results)} result(s) for query "{query}"'
        raise ValueError(msg)
    # if we got a non (Multi)Polygon geometry type (like a point), log warning
    geom_type = result["geojson"]["type"]
    if geom_type not in {"Polygon", "MultiPolygon"}:
        msg = f'Nominatim geocoder returned a {geom_type} as the geometry for query "{query}"'
        utils.log(msg, level=lg.WARNING)
    # build the GeoJSON feature from the chosen result; the unpack order
    # below implies Nominatim's boundingbox is [south, north, west, east]
    south, north, west, east = result["boundingbox"]
    feature = {
        "type": "Feature",
        "geometry": result["geojson"],
        "properties": {
            "bbox_north": north,
            "bbox_south": south,
            "bbox_east": east,
            "bbox_west": west,
        },
    }
    # add the other attributes we retrieved (skip bulky/duplicated fields)
    for attr in result:
        if attr not in {"address", "boundingbox", "geojson", "icon", "licence"}:
            feature["properties"][attr] = result[attr]
    # create and return the GeoDataFrame
    gdf = gpd.GeoDataFrame.from_features([feature])
    cols = ["lat", "lon", "bbox_north", "bbox_south", "bbox_east", "bbox_west"]
    # Nominatim returns these as strings; coerce to float for numeric use
    gdf[cols] = gdf[cols].astype(float)
    return gdf
def _get_first_polygon(results, query):
"""
Choose first result of geometry type (Multi)Polygon from list of results.
Parameters
----------
results : list
list of results from downloader._osm_place_download
query : str
the query string or structured dict that was geocoded
Returns
-------
result : dict
the chosen result
"""
polygon_types = {"Polygon", "MultiPolygon"}
for result in results:
if result["geojson"]["type"] in polygon_types:
return result
# if we never found a polygon, throw an error
raise ValueError(f'Nominatim could not geocode query "{query}" to polygonal boundaries')
| 36.17316 | 94 | 0.663475 |
f6f551e1a0be75359d2927ba09ad33ac1e7e21f3 | 7,323 | py | Python | bb_structural_pipeline/bb_pipeline_struct.py | yilewang/tvb-ukbb | 9b89811e3c1acde1ecc73df9ff668bbdc6533d1b | [
"Apache-2.0"
] | 4 | 2022-01-25T15:53:07.000Z | 2022-02-04T01:24:45.000Z | bb_structural_pipeline/bb_pipeline_struct.py | yilewang/tvb-ukbb | 9b89811e3c1acde1ecc73df9ff668bbdc6533d1b | [
"Apache-2.0"
] | 11 | 2021-07-09T17:22:36.000Z | 2021-12-07T22:43:20.000Z | bb_structural_pipeline/bb_pipeline_struct.py | yilewang/tvb-ukbb | 9b89811e3c1acde1ecc73df9ff668bbdc6533d1b | [
"Apache-2.0"
] | 2 | 2022-02-14T01:38:51.000Z | 2022-02-18T20:38:32.000Z | #!/bin/env python
#
# Script name: bb_pipeline_struct.py
#
# Description: Script with the structural pipeline.
# This script will call the rest of structural functions.
#
# Authors: Fidel Alfaro-Almagro, Stephen M. Smith & Mark Jenkinson
#
# Copyright 2017 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import time
import sys
import json
sys.path.insert(1, os.path.dirname(__file__) + "/..")
import bb_pipeline_tools.bb_logging_tool as LT
def bb_pipeline_struct(subject, runTopup, fileConfiguration):
    """Run the structural stage of the pipeline for one subject.

    Parameters
    ----------
    subject : str
        Path to the subject directory (slashes are replaced by underscores
        to build per-subject job names).
    runTopup : bool
        Whether fieldmap estimation with FSL topup should be run.
    fileConfiguration : dict
        File-descriptor mapping for the subject; must contain a non-empty
        "T1" entry for processing to proceed.

    Returns
    -------
    -1 when no T1 is available; otherwise a job-id string: either
    "<STRUCTINIT>,<SWI>" (topup disabled) or the post-topup job id.
    """
    logger = LT.initLogging(__file__, subject)
    logDir = logger.logDir
    # base subject directory, i.e. everything before the trailing /logs/
    baseDir = logDir[0 : logDir.rfind("/logs/")]
    jobSTRUCTINIT = "-1"
    jobSWI = "-1"
    # subject path flattened into a name safe for job identifiers
    subname = subject.replace("/", "_")
    print("Beginning structural pipeline")
    if (not "T1" in fileConfiguration) or (fileConfiguration["T1"] == ""):
        # without a T1 nothing downstream can run
        logger.error("There is no T1. Subject " + subject + " cannot be processed.")
        return -1
    else:
        # TODO: Adapt code to good syntax practices --> PEP 8
        # Create the B0 AP - PA file to estimate the fieldmaps
        b0_threshold = int(
            np.loadtxt(os.environ["BB_BIN_DIR"] + "/bb_data/b0_threshold.txt")
        )
        jobsB0 = []
        if runTopup:
            # if encDir in ["dwi"]:
            #     pass
            print("Running topup setup...")
            for encDir in ["AP", "PA"]:
                # count b0 volumes (b-value at or below the threshold)
                bvals = np.loadtxt(subject + "/dMRI/raw/" + encDir + ".bval")
                numVols = int(sum(bvals <= b0_threshold))
                # numVols= LT.runCommand(logger, "for f in `cat " + subject +"/dMRI/raw/" + encDir + ".bval` ; do echo $f; done | awk '{if($1==$1+0 && $1 < " + b0_threshold + " ) print $1}' |wc | awk '{print $1}'")
                # extract all B0 volumes for this encoding direction
                jobGETB01 = LT.runCommand(
                    logger,
                    "$BB_BIN_DIR/bb_structural_pipeline/bb_get_b0s.py -i "
                    + subject
                    + "/dMRI/raw/"
                    + encDir
                    + ".nii.gz -o "
                    + subject
                    + "/fieldmap/total_B0_"
                    + encDir
                    + ".nii.gz -n "
                    + str(numVols)
                    + " -l "
                    + str(b0_threshold),
                    "bb_get_b0s_1_"
                    + subname
                )
                # pick the best single B0 volume out of the extracted ones
                jobsB0.append(
                    LT.runCommand(
                        logger,
                        "$BB_BIN_DIR/bb_structural_pipeline/bb_choose_bestB0 "
                        + subject
                        + "/fieldmap/total_B0_"
                        + encDir
                        + ".nii.gz "
                        + subject
                        + "/fieldmap/B0_"
                        + encDir
                        + ".nii.gz ",
                        "bb_choose_bestB0_1_"
                        + subname
                    )
                )
            # merge the chosen AP and PA B0s into one file for topup
            jobMERGE = LT.runCommand(
                logger,
                "${FSLDIR}/bin/fslmerge -t "
                + subject
                + "/fieldmap/B0_AP_PA "
                + subject
                + "/fieldmap/B0_AP "
                + subject
                + "/fieldmap/B0_PA",
                "bb_fslmerge_"
                + subname
            )
            print("Topup setup completed.")
        # Registrations - T1 to MNI - T2 to T1 - T2 to MNI (Combining the 2 previous ones)
        print("Running bb_struct_init...")
        jobSTRUCTINIT = LT.runCommand(
            logger,
            "${BB_BIN_DIR}/bb_structural_pipeline/bb_struct_init "
            + subject,
            "bb_structinit_"
            + subname
        )
        print("bb_struct_init completed.")
        # TODO: Do a better check here. This one looks arbitrary
        if "SWI_TOTAL_MAG_TE2" in fileConfiguration:
            print("Running SWI registration...")
            jobSWI = LT.runCommand(
                logger,
                "$BB_BIN_DIR/bb_structural_pipeline/bb_swi_reg "
                + subject,
                "bb_swi_reg_"
                + subname
            )
            print("SWI registration complete.")
        # Topup
        if runTopup:
            print("Topup enabled. Running topup...")
            # write acqparams + fieldmap inputs for topup
            jobPREPAREFIELDMAP = LT.runCommand(
                logger,
                "$BB_BIN_DIR/bb_structural_pipeline/bb_prepare_struct_fieldmap "
                + subject,
                "bb_prepare_struct_fieldmap_"
                + subname
            )
            jobTOPUP = LT.runCommand(
                logger,
                "${FSLDIR}/bin/topup --imain="
                + subject
                + "/fieldmap/B0_AP_PA --datain="
                + subject
                + "/fieldmap/acqparams.txt --config=b02b0.cnf --out="
                + subject
                + "/fieldmap/fieldmap_out --fout="
                + subject
                + "/fieldmap/fieldmap_fout --jacout="
                + subject
                + "/fieldmap/fieldmap_jacout -v",
                "bb_topup_"
                + subname
            )
            print("Topup complete.")
        else:
            logger.error(
                "There is not enough/correct DWI data. TOPUP cannot be run. Continuing to run DWI and fMRI processing without TOPUP."
            )
        # HCP Structural pipeline
        # jobHCPSTRUCT = LT.runCommand(logger, 'bb_HCP_structural ' + subject + ' ' + jobSTRUCTINIT + ' ' + str(boolT2))
        if not runTopup:
            print("Structural pipeline complete. Logfiles located in subject's logs directory.")
            # NOTE(review): callers receive a comma-joined job-id string here
            # but a plain job id in the topup branch -- confirm downstream
            # consumers accept both shapes.
            return ",".join([jobSTRUCTINIT, jobSWI])
        else:
            print("Running post-topup...")
            jobPOSTTOPUP = LT.runCommand(
                logger,
                "$BB_BIN_DIR/bb_structural_pipeline/bb_post_topup "
                + subject,
                "bb_post_topup_"
                + subname
            )
            print("Post-topup complete.")
            print("Structural pipeline complete. Logfiles located in subject's logs directory.")
            return jobPOSTTOPUP
if __name__ == "__main__":
    # Usage: bb_pipeline_struct.py <subject_directory>
    if len(sys.argv) < 2:
        print("Usage: bb_pipeline_struct.py <subject>. Exiting")
        sys.exit(1)

    # grab subject name from command
    subject = sys.argv[1]
    fd_fileName = "logs/file_descriptor.json"

    # check if subject directory exists
    if not os.path.isdir(subject):
        print(f"{subject} is not a valid directory. Exiting")
        sys.exit(1)

    # attempt to open the JSON file; the path is built before the try block
    # so it is always defined for the error message
    json_path = os.path.abspath(f"./{subject}/{fd_fileName}")
    try:
        with open(json_path, "r") as f:
            fileConfig = json.load(f)
    # narrowed from a bare `except:`: OSError covers missing/unreadable
    # files and ValueError covers json.JSONDecodeError (its subclass)
    except (OSError, ValueError):
        print(f"{json_path} could not be loaded. Exiting")
        sys.exit(1)

    # call pipeline
    bb_pipeline_struct(subject, False, fileConfig)
| 35.038278 | 214 | 0.517957 |
53e06baf5ebf1aa972351528c846a2d866752fe1 | 779 | py | Python | others/array_move_zeros_to_end.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | others/array_move_zeros_to_end.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | others/array_move_zeros_to_end.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3
r'''
Array Manipulation in Place: Coding Interview Question in Whiteboard Thursday
'''
def move_zero_to_end(alist):
    """Stably move every zero in ``alist`` to the end, in place.

    Non-zero elements keep their relative order.  The list is modified in
    place and also returned for convenience.

    >>> move_zero_to_end([0, 1, 2, 0, 3, 0, 4])
    [1, 2, 3, 4, 0, 0, 0]
    """
    # Single forward pass with a write pointer replaces the original
    # two-pass approach (collect non-zero indices, then shift): compact all
    # non-zero values to the front...  ``write`` never overtakes the read
    # position, so overwriting already-consumed slots is safe.
    write = 0
    for value in alist:
        if value != 0:
            alist[write] = value
            write += 1
    # ...then zero-fill the remaining tail.
    for i in range(write, len(alist)):
        alist[i] = 0
    return alist
print(move_zero_to_end([0, 1, 2, 0, 3, 0, 4]))
import unittest
class FAT(unittest.TestCase):
    """Functional acceptance test for ``move_zero_to_end``."""

    def test_d1(self):
        sample = [0, 1, 2, 0, 3, 0, 4]
        # Operate on a copy so the fixture itself stays untouched.
        moved = move_zero_to_end(list(sample))
        self.assertEqual('1,2,3,4,0,0,0', ','.join(map(str, moved)))
ee7169b4c6b649f6baf7cec7039c6d6a2d8c5f67 | 3,936 | py | Python | project_euler/problem_144/sol1.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | project_euler/problem_144/sol1.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | project_euler/problem_144/sol1.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | """
In laser physics, a "white cell" is a mirror system that acts as a delay line for the
laser beam. The beam enters the cell, bounces around on the mirrors, and eventually
works its way back out.
The specific white cell we will be considering is an ellipse with the equation
4x^2 + y^2 = 100
The section corresponding to −0.01 ≤ x ≤ +0.01 at the top is missing, allowing the
light to enter and exit through the hole.

The light beam in this problem starts at the point (0.0,10.1) just outside the white
cell, and the beam first impacts the mirror at (1.4,-9.6).
Each time the laser beam hits the surface of the ellipse, it follows the usual law of
reflection "angle of incidence equals angle of reflection." That is, both the incident
and reflected beams make the same angle with the normal line at the point of incidence.
In the figure on the left, the red line shows the first two points of contact between
the laser beam and the wall of the white cell; the blue line shows the line tangent to
the ellipse at the point of incidence of the first bounce.
The slope m of the tangent line at any point (x,y) of the given ellipse is: m = −4x/y
The normal line is perpendicular to this tangent line at the point of incidence.
The animation on the right shows the first 10 reflections of the beam.
How many times does the beam hit the internal surface of the white cell before exiting?
"""
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """
    Given that a laser beam hits the interior of the white cell at point
    (point_x, point_y) with gradient incoming_gradient, return a tuple (x,y,m1)
    where the next point of contact with the interior is (x,y) with gradient m1.

    >>> next_point(5.0, 0.0, 0.0)
    (-5.0, 0.0, 0.0)
    >>> next_point(5.0, 0.0, -2.0)
    (0.0, -10.0, 2.0)
    """
    # Gradient of the normal at the point of incidence is -4x/y for the
    # tangent, i.e. y/(4x) for the normal; reflect the incoming beam about
    # it via the double-angle sine/cosine of the normal's inclination.
    m_normal = point_y / 4 / point_x
    sin_double = 2 * m_normal / (1 + m_normal * m_normal)
    cos_double = (1 - m_normal * m_normal) / (1 + m_normal * m_normal)
    m_out = (sin_double - cos_double * incoming_gradient) / (
        cos_double + sin_double * incoming_gradient
    )
    # Intersect the reflected ray y = m_out*(x - point_x) + point_y with the
    # ellipse 4x^2 + y^2 = 100, which yields the quadratic a*x^2 + b*x + c.
    a = m_out ** 2 + 4
    b = 2 * m_out * (point_y - m_out * point_x)
    c = (point_y - m_out * point_x) ** 2 - 100
    root = sqrt(b ** 2 - 4 * a * c)
    x_minus = (-b - root) / (2 * a)
    x_plus = (-b + root) / (2 * a)
    # One of the two roots is the current point of incidence; take the other.
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + m_out * (next_x - point_x)
    return next_x, next_y, m_out
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Return the number of times that the beam hits the interior wall of the
    cell before exiting.

    >>> solution(0.00001,-10)
    1
    >>> solution(5, 0)
    287
    """
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # The beam enters through the gap at (0.0, 10.1) toward the first impact.
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    num_reflections: int = 0
    # Bounce until the beam reaches the opening -0.01 <= x <= 0.01 at the top.
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
    print(f"{solution() = }")
| 38.588235 | 88 | 0.667429 |
cd22327ea0b8ab35191e21ba14f53e319f284adc | 383 | py | Python | medcab/plotly-dash/dashboard.py | BobBriksz/front-end | 2519659e732552c684b8eb2e39ec724788412faf | [
"MIT"
] | null | null | null | medcab/plotly-dash/dashboard.py | BobBriksz/front-end | 2519659e732552c684b8eb2e39ec724788412faf | [
"MIT"
] | null | null | null | medcab/plotly-dash/dashboard.py | BobBriksz/front-end | 2519659e732552c684b8eb2e39ec724788412faf | [
"MIT"
] | 1 | 2021-01-04T23:03:34.000Z | 2021-01-04T23:03:34.000Z | from dash import Dash
def init_dashboard(server):
    """Create a Plotly Dash dashboard mounted on the given Flask server.

    Args:
        server: The parent Flask application instance.

    Returns:
        The Flask WSGI app with the Dash routes registered under /dashapp/.
    """
    # ``html`` was referenced but never imported in the original (NameError);
    # dash is already a dependency of this module, so import its component
    # package here.
    from dash import html

    # Bug fix: the original called ``dash.Dash`` although only ``Dash`` was
    # imported (``from dash import Dash``), which raised NameError at call time.
    dash_app = Dash(
        server=server,
        routes_pathname_prefix='/dashapp/',
        external_stylesheets=[
            '/static/dist/css/styles.css',
        ]
    )

    # Create Dash Layout
    dash_app.layout = html.Div(id='dash-container')

    return dash_app.server
| 21.277778 | 51 | 0.62141 |
6703edc9f4930b1f9a10ee0d863d14a2c8109ecb | 202 | py | Python | tests/watson/filters/test_abc.py | watsonpy/watson-filters | 22e3473d4caa3a5b208eeecd6d810f58f2d3a834 | [
"BSD-3-Clause"
] | null | null | null | tests/watson/filters/test_abc.py | watsonpy/watson-filters | 22e3473d4caa3a5b208eeecd6d810f58f2d3a834 | [
"BSD-3-Clause"
] | 1 | 2021-12-03T23:47:40.000Z | 2021-12-03T23:47:40.000Z | tests/watson/filters/test_abc.py | watsonpy/watson-filters | 22e3473d4caa3a5b208eeecd6d810f58f2d3a834 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from pytest import raises
from watson.filters import abc
class TestFilterBase(object):
    """Tests for the abstract ``Filter`` base class in ``watson.filters.abc``."""

    def test_call_error(self):
        # The abstract base class must not be directly instantiable; the abc
        # machinery raises TypeError when abstract methods are unimplemented.
        with raises(TypeError):
            abc.Filter()
| 18.363636 | 31 | 0.658416 |
227e3090643aebe3bd3e188e599fb111a3643dcb | 3,085 | py | Python | detection_training/coco/yolov4_resize416_multi/train_config.py | LANCEREN/simpleAICV-pytorch-ImageNet-COCO-training | 86c1b38df3cdcb195ec5b6229c343f07a52aeb7b | [
"MIT"
] | 154 | 2020-05-31T08:31:12.000Z | 2021-05-14T02:46:32.000Z | detection_training/coco/yolov4_resize416_multi/train_config.py | LANCEREN/simpleAICV-pytorch-ImageNet-COCO-training | 86c1b38df3cdcb195ec5b6229c343f07a52aeb7b | [
"MIT"
] | 11 | 2021-05-20T09:02:50.000Z | 2022-01-20T09:55:47.000Z | detection_training/coco/yolov4_resize416_multi/train_config.py | LANCEREN/simpleAICV-pytorch-ImageNet-COCO-training | 86c1b38df3cdcb195ec5b6229c343f07a52aeb7b | [
"MIT"
] | 37 | 2020-07-20T06:14:25.000Z | 2021-05-18T01:27:19.000Z | import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
sys.path.append(BASE_DIR)
from tools.path import COCO2017_path
from simpleAICV.detection.models import yolov4
from simpleAICV.detection.losses import Yolov4Loss
from simpleAICV.detection.decode import Yolov4Decoder
from simpleAICV.datasets.cocodataset import CocoDetection
from simpleAICV.detection.common import RandomHorizontalFlip, RandomCrop, RandomTranslate, Normalize, YoloStyleResize, RetinaStyleResize
import torchvision.transforms as transforms
class config:
    """Training configuration for YOLOv4 on COCO2017 (416px, multi-scale)."""
    dataset_name = 'COCO'
    network = 'yolov4'
    pretrained = False
    num_classes = 80
    input_image_size = 416
    # Model is looked up by name in the yolov4 module namespace; anchor sizes
    # are the standard 9 YOLO anchors grouped 3-per-stride (8/16/32).
    model = yolov4.__dict__[network](**{
        'pretrained':
        pretrained,
        'anchor_sizes': [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                         [59, 119], [116, 90], [156, 198], [373, 326]],
        'strides': [8, 16, 32],
        'num_classes':
        num_classes,
    })
    # Loss must use the same anchors/strides as the model head.
    criterion = Yolov4Loss(
        anchor_sizes=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                      [59, 119], [116, 90], [156, 198], [373, 326]],
        strides=[8, 16, 32],
    )
    decoder = Yolov4Decoder()
    # Training uses flip augmentation and multi-scale resize in [0.8, 1.0]
    # of the base input size; validation resizes at a fixed scale.
    train_dataset = CocoDetection(COCO2017_path,
                                  set_name='train2017',
                                  transform=transforms.Compose([
                                      RandomHorizontalFlip(flip_prob=0.5),
                                      Normalize(),
                                      YoloStyleResize(
                                          resize=input_image_size,
                                          multi_scale=True,
                                          multi_scale_range=[0.8, 1.0]),
                                  ]))
    val_dataset = CocoDetection(COCO2017_path,
                                set_name='val2017',
                                transform=transforms.Compose([
                                    Normalize(),
                                    YoloStyleResize(
                                        resize=input_image_size,
                                        multi_scale=False,
                                        multi_scale_range=[0.8, 1.0]),
                                ]))
    seed = 0
    # batch_size is total size in DataParallel mode
    # batch_size is per gpu node size in DistributedDataParallel mode
    batch_size = 32
    num_workers = 16
    # choose 'SGD' or 'AdamW'
    optimizer = 'AdamW'
    # 'AdamW' doesn't need gamma and momentum variable
    gamma = 0.1
    momentum = 0.9
    # choose 'MultiStepLR' or 'CosineLR'
    # milestones only use in 'MultiStepLR'
    scheduler = 'MultiStepLR'
    lr = 1e-4
    weight_decay = 1e-3
    milestones = [60, 90]
    warm_up_epochs = 0
    epochs = 100
    eval_epoch = [1, 2, 3, 4, 5]
    print_interval = 10
    # only in DistributedDataParallel mode can use sync_bn
    distributed = True
    sync_bn = False
    apex = True
022f3c82002345b7f4b3e36792908a65c7213bc8 | 1,104 | py | Python | guillotina/component/testing.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | [
"BSD-2-Clause"
] | 173 | 2017-03-10T18:26:12.000Z | 2022-03-03T06:48:56.000Z | guillotina/component/testing.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | [
"BSD-2-Clause"
] | 921 | 2017-03-08T14:04:43.000Z | 2022-03-30T10:28:56.000Z | guillotina/component/testing.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | [
"BSD-2-Clause"
] | 60 | 2017-03-16T19:59:44.000Z | 2022-03-03T06:48:59.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# flake8: noqa
# HACK to make sure basicmost event subscriber is installed
import guillotina.component.event
class PlacelessSetup(object):
    """Test harness that resets guillotina's global component registry."""

    def cleanUp(self):
        # Imported lazily so merely importing this module has no side effects.
        from guillotina.component.globalregistry import base

        # Re-running __init__ on the global registry wipes all registered
        # adapters/utilities, giving each test a pristine state.
        base.__init__("base")

    def setUp(self):
        self.cleanUp()

    def tearDown(self):
        self.cleanUp()


def setUp(test=None):
    # Module-level hook mirroring the classic zope.testing setUp(test) API.
    PlacelessSetup().setUp()


def tearDown(test=None):
    # Module-level hook mirroring the classic zope.testing tearDown(test) API.
    PlacelessSetup().tearDown()
| 28.307692 | 78 | 0.622283 |
9a663a604d3f60dd9feca0bf7d0559d994287fa9 | 7,068 | py | Python | numpyro/util.py | Anthonymcqueen21/numpyro | 94efe3a35491465eba66465b4dd1d4fb870d6c8c | [
"MIT"
] | 1 | 2019-06-24T04:27:18.000Z | 2019-06-24T04:27:18.000Z | numpyro/util.py | Anthonymcqueen21/numpyro | 94efe3a35491465eba66465b4dd1d4fb870d6c8c | [
"MIT"
] | null | null | null | numpyro/util.py | Anthonymcqueen21/numpyro | 94efe3a35491465eba66465b4dd1d4fb870d6c8c | [
"MIT"
] | null | null | null | from collections import namedtuple
from contextlib import contextmanager
import random
import numpy as onp
import tqdm
from jax import jit, lax, ops, vmap
from jax.flatten_util import ravel_pytree
import jax.numpy as np
from jax.tree_util import register_pytree_node
_DATA_TYPES = {}
_DISABLE_CONTROL_FLOW_PRIM = False
def set_rng_seed(rng_seed):
    """Seed both the stdlib and NumPy global RNGs for reproducibility."""
    for seeder in (random.seed, onp.random.seed):
        seeder(rng_seed)
# let JAX recognize _TreeInfo structure
# ref: https://github.com/google/jax/issues/446
# TODO: remove this when namedtuple is supported in JAX
def register_pytree(cls):
    """Register a namedtuple type as a JAX pytree node, at most once.

    Repeat calls are no-ops thanks to the ``_registered`` marker set on the
    class (ref: https://github.com/google/jax/issues/446).
    """
    if getattr(cls, '_registered', False):
        return
    register_pytree_node(
        cls,
        lambda xs: (tuple(xs), None),
        lambda _, xs: cls(*xs),
    )
    cls._registered = True
def laxtuple(name, fields):
    """Return a JAX-registered namedtuple type, memoized by (name, fields)."""
    cache_key = (name,) + tuple(fields)
    if cache_key in _DATA_TYPES:
        return _DATA_TYPES[cache_key]
    new_cls = namedtuple(name, fields)
    register_pytree(new_cls)
    # Alias ``update`` to namedtuple's ``_replace`` for a friendlier API.
    new_cls.update = new_cls._replace
    _DATA_TYPES[cache_key] = new_cls
    return new_cls
@contextmanager
def optional(condition, context_manager):
    """
    Optionally wrap inside `context_manager` if condition is `True`.
    """
    if not condition:
        yield
        return
    with context_manager:
        yield
@contextmanager
def control_flow_prims_disabled():
    """Temporarily force the Python fallbacks for lax control-flow primitives."""
    global _DISABLE_CONTROL_FLOW_PRIM
    previous = _DISABLE_CONTROL_FLOW_PRIM
    _DISABLE_CONTROL_FLOW_PRIM = True
    try:
        yield
    finally:
        # Restore (rather than clear) so nested uses compose correctly.
        _DISABLE_CONTROL_FLOW_PRIM = previous
def cond(pred, true_operand, true_fun, false_operand, false_fun):
    """Branch via `lax.cond`, or a plain Python `if` when primitives are disabled."""
    if not _DISABLE_CONTROL_FLOW_PRIM:
        return lax.cond(pred, true_operand, true_fun, false_operand, false_fun)
    return true_fun(true_operand) if pred else false_fun(false_operand)


def while_loop(cond_fun, body_fun, init_val):
    """`lax.while_loop`, or a plain Python loop when primitives are disabled."""
    if not _DISABLE_CONTROL_FLOW_PRIM:
        return lax.while_loop(cond_fun, body_fun, init_val)
    state = init_val
    while cond_fun(state):
        state = body_fun(state)
    return state


def fori_loop(lower, upper, body_fun, init_val):
    """`lax.fori_loop`, or a plain Python `for` loop when primitives are disabled."""
    if not _DISABLE_CONTROL_FLOW_PRIM:
        return lax.fori_loop(lower, upper, body_fun, init_val)
    state = init_val
    for i in range(int(lower), int(upper)):
        state = body_fun(i, state)
    return state
def identity(x):
    """Return ``x`` unchanged (default no-op transform)."""
    return x
def fori_collect(lower, upper, body_fun, init_val, transform=identity, progbar=True, **progbar_opts):
    """
    This looping construct works like :func:`~jax.lax.fori_loop` but with the additional
    effect of collecting values from the loop body. In addition, this allows for
    post-processing of these samples via `transform`, and progress bar updates.
    Note that, `progbar=False` will be faster, especially when collecting a
    lot of samples. Refer to example usage in :func:`~numpyro.mcmc.hmc`.
    :param int lower: the index to start the collective work. In other words,
        we will skip collecting the first `lower` values.
    :param int upper: number of times to run the loop body.
    :param body_fun: a callable that takes a collection of
        `np.ndarray` and returns a collection with the same shape and
        `dtype`.
    :param init_val: initial value to pass as argument to `body_fun`. Can
        be any Python collection type containing `np.ndarray` objects.
    :param transform: a callable to post-process the values returned by `body_fn`.
    :param progbar: whether to post progress bar updates.
    :param `**progbar_opts`: optional additional progress bar arguments. A
        `diagnostics_fn` can be supplied which when passed the current value
        from `body_fun` returns a string that is used to update the progress
        bar postfix. Also a `progbar_desc` keyword argument can be supplied
        which is used to label the progress bar.
    :return: collection with the same type as `init_val` with values
        collected along the leading axis of `np.ndarray` objects.
    """
    assert lower < upper
    # Flatten the (transformed) pytree once to learn the flat shape and get
    # an unravel function to restore the original structure at the end.
    init_val_flat, unravel_fn = ravel_pytree(transform(init_val))
    ravel_fn = lambda x: ravel_pytree(transform(x))[0]  # noqa: E731
    if not progbar:
        # Fast path: preallocate the output buffer and run the whole loop
        # inside a single jitted lax.fori_loop (no Python-level iteration).
        collection = np.zeros((upper - lower,) + init_val_flat.shape)
        def _body_fn(i, vals):
            val, collection = vals
            val = body_fun(val)
            # Indices below ``lower`` all write to slot 0 (later overwritten),
            # which avoids dynamic control flow inside the traced body.
            i = np.where(i >= lower, i - lower, 0)
            collection = ops.index_update(collection, i, ravel_fn(val))
            return val, collection
        _, collection = jit(fori_loop, static_argnums=(2,))(0, upper, _body_fn,
                                                            (init_val, collection))
    else:
        # Progress-bar path: plain Python loop so tqdm can be updated; each
        # kept sample is ravelled (jitted) and appended.
        diagnostics_fn = progbar_opts.pop('diagnostics_fn', None)
        progbar_desc = progbar_opts.pop('progbar_desc', '')
        collection = []
        val = init_val
        with tqdm.trange(upper, desc=progbar_desc) as t:
            for i in t:
                val = body_fun(val)
                if i >= lower:
                    collection.append(jit(ravel_fn)(val))
                if diagnostics_fn:
                    t.set_postfix_str(diagnostics_fn(val), refresh=False)
        # XXX: jax.numpy.stack/concatenate is currently slow
        collection = onp.stack(collection)
    # Restore the original pytree structure for every collected sample.
    return vmap(unravel_fn)(collection)
def copy_docs_from(source_class, full_text=False):
    """
    Decorator to copy class and method docs from source to destin class.
    """

    def _resolve_doc(name, source_doc):
        # Either copy the docstring verbatim or emit a Sphinx cross-reference
        # pointing back at the source class's method.
        if full_text or source_doc.startswith('See '):
            return source_doc
        return 'See :meth:`{}.{}.{}`'.format(
            source_class.__module__, source_class.__name__, name)

    def decorator(destin_class):
        for name in dir(destin_class):
            if name.startswith('_'):
                continue
            target = getattr(destin_class, name)
            # Unwrap bound/unbound methods to the underlying function.
            target = getattr(target, '__func__', target)
            source_doc = getattr(getattr(source_class, name, None), '__doc__', None)
            # Only fill in docs that exist on the source and are missing on
            # the destination.
            if not source_doc or getattr(target, '__doc__', None):
                continue
            new_doc = _resolve_doc(name, source_doc)
            if isinstance(target, property):
                # A property's __doc__ is read-only, so rebuild the property
                # with the same accessors and the updated docstring.
                setattr(destin_class, name,
                        property(target.fget, target.fset, target.fdel, new_doc))
            else:
                target.__doc__ = new_doc
        return destin_class

    return decorator
| 35.164179 | 101 | 0.630164 |
6dafad1c348fe7504b6adc6f9bef8c99584e152c | 4,987 | py | Python | openspeech/modules/relative_multi_head_attention.py | CanYouImagine/openspeech | 095d78828a9caed0151727897f35534231947846 | [
"Apache-2.0",
"MIT"
] | 207 | 2021-07-22T02:04:47.000Z | 2022-03-31T07:24:12.000Z | openspeech/modules/relative_multi_head_attention.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 81 | 2021-07-21T16:52:22.000Z | 2022-03-31T14:56:54.000Z | openspeech/modules/relative_multi_head_attention.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 43 | 2021-07-21T16:33:27.000Z | 2022-03-23T09:43:49.000Z | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional
from openspeech.modules.wrapper import Linear
class RelativeMultiHeadAttention(nn.Module):
    r"""
    Multi-head attention with relative positional encoding.
    This concept was proposed in the "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
    Args:
        dim (int): The dimension of model
        num_heads (int): The number of attention heads.
        dropout_p (float): probability of dropout
    Inputs: query, key, value, pos_embedding, mask
        - **query** (batch, time, dim): Tensor containing query vector
        - **key** (batch, time, dim): Tensor containing key vector
        - **value** (batch, time, dim): Tensor containing value vector
        - **pos_embedding** (batch, time, dim): Positional embedding tensor
        - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked
    Returns:
        - **outputs**: Tensor produces by relative multi head attention module.
    """
    def __init__(
        self,
        dim: int = 512,
        num_heads: int = 16,
        dropout_p: float = 0.1,
    ) -> None:
        super(RelativeMultiHeadAttention, self).__init__()
        assert dim % num_heads == 0, "d_model % num_heads should be zero."
        self.dim = dim
        # Per-head dimensionality; dim is evenly split across heads.
        self.d_head = int(dim / num_heads)
        self.num_heads = num_heads
        # NOTE(review): scores are scaled by sqrt(dim), not sqrt(d_head) as
        # in vanilla attention — this matches the openspeech implementation.
        self.sqrt_dim = math.sqrt(dim)
        self.query_proj = Linear(dim, dim)
        self.key_proj = Linear(dim, dim)
        self.value_proj = Linear(dim, dim)
        # Projection for the relative positional embeddings (no bias).
        self.pos_proj = Linear(dim, dim, bias=False)
        self.dropout = nn.Dropout(p=dropout_p)
        # Learned content (u) and position (v) bias terms from Transformer-XL.
        self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
        self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
        torch.nn.init.xavier_uniform_(self.u_bias)
        torch.nn.init.xavier_uniform_(self.v_bias)
        self.out_proj = Linear(dim, dim)
    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        pos_embedding: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        batch_size = value.size(0)
        # Project and reshape to (batch, time, heads, d_head); key/value are
        # additionally permuted to (batch, heads, time, d_head).
        query = self.query_proj(query).view(batch_size, -1, self.num_heads, self.d_head)
        key = self.key_proj(key).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
        value = self.value_proj(value).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
        pos_embedding = self.pos_proj(pos_embedding).view(batch_size, -1, self.num_heads, self.d_head)
        # Transformer-XL decomposition: content term uses u_bias, positional
        # term uses v_bias; the latter is realigned by _relative_shift.
        content_score = torch.matmul((query + self.u_bias).transpose(1, 2), key.transpose(2, 3))
        pos_score = torch.matmul((query + self.v_bias).transpose(1, 2), pos_embedding.permute(0, 2, 3, 1))
        pos_score = self._relative_shift(pos_score)
        score = (content_score + pos_score) / self.sqrt_dim
        if mask is not None:
            mask = mask.unsqueeze(1)
            # Large negative (finite) fill keeps fp16-safe masking.
            score.masked_fill_(mask, -1e4)
        attn = F.softmax(score, -1)
        attn = self.dropout(attn)
        context = torch.matmul(attn, value).transpose(1, 2)
        context = context.contiguous().view(batch_size, -1, self.dim)
        return self.out_proj(context)
    def _relative_shift(self, pos_score: Tensor) -> Tensor:
        # Standard Transformer-XL "relative shift": pad one zero column, view
        # with the last two axes swapped, then drop the first row so that each
        # query position is aligned with its relative key positions.
        batch_size, num_heads, seq_length1, seq_length2 = pos_score.size()
        zeros = pos_score.new_zeros(batch_size, num_heads, seq_length1, 1)
        padded_pos_score = torch.cat([zeros, pos_score], dim=-1)
        padded_pos_score = padded_pos_score.view(batch_size, num_heads, seq_length2 + 1, seq_length1)
        pos_score = padded_pos_score[:, :, 1:].view_as(pos_score)
        return pos_score
| 40.877049 | 110 | 0.67295 |
73df76d75d09534335e8e60d3db04b825a0152b1 | 8,059 | py | Python | mmdetection/tools/analysis_tools/robustness_eval.py | lizhaoliu-Lec/Conformer | 577cff26b78b338f035c075727c408fca3272208 | [
"Apache-2.0"
] | null | null | null | mmdetection/tools/analysis_tools/robustness_eval.py | lizhaoliu-Lec/Conformer | 577cff26b78b338f035c075727c408fca3272208 | [
"Apache-2.0"
] | null | null | null | mmdetection/tools/analysis_tools/robustness_eval.py | lizhaoliu-Lec/Conformer | 577cff26b78b338f035c075727c408fca3272208 | [
"Apache-2.0"
] | null | null | null | import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
def print_coco_results(results):
    """Pretty-print the 12 standard COCO AP/AR metrics, one per line.

    Args:
        results (sequence of 12 floats): metric values ordered as in
            pycocotools' ``COCOeval.stats`` (AP, AP50, AP75, APs, APm, APl,
            AR1, AR10, AR100, ARs, ARm, ARl).
    """

    def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
        # Format one line in the familiar pycocotools summary style.
        titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
        typeStr = '(AP)' if ap == 1 else '(AR)'
        iouStr = '0.50:0.95' \
            if iouThr is None else f'{iouThr:0.2f}'
        iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '
        iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'
        print(iStr)

    # Bug fix: the original assigned ``_print``'s return value (None) into a
    # float numpy array (``stats[0] = _print(...)``), which raises TypeError
    # on the very first call.  ``_print`` only prints, so call it directly.
    _print(results[0], 1)
    _print(results[1], 1, iouThr=.5)
    _print(results[2], 1, iouThr=.75)
    _print(results[3], 1, areaRng='small')
    _print(results[4], 1, areaRng='medium')
    _print(results[5], 1, areaRng='large')
    _print(results[6], 0, maxDets=1)
    _print(results[7], 0, maxDets=10)
    _print(results[8], 0)
    _print(results[9], 0, areaRng='small')
    _print(results[10], 0, areaRng='medium')
    _print(results[11], 0, areaRng='large')
def get_coco_style_results(filename,
                           task='bbox',
                           metric=None,
                           prints='mPC',
                           aggregate='benchmark'):
    """Aggregate and print COCO-style corruption-benchmark results.

    Loads a result file produced by the robustness benchmark (a nested dict
    distortion -> severity -> task -> metric) and reports:
      P   - performance on clean data (severity 0 of the first distortion),
      mPC - mean performance under corruption (severities 1..5),
      rPC - relative performance under corruption (mPC / P).

    Args:
        filename: path to the pickled/json result file (loaded via mmcv).
        task: 'bbox' or 'segm' sub-results to aggregate.
        metric: a single metric name, a list of names, or None for all 12.
        prints: which summaries to print ('P', 'mPC', 'rPC', list, or 'all').
        aggregate: 'benchmark' averages only the 15 benchmark corruptions;
            'all' averages every corruption present in the file.

    Returns:
        numpy array of shape (num_distortions, 6, num_metrics) with the raw
        per-distortion, per-severity metric values.
    """
    assert aggregate in ['benchmark', 'all']
    if prints == 'all':
        prints = ['P', 'mPC', 'rPC']
    elif isinstance(prints, str):
        prints = [prints]
    for p in prints:
        assert p in ['P', 'mPC', 'rPC']
    if metric is None:
        metrics = [
            'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
            'ARs', 'ARm', 'ARl'
        ]
    elif isinstance(metric, list):
        metrics = metric
    else:
        metrics = [metric]
    for metric_name in metrics:
        assert metric_name in [
            'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
            'ARs', 'ARm', 'ARl'
        ]
    eval_output = mmcv.load(filename)
    num_distortions = len(list(eval_output.keys()))
    # Axis layout: (distortion, severity 0..5, metric).
    results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')
    for corr_i, distortion in enumerate(eval_output):
        for severity in eval_output[distortion]:
            for metric_j, metric_name in enumerate(metrics):
                mAP = eval_output[distortion][severity][task][metric_name]
                results[corr_i, severity, metric_j] = mAP
    # Severity 0 is the uncorrupted image, so any distortion's row 0 works.
    P = results[0, 0, :]
    if aggregate == 'benchmark':
        # Only the first 15 corruptions belong to the official benchmark.
        mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
    else:
        mPC = np.mean(results[:, 1:, :], axis=(0, 1))
    rPC = mPC / P
    print(f'\nmodel: {osp.basename(filename)}')
    if metric is None:
        if 'P' in prints:
            print(f'Performance on Clean Data [P] ({task})')
            print_coco_results(P)
        if 'mPC' in prints:
            print(f'Mean Performance under Corruption [mPC] ({task})')
            print_coco_results(mPC)
        if 'rPC' in prints:
            # NOTE(review): "Realtive" typo below is in the user-facing
            # output; fixing it would change printed text.
            print(f'Realtive Performance under Corruption [rPC] ({task})')
            print_coco_results(rPC)
    else:
        if 'P' in prints:
            print(f'Performance on Clean Data [P] ({task})')
            for metric_i, metric_name in enumerate(metrics):
                print(f'{metric_name:5} = {P[metric_i]:0.3f}')
        if 'mPC' in prints:
            print(f'Mean Performance under Corruption [mPC] ({task})')
            for metric_i, metric_name in enumerate(metrics):
                print(f'{metric_name:5} = {mPC[metric_i]:0.3f}')
        if 'rPC' in prints:
            print(f'Relative Performance under Corruption [rPC] ({task})')
            for metric_i, metric_name in enumerate(metrics):
                print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')
    return results
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
    """Aggregate and print Pascal-VOC-style corruption-benchmark results.

    Same P/mPC/rPC scheme as :func:`get_coco_style_results`, but the result
    file stores per-class AP50 entries (20 VOC classes) instead of the COCO
    metric dict.

    Returns:
        numpy array of shape (num_distortions, 6, 1): class-averaged AP50
        per distortion and severity.
    """
    assert aggregate in ['benchmark', 'all']
    if prints == 'all':
        prints = ['P', 'mPC', 'rPC']
    elif isinstance(prints, str):
        prints = [prints]
    for p in prints:
        assert p in ['P', 'mPC', 'rPC']
    eval_output = mmcv.load(filename)
    num_distortions = len(list(eval_output.keys()))
    # Axis layout: (distortion, severity 0..5, VOC class).
    results = np.zeros((num_distortions, 6, 20), dtype='float32')
    for i, distortion in enumerate(eval_output):
        for severity in eval_output[distortion]:
            mAP = [
                eval_output[distortion][severity][j]['ap']
                for j in range(len(eval_output[distortion][severity]))
            ]
            results[i, severity, :] = mAP
    P = results[0, 0, :]
    if aggregate == 'benchmark':
        mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
    else:
        mPC = np.mean(results[:, 1:, :], axis=(0, 1))
    rPC = mPC / P
    print(f'\nmodel: {osp.basename(filename)}')
    if 'P' in prints:
        print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')
    if 'mPC' in prints:
        print('Mean Performance under Corruption [mPC] in AP50 = '
              f'{np.mean(mPC):0.3f}')
    if 'rPC' in prints:
        # NOTE(review): "Realtive" typo is part of the printed output.
        print('Realtive Performance under Corruption [rPC] in % = '
              f'{np.mean(rPC) * 100:0.1f}')
    return np.mean(results, axis=2, keepdims=True)
def get_results(filename,
                dataset='coco',
                task='bbox',
                metric=None,
                prints='mPC',
                aggregate='benchmark'):
    """Dispatch corruption-benchmark aggregation to the dataset-specific helper.

    COCO and Cityscapes share the COCO-style result format; Pascal VOC only
    supports bbox/AP50 and falls back to those with a warning.
    """
    assert dataset in ['coco', 'voc', 'cityscapes']
    if dataset == 'voc':
        if task != 'bbox':
            print('Only bbox analysis is supported for Pascal VOC')
            print('Will report bbox results\n')
        if metric not in [None, ['AP'], ['AP50']]:
            print('Only the AP50 metric is supported for Pascal VOC')
            print('Will report AP50 metric\n')
        return get_voc_style_results(
            filename, prints=prints, aggregate=aggregate)
    return get_coco_style_results(
        filename,
        task=task,
        metric=metric,
        prints=prints,
        aggregate=aggregate)
def get_distortions_from_file(filename):
    """Load a robustness result file and return its distortion names."""
    return get_distortions_from_results(mmcv.load(filename))
def get_distortions_from_results(eval_output):
    """Return the distortion names in ``eval_output`` with underscores removed.

    Args:
        eval_output: mapping keyed by distortion name (e.g. 'gaussian_noise').

    Returns:
        list of human-readable distortion names, in iteration order.
    """
    # The original loop carried an unused ``enumerate`` index; a list
    # comprehension expresses the same transformation directly.
    return [distortion.replace('_', ' ') for distortion in eval_output]
def main():
    """CLI entry point: parse arguments and print corruption-benchmark summaries."""
    parser = ArgumentParser(description='Corruption Result Analysis')
    parser.add_argument('filename', help='result file path')
    parser.add_argument(
        '--dataset',
        type=str,
        choices=['coco', 'voc', 'cityscapes'],
        default='coco',
        help='dataset type')
    parser.add_argument(
        '--task',
        type=str,
        nargs='+',
        choices=['bbox', 'segm'],
        default=['bbox'],
        help='task to report')
    parser.add_argument(
        '--metric',
        nargs='+',
        choices=[
            None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
            'AR100', 'ARs', 'ARm', 'ARl'
        ],
        default=None,
        help='metric to report')
    parser.add_argument(
        '--prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print')
    parser.add_argument(
        '--aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those \
        for benchmark corruptions')
    args = parser.parse_args()
    # One report per requested task (bbox and/or segm).
    for task in args.task:
        get_results(
            args.filename,
            dataset=args.dataset,
            task=task,
            metric=args.metric,
            prints=args.prints,
            aggregate=args.aggregate)
if __name__ == '__main__':
    main()
| 32.62753 | 79 | 0.554287 |
37a667e590d47df7ad5d5f36165bcef00c989b6b | 1,842 | py | Python | official/vision/beta/dataloaders/utils.py | gujralsanyam22/models | d96f8f043dbe2b5ca8ea1785f57df8faf68d8875 | [
"Apache-2.0"
] | 15 | 2018-08-15T19:29:39.000Z | 2021-11-05T02:14:59.000Z | official/vision/beta/dataloaders/utils.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 5 | 2020-10-01T09:02:34.000Z | 2021-02-21T12:50:11.000Z | official/vision/beta/dataloaders/utils.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 8 | 2019-06-06T20:37:15.000Z | 2022-03-04T13:54:38.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader utils."""
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import preprocess_ops
def process_source_id(source_id):
  """Processes source_id to the right format.

  Converts string ids to int64 and substitutes the sentinel -1 when the
  tensor is empty, so downstream code always receives a valid integer id.

  Args:
    source_id: `tf.Tensor` of dtype `tf.string` or an integer dtype.
      NOTE(review): the empty-size fallback suggests a scalar/0-or-1 element
      tensor is expected — confirm against callers.

  Returns:
    `tf.Tensor` of dtype `tf.int64`.
  """
  if source_id.dtype == tf.string:
    # Numeric string ids (e.g. "12345") are parsed and widened to int64.
    source_id = tf.cast(tf.strings.to_number(source_id), tf.int64)
  with tf.control_dependencies([source_id]):
    # Empty tensors (missing id) fall back to the sentinel value -1.
    source_id = tf.cond(
        pred=tf.equal(tf.size(input=source_id), 0),
        true_fn=lambda: tf.cast(tf.constant(-1), tf.int64),
        false_fn=lambda: tf.identity(source_id))
  return source_id
def pad_groundtruths_to_fixed_size(groundtruths, size):
  """Pads the first dimension of groundtruths labels to the fixed size.

  Args:
    groundtruths: dict holding 'boxes', 'is_crowds', 'areas' and 'classes'
      tensors.
    size: target length of the leading dimension.

  Returns:
    The same dict with each listed entry clipped or padded to `size`.
  """
  # Per-field pad value: 0 keeps padded entries "not crowd"; -1 marks
  # padded boxes/areas/classes as invalid.
  fill_values = (('boxes', -1), ('is_crowds', 0), ('areas', -1),
                 ('classes', -1))
  for key, fill in fill_values:
    groundtruths[key] = preprocess_ops.clip_or_pad_to_fixed_size(
        groundtruths[key], size, fill)
  return groundtruths
| 40.043478 | 80 | 0.711183 |
f788c16c24983e25f2c6c22334da81ea3519c3c6 | 6,369 | py | Python | SARSA/SARSA.py | knit-pk/AI-Section-2017 | a744b130defe58050264a37d88732af66ecabf40 | [
"MIT"
] | 2 | 2017-11-17T13:32:55.000Z | 2017-11-17T21:29:58.000Z | SARSA/SARSA.py | knit-pk/ai-section-2017 | a744b130defe58050264a37d88732af66ecabf40 | [
"MIT"
] | null | null | null | SARSA/SARSA.py | knit-pk/ai-section-2017 | a744b130defe58050264a37d88732af66ecabf40 | [
"MIT"
] | null | null | null | '''
Example implementation of SARSA algorithm for learning the path through frozen lake.
The is_slippery flag lets us change the rules of the game, if True the probability of
changing the chosen direction is 4 out of 10.
'''
import gym
import numpy as np
import time
import pygame
class Game:
    """Deterministic 4x4 frozen-lake grid world.

    The board is a flat list of 16 tiles: 'S' start, 'F' frozen (safe),
    'H' hole (episode ends, reward -5), 'G' goal (episode ends, reward 1).
    Every non-terminal step costs -0.04.  Actions: 0=left, 1=down,
    2=right, 3=up; moves into a wall leave the position unchanged.
    """

    stan = 0  # current position (flat index 0..15)

    def __init__(self, field):
        self.field = field

    def step(self, action):
        """Apply *action* and return (state, reward, done, info)."""
        position = self.stan
        if action == 0 and position % 4 != 0:      # left, unless at west wall
            position -= 1
        elif action == 1 and position < 12:        # down, unless at south wall
            position += 4
        elif action == 2 and position % 4 != 3:    # right, unless at east wall
            position += 1
        elif action == 3 and position > 3:         # up, unless at north wall
            position -= 4
        self.stan = position
        tile = self.field[position]
        if tile == 'H':
            return position, -5, True, False
        if tile == 'G':
            return position, 1, True, False
        return position, -0.04, False, False

    def reset(self):
        """Move the agent back to the start tile and return its index."""
        self.stan = 0
        return self.stan
def drawGridWorld(Q, field, player, action):
    """Render the grid world, the agent and the Q matrix with pygame.

    Args:
        Q: Q-value matrix, one row per field and one column per action
           (0=left, 1=down, 2=right, 3=up); row width is len(Q[0]).
        field: flat list of tiles ('S' start, 'F' frozen, 'H' hole, 'G' goal).
        player: flat index of the field the agent currently occupies.
        action: the action being executed; drawn as a red marker in the
           top-left corner (must be 0..3).
    """
    pygame.init()
    font = pygame.font.SysFont("monospace", 30, True)
    surface = pygame.display.set_mode((860, 860))  # width x height
    pygame.display.set_caption('GridWorld')
    sleep_time = 0.02
    surface.fill((0, 0, 0))
    row = 0
    column = 0
    offset = 10   # gap between cells, px
    size = 200    # cell size, px
    for pole in range(len(Q)):  # iterate fields row by row
        if pole != 0 and (pole % len(Q[0]) == 0):
            row += 1
            column = 0
        x_cord = offset + offset * column + column * size
        y_cord = offset + offset * row + row * size
        # Field background, coloured by tile type
        field_color = (189, 189, 189)
        if field[pole] == 'H':
            field_color = (33, 33, 33)
        if field[pole] == 'S':
            field_color = (255, 179, 0)
        if field[pole] == 'G':
            field_color = (118, 255, 3)
        pygame.draw.rect(surface, field_color, (x_cord, y_cord, size, size))
        # Player marker plus the arrow for the action being executed
        if pole == player:
            field_color = (3, 169, 244)
            pygame.draw.circle(surface, field_color, (
                int(round(x_cord + size / 2)), int(round(y_cord + size / 2))),
                int(round(size / 2)))
            if action == 0:
                move_action = font.render("<", False, (255, 0, 0))
            if action == 1:
                move_action = font.render("\/", False, (255, 0, 0))
            if action == 2:
                move_action = font.render(">", False, (255, 0, 0))
            if action == 3:
                move_action = font.render("/\\", False, (255, 0, 0))
            surface.blit(move_action, (0, 0))
        # Q-matrix labels: one line per direction, greedy action in red
        best = Q[pole].argmax()
        for i in range(4):
            # BUG FIX: the colour is now chosen per iteration; previously it
            # was set once before the loop and, after the best action turned
            # it red, every following direction was painted red as well.
            color = (255, 0, 0) if i == best else (255, 255, 255)
            if i == 0:  # left
                y_label_cord = y_cord
                direction = 'left'
            if i == 1:  # down
                y_label_cord = y_cord + size / 4
                direction = 'down'
            if i == 2:  # right
                y_label_cord = y_cord + size / 4 * 2
                direction = 'right'
            if i == 3:  # up
                y_label_cord = y_cord + size / 2 + size / 4
                direction = 'up'
            x_label_cord = x_cord
            label = font.render("{}:{}".format(direction, round(Q[pole][i], 3)),
                                False, color)
            surface.blit(label, (x_label_cord, y_label_cord))
        column += 1
    pygame.display.update()
    time.sleep(sleep_time)
def learn(is_slippery):
    """Train a SARSA agent and return the learned Q matrix.

    Args:
        is_slippery: when True, train on gym's stochastic FrozenLake-v0;
            otherwise train on the deterministic local Game grid.

    Returns:
        The learned Q-value matrix (states x actions).
    """
    if is_slippery:
        env = gym.make('FrozenLake-v0')
        n_actions = env.action_space.n
        Q = np.zeros([env.observation_space.n, n_actions])
    else:
        field = ['S', 'F', 'F', 'F',
                 'F', 'H', 'F', 'H',
                 'F', 'F', 'F', 'H',
                 'H', 'F', 'F', 'G'
                 ]
        env = Game(field)
        n_actions = 4
        Q = np.zeros([16, 4])
    alpha = .8   # learning rate
    gamma = .95  # discount factor
    num_episodes = 2000
    for episode in range(num_episodes):
        state = env.reset()
        action = np.argmax(Q[state, :])
        for _ in range(100):
            next_state, reward, done, _ = env.step(action)
            # exploration noise that decays as episodes accumulate
            noise = np.random.randn(1, n_actions) * (1. / (episode + 1))
            next_action = np.argmax(Q[next_state, :] + noise)
            # SARSA update: bootstrap from the action actually chosen next
            Q[state, action] += alpha * (
                reward + gamma * Q[next_state, next_action] - Q[state, action])
            state = next_state
            action = next_action
            if done:
                break
    return Q
def play(inQ, is_slippery):
    """Replay the greedy policy encoded in *inQ* while rendering each step.

    Prints the average per-episode reward over all episodes when done.
    """
    field = ['S', 'F', 'F', 'F',
             'F', 'H', 'F', 'H',
             'F', 'F', 'F', 'H',
             'H', 'F', 'F', 'G'
             ]
    env = gym.make('FrozenLake-v0') if is_slippery else Game(field)
    num_episodes = 2000
    Q = inQ
    rList = []  # per-episode reward history
    for _ in range(num_episodes):
        total_reward = 0
        state = env.reset()
        drawGridWorld(Q, field, state, 0)
        action = np.argmax(Q[state, :])
        for _ in range(100):
            drawGridWorld(Q, field, state, action)
            state, reward, done, _ = env.step(action)
            action = np.argmax(Q[state, :])
            total_reward += reward
            if done:
                break
        rList.append(total_reward)
    print("Score over time: " + str(sum(rList) / num_episodes))
if __name__ == '__main__':
    # Train on the deterministic grid world, then replay the learned policy.
    slippery = False
    q_table = learn(slippery)
    play(q_table, slippery)
| 28.057269 | 87 | 0.477626 |
1619aa8873afea22e625673b61d1ba0a37ecbf5c | 5,574 | py | Python | fastapi_asyncpg/__init__.py | sashad/fastapi_asyncpg | 7a6155e55b66c6f4dd22d11b5a4ad9af2d8efeff | [
"MIT"
] | null | null | null | fastapi_asyncpg/__init__.py | sashad/fastapi_asyncpg | 7a6155e55b66c6f4dd22d11b5a4ad9af2d8efeff | [
"MIT"
] | null | null | null | fastapi_asyncpg/__init__.py | sashad/fastapi_asyncpg | 7a6155e55b66c6f4dd22d11b5a4ad9af2d8efeff | [
"MIT"
] | null | null | null | from __future__ import annotations
from fastapi import FastAPI
import asyncpg
import typing
async def noop(db: asyncpg.Connection):
    """Default init hook: accepts the connection and does nothing."""
    return
class configure_asyncpg:
    """Wires an asyncpg connection pool into a FastAPI application.

    The instance stores the pool on ``app.state.pool`` during startup and
    exposes ready-to-use FastAPI dependencies (`connection`, `transaction`).
    """

    def __init__(
        self,
        dsn: str,
        *,
        init_db: typing.Callable = None,  # callable for running sql on init
        pool=None,  # usable on testing
        **options,
    ):
        """This is the entry point to configure an asyncpg pool with fastapi.

        Arguments
            dsn: A postgresql dsn like postgresql://user:password@postgresql:5432/db
            init_db: Optional callable that receives a db connection,
                for doing an initialitzation of it
            pool: This is used for testing to skip the pool initialitzation
                an just use the SingleConnectionTestingPool
            **options: connection options to directly pass to asyncpg driver
                see: https://magicstack.github.io/asyncpg/current/api/index.html#connection-pools
        """
        self.dsn = dsn
        self.init_db = init_db
        self.con_opts = options
        self._pool = pool

    async def on_connect(self, app):
        """handler called during initialitzation of asgi app, that connects to
        the db"""
        self.app = app
        # if the pool is comming from outside (tests), don't connect it
        if self._pool:
            self.app.state.pool = self._pool
            return
        pool = await asyncpg.create_pool(dsn=self.dsn, **self.con_opts)
        # BUG FIX: init_db defaults to None; the old code awaited it
        # unconditionally, crashing with "'NoneType' object is not callable"
        # whenever no initializer was configured.
        if self.init_db is not None:
            async with pool.acquire() as db:
                await self.init_db(db)
        self.app.state.pool = pool

    async def on_disconnect(self):
        # if the pool is comming from outside, don't desconnect it
        # someone else will do (usualy a pytest fixture)
        if self._pool:
            return
        await self.app.state.pool.close()

    def on_init(self, func):
        """Decorator registering *func* as the init_db callback."""
        self.init_db = func
        return func

    @property
    def pool(self):
        """The asyncpg pool stored on the app during startup."""
        return self.app.state.pool

    async def connection(self):
        """
        A ready to use connection Dependency just usable
        on your path functions that gets a connection from the pool
        Example:
            db = configure_asyncpg(app, "dsn://")
            @app.get("/")
            async def get_content(db = Depens(db.connection)):
                await db.fetch("SELECT * from pg_schemas")
        """
        async with self.pool.acquire() as db:
            yield db

    async def transaction(self):
        """
        A ready to use transaction Dependecy just usable on a path function
        Example:
            db = configure_asyncpg(app, "dsn://")
            @app.get("/")
            async def get_content(db = Depens(db.transaction)):
                await db.execute("insert into keys values (1, 2)")
                await db.execute("insert into keys values (1, 2)")
        All view function executed, are wrapped inside a postgresql transaction
        """
        async with self.pool.acquire() as db:
            txn = db.transaction()
            await txn.start()
            try:
                yield db
            except:  # noqa
                await txn.rollback()
                raise
            else:
                await txn.commit()

    # alias kept for backward compatibility
    atomic = transaction
class SingleConnectionTestingPool:
    """A fake pool that simulates pooling, but runs on
    a single transaction that it's rolled back after
    each test.
    With some large schemas this seems to be faster than
    the other approach
    """

    def __init__(
        self,
        conn: asyncpg.Connection,
        initialize: typing.Callable = None,
        add_logger_postgres: bool = False,
    ):
        self._conn = conn
        self.tx = None            # outer transaction, rolled back on release
        self.started = False
        self.add_logger_postgres = add_logger_postgres
        self.initialize = initialize

    def acquire(self, *, timeout=None):
        """Return an async context manager mimicking Pool.acquire()."""
        return ConAcquireContext(self._conn, self)

    async def start(self):
        """Start the outer transaction and run the initializer (once)."""
        if self.started:
            return

        def log_postgresql(con, message):
            print(message)

        if self.add_logger_postgres:
            self._conn.add_log_listener(log_postgresql)
        self.tx = self._conn.transaction()
        await self.tx.start()
        # BUG FIX: `initialize` defaults to None; guard before awaiting it,
        # otherwise pools created without an initializer crashed here.
        if self.initialize is not None:
            await self.initialize(self._conn)
        self.started = True

    async def release(self):
        """Roll back the outer transaction, discarding all test writes."""
        if self.tx:
            await self.tx.rollback()

    def __getattr__(self, key):
        # delegate everything else to the underlying connection
        return getattr(self._conn, key)
async def create_pool_test(
    dsn: str,
    *,
    initialize: typing.Callable = None,
    add_logger_postgres: bool = False,
):
    """Build a testing "pool": one real connection wrapped so that all work
    happens inside a single transaction (rolled back between tests)."""
    connection = await asyncpg.connect(dsn=dsn)
    return SingleConnectionTestingPool(
        connection,
        initialize=initialize,
        add_logger_postgres=add_logger_postgres,
    )
class ConAcquireContext:
    """Async context manager handed out by SingleConnectionTestingPool's
    acquire(): wraps each acquisition in its own sub-transaction."""

    def __init__(self, conn, manager):
        self._conn = conn
        self.manager = manager

    async def __aenter__(self):
        # lazily start the pool's outer transaction on first acquisition
        if not self.manager.tx:
            await self.manager.start()
        self.tr = self._conn.transaction()
        await self.tr.start()
        return self._conn

    async def __aexit__(self, exc_type, exc, tb):
        # commit on clean exit, roll back when an exception escaped
        if exc_type is None:
            await self.tr.commit()
        else:
            await self.tr.rollback()
9f066043f55a510f45e9edae6fc8cb276b861771 | 28,990 | py | Python | tests/validation/tests/v3_api/test_monitoring.py | gezb/rancher | e11d4781405448701b706cbf505837bf932ac6d9 | [
"Apache-2.0"
] | 1 | 2020-02-19T08:36:18.000Z | 2020-02-19T08:36:18.000Z | tests/validation/tests/v3_api/test_monitoring.py | guichuanghua/rancher | 88be50a880482e3f32d851b9122ee7fc1b1daca3 | [
"Apache-2.0"
] | null | null | null | tests/validation/tests/v3_api/test_monitoring.py | guichuanghua/rancher | 88be50a880482e3f32d851b9122ee7fc1b1daca3 | [
"Apache-2.0"
] | null | null | null | import pytest
import copy
from .common import * # NOQA
# Shared objects created once per session by the module-scoped fixture
# `create_project_client` below and reused by every test.
namespace = {
    "cluster": None,
    "project": None,
    "system_project": None,
    "system_project_client": None
}

# Base payload of the monitor-graph "query" action.  Tests deepcopy this
# template and fill in `obj`, `filters` and `metricParams` before sending it.
cluster_query_template = {
    "obj": None,
    "action_name": "query",
    "filters": {},
    "metricParams": {},
    "interval": "5s",
    "isDetails": True,
    "from": "now-5s",
    "to": "now"
}

# Graph names expected in the query response, per resource type.
cluster_graph_list = [
    "cluster-network-packet",
    "cluster-network-io",
    "cluster-disk-io",
    "cluster-cpu-load",
    "cluster-cpu-usage",
    "cluster-fs-usage-percent",
    "cluster-memory-usage",
]

etcd_graph_list = [
    "etcd-grpc-client",
    "etcd-stream",
    "etcd-raft-proposals",
    "etcd-server-leader-sum",
    "etcd-db-bytes-sum",
    "etcd-sync-duration",
    "etcd-server-failed-proposal",
    "etcd-leader-change",
    "etcd-rpc-rate",
    'etcd-peer-traffic'
]

kube_component_graph_list = [
    "scheduler-total-preemption-attempts",
    "ingresscontroller-nginx-connection",
    "apiserver-request-count",
    "controllermanager-queue-depth",
    "scheduler-e-2-e-scheduling-latency-seconds-quantile",
    "scheduler-pod-unscheduler",
    "apiserver-request-latency",
]

node_graph_list = [
    "node-network-packet",
    "node-network-io",
    "node-fs-usage-percent",
    "node-cpu-load",
    "node-disk-io",
    "node-memory-usage",
    "node-cpu-usage",
]

# Graphs exposed by the fluentd app (requires cluster logging to be enabled).
rancher_component_graph_list = [
    "fluentd-buffer-queue-length",
    "fluentd-input-record-number",
    "fluentd-output-errors",
    "fluentd-output-record-number",
]

workload_graph_list = [
    "workload-network-packet",
    "workload-memory-usage-bytes-sum",
    "workload-cpu-usage",
    "workload-network-io",
    "workload-disk-io",
]

# resource type (as used by validate_cluster_graph) -> expected graph names
name_mapping = {
    "cluster": cluster_graph_list,
    "etcd": etcd_graph_list,
    "kube-component": kube_component_graph_list,
    "rancher-component": rancher_component_graph_list,
    "workload": workload_graph_list,
    "node": node_graph_list,
}

# Helm answers used when enabling cluster-level monitoring.
C_MONITORING_ANSWERS = {"operator-init.enabled": "true",
                        "exporter-node.enabled": "true",
                        "exporter-node.ports.metrics.port": "9796",
                        "exporter-kubelets.https": "true",
                        "exporter-node.resources.limits.cpu": "200m",
                        "exporter-node.resources.limits.memory": "200Mi",
                        "operator.resources.limits.memory": "500Mi",
                        "prometheus.retention": "12h",
                        "grafana.persistence.enabled": "false",
                        "prometheus.persistence.enabled": "false",
                        "prometheus.persistence.storageClass": "default",
                        "grafana.persistence.storageClass": "default",
                        "grafana.persistence.size": "10Gi",
                        "prometheus.persistence.size": "50Gi",
                        "prometheus.resources.core.requests.cpu": "750m",
                        "prometheus.resources.core.limits.cpu": "1000m",
                        "prometheus.resources.core.requests.memory": "750Mi",
                        "prometheus.resources.core.limits.memory": "1000Mi",
                        "prometheus.persistent.useReleaseName": "true"}

# Helm answers used when enabling project-level monitoring.
P_MONITORING_ANSWER = {"prometheus.retention": "12h",
                       "grafana.persistence.enabled": "false",
                       "prometheus.persistence.enabled": "false",
                       "prometheus.persistence.storageClass": "default",
                       "grafana.persistence.storageClass": "default",
                       "grafana.persistence.size": "10Gi",
                       "prometheus.persistence.size": "50Gi",
                       "prometheus.resources.core.requests.cpu": "750m",
                       "prometheus.resources.core.limits.cpu": "1000m",
                       "prometheus.resources.core.requests.memory": "750Mi",
                       "prometheus.resources.core.limits.memory": "1000Mi",
                       "prometheus.persistent.useReleaseName": "true"}

# Chart version to install; empty string means "use the template default".
MONITORING_VERSION = os.environ.get('RANCHER_MONITORING_VERSION', "")
MONITORING_TEMPLATE_ID = "cattle-global-data:system-library-rancher-monitoring"
# App / workload names deployed by cluster and project monitoring.
CLUSTER_MONITORING_APP = "cluster-monitoring"
MONITORING_OPERATOR_APP = "monitoring-operator"
PROJECT_MONITORING_APP = "project-monitoring"
GRAFANA_PROJECT_MONITORING = "grafana-project-monitoring"
PROMETHEUS_PROJECT_MONITORING = "prometheus-project-monitoring"
# Expected number of graph definitions returned by the rancher API.
NUM_PROJECT_MONITOR_GRAPH = 13  # /v3/projectmonitorgraphs
NUM_CLUSTER_MONITOR_GRAPH = 37  # /v3/clustermonitorgraphs
def test_monitoring_cluster_graph():
    """All cluster-level graphs should come back with data points."""
    rancher_client, cluster = get_user_client_and_cluster()
    cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
    # generate the request payload
    query1 = copy.deepcopy(cluster_query_template)
    query1["obj"] = cluster_monitoring_obj
    query1["filters"]["clusterId"] = cluster.id
    query1["filters"]["resourceType"] = "cluster"
    validate_cluster_graph(query1, "cluster")


def test_monitoring_etcd_graph():
    """All etcd graphs should come back with data points."""
    rancher_client, cluster = get_user_client_and_cluster()
    cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
    # generate the request payload
    query1 = copy.deepcopy(cluster_query_template)
    query1["obj"] = cluster_monitoring_obj
    query1["filters"]["clusterId"] = cluster.id
    query1["filters"]["resourceType"] = "etcd"
    validate_cluster_graph(query1, "etcd")


def test_monitoring_kube_component_graph():
    """All kube-component graphs should come back with data points."""
    rancher_client, cluster = get_user_client_and_cluster()
    cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
    # generate the request payload
    query1 = copy.deepcopy(cluster_query_template)
    query1["obj"] = cluster_monitoring_obj
    query1["filters"]["clusterId"] = cluster.id
    query1["filters"]["displayResourceType"] = "kube-component"
    validate_cluster_graph(query1, "kube-component")


# rancher component graphs are from the fluent app for cluster logging
def test_monitoring_rancher_component_graph():
    """Fluentd graphs should be available when cluster logging is enabled."""
    rancher_client, cluster = get_user_client_and_cluster()
    # check if the cluster logging is enabled, assuming fluent is used
    # NOTE(review): this reads `enableClusterAlerting` while the comment
    # above talks about logging -- confirm the intended attribute.
    if cluster.enableClusterAlerting is False:
        print("cluster logging is not enabled, skip the test")
        return
    else:
        cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
        # generate the request payload
        query1 = copy.deepcopy(cluster_query_template)
        query1["obj"] = cluster_monitoring_obj
        query1["filters"]["clusterId"] = cluster.id
        query1["filters"]["displayResourceType"] = "rancher-component"
        validate_cluster_graph(query1, "rancher-component")


def test_monitoring_node_graph():
    """Every node should expose the full set of per-node graphs."""
    rancher_client, cluster = get_user_client_and_cluster()
    node_list_raw = rancher_client.list_node(clusterId=cluster.id).data
    for node in node_list_raw:
        cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
        # generate the request payload
        query1 = copy.deepcopy(cluster_query_template)
        query1["obj"] = cluster_monitoring_obj
        query1["filters"]["clusterId"] = cluster.id
        query1["filters"]["resourceType"] = "node"
        query1["metricParams"]["instance"] = node.id
        validate_cluster_graph(query1, "node")


def test_monitoring_workload_graph():
    """Workload graphs for the grafana deployment should have data points."""
    rancher_client, cluster = get_user_client_and_cluster()
    system_project = rancher_client.list_project(clusterId=cluster.id,
                                                 name="System").data[0]
    project_monitoring_obj = rancher_client.list_projectMonitorGraph()
    # generate the request payload
    query1 = copy.deepcopy(cluster_query_template)
    query1["obj"] = project_monitoring_obj
    query1["filters"]["projectId"] = system_project.id
    query1["filters"]["resourceType"] = "workload"
    query1["metricParams"]["workloadName"] = \
        "deployment:cattle-prometheus:grafana-cluster-monitoring"
    validate_cluster_graph(query1, "workload")


def test_monitoring_project_monitoring():
    """Enable monitoring on the test project and validate it end to end."""
    validate_project_monitoring(namespace["project"], USER_TOKEN)
# ------------------ RBAC for Project Monitoring ------------------
@if_test_rbac
def test_rbac_cluster_owner_control_project_monitoring():
    """A cluster owner can enable and disable monitoring in any project."""
    user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
    user_client = get_client_for_token(user_token)
    project = user_client.reload(rbac_get_project())
    # start from a disabled state so enabling can be exercised
    if project["enableProjectMonitoring"] is True:
        assert "disableMonitoring" in project.actions.keys()
        disable_project_monitoring(project, user_token)
    validate_project_monitoring(project, user_token)


@if_test_rbac
def test_rbac_cluster_member_control_project_monitoring(remove_resource):
    """A cluster member can enable and disable monitoring in his project."""
    user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    user_client = get_client_for_token(user_token)
    # create a new project
    project = create_project(user_client, namespace["cluster"])
    validate_project_monitoring(project, user_token)
    remove_resource(project)


@if_test_rbac
def test_rbac_project_owner_control_project_monitoring():
    """A project owner can enable and disable monitoring in his project."""
    user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
    user_client = get_client_for_token(user_token)
    project = user_client.reload(rbac_get_project())
    if project["enableProjectMonitoring"] is True:
        assert "disableMonitoring" in project.actions.keys()
        disable_project_monitoring(project, user_token)
    validate_project_monitoring(project, user_token)


@if_test_rbac
def test_rbac_project_member_control_project_monitoring():
    """A project member can NOT enable or disable project monitoring."""
    token = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_no_permission_project_monitoring(token)


@if_test_rbac
def test_rbac_project_read_only_control_project_monitoring():
    """A project read-only user can NOT enable or disable monitoring."""
    token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_no_permission_project_monitoring(token)


@if_test_rbac
def test_rbac_project_owner_project_graph_1():
    """A project owner can see graphs in his own project."""
    project = rbac_get_project()
    wl = rbac_get_workload()
    token = rbac_get_user_token_by_role(PROJECT_OWNER)
    check_permission_project_graph(project, wl, token, True)


@if_test_rbac
def test_rbac_project_owner_project_graph_2():
    """A project owner can NOT see graphs in others' projects."""
    project = rbac_get_unshared_project()
    wl = rbac_get_unshared_workload()
    token = rbac_get_user_token_by_role(PROJECT_OWNER)
    check_permission_project_graph(project, wl, token, False)


@if_test_rbac
def test_rbac_project_member_project_graph_1():
    """A project member can see graphs in his own project."""
    project = rbac_get_project()
    wl = rbac_get_workload()
    token = rbac_get_user_token_by_role(PROJECT_MEMBER)
    check_permission_project_graph(project, wl, token, True)


@if_test_rbac
def test_rbac_project_member_project_graph_2():
    """A project member can NOT see graphs in others' projects."""
    project = rbac_get_unshared_project()
    wl = rbac_get_unshared_workload()
    token = rbac_get_user_token_by_role(PROJECT_MEMBER)
    check_permission_project_graph(project, wl, token, False)


@if_test_rbac
def test_rbac_project_read_only_project_graph_1():
    """A project read-only user can see graphs in his own project."""
    project = rbac_get_project()
    wl = rbac_get_workload()
    token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    check_permission_project_graph(project, wl, token, True)


@if_test_rbac
def test_rbac_project_read_only_project_graph_2():
    """A project read-only user can NOT see graphs in other's projects."""
    project = rbac_get_unshared_project()
    wl = rbac_get_unshared_workload()
    token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    check_permission_project_graph(project, wl, token, False)


@if_test_rbac
def test_rbac_cluster_owner_project_graph():
    """A cluster owner can see graphs in all projects."""
    token = rbac_get_user_token_by_role(CLUSTER_OWNER)
    project1 = rbac_get_project()
    wl1 = rbac_get_workload()
    check_permission_project_graph(project1, wl1, token, True)
    project2 = rbac_get_unshared_project()
    wl2 = rbac_get_unshared_workload()
    check_permission_project_graph(project2, wl2, token, True)


@if_test_rbac
def test_rbac_cluster_member_project_graph_1(remove_resource):
    """A cluster member can see graphs in his own project only."""
    token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    project, ns = create_project_and_ns(token,
                                        namespace["cluster"],
                                        random_test_name("cluster-member"))
    p_client = get_project_client_for_token(project, token)
    con = [{"name": "test1", "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id)
    wait_for_wl_to_active(p_client, workload)
    remove_resource(project)
    check_permission_project_graph(project, workload, token, True)


@if_test_rbac
def test_rbac_cluster_member_project_graph_2():
    """A cluster member can NOT see graphs in other's projects."""
    token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    project = rbac_get_project()
    wl = rbac_get_workload()
    check_permission_project_graph(project, wl, token, False)
# ------------------ RBAC for Cluster Monitoring ------------------
@if_test_rbac
def test_rbac_project_owner_cluster_graphs():
    """A project owner can NOT see cluster graphs."""
    token = rbac_get_user_token_by_role(PROJECT_OWNER)
    cluster = namespace["cluster"]
    check_permission_cluster_graph(cluster, token, False)


@if_test_rbac
def test_rbac_project_member_cluster_graphs():
    """A project member can NOT see cluster graphs."""
    token = rbac_get_user_token_by_role(PROJECT_MEMBER)
    cluster = namespace["cluster"]
    check_permission_cluster_graph(cluster, token, False)


@if_test_rbac
def test_rbac_project_read_only_cluster_graphs():
    """A project read-only user can NOT see cluster graphs."""
    token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    cluster = namespace["cluster"]
    check_permission_cluster_graph(cluster, token, False)


@if_test_rbac
def test_rbac_cluster_owner_cluster_graphs():
    """A cluster owner can see cluster graphs."""
    token = rbac_get_user_token_by_role(CLUSTER_OWNER)
    cluster = namespace["cluster"]
    check_permission_cluster_graph(cluster, token, True)


@if_test_rbac
def test_rbac_cluster_member_cluster_graphs():
    """A cluster member can see cluster graphs."""
    token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    cluster = namespace["cluster"]
    check_permission_cluster_graph(cluster, token, True)


@if_test_rbac
def test_rbac_cluster_member_control_cluster_monitoring():
    """A cluster member can NOT enable or disable cluster monitoring."""
    token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    validate_no_permission_cluster_monitoring(token)


@if_test_rbac
def test_rbac_project_owner_control_cluster_monitoring():
    """A project owner can NOT enable or disable cluster monitoring."""
    token = rbac_get_user_token_by_role(PROJECT_OWNER)
    validate_no_permission_cluster_monitoring(token)


@if_test_rbac
def test_rbac_project_member_control_cluster_monitoring():
    """A project member can NOT enable or disable cluster monitoring."""
    token = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_no_permission_cluster_monitoring(token)


@if_test_rbac
def test_rbac_project_read_only_control_cluster_monitoring():
    """A project read-only user can NOT enable/disable cluster monitoring."""
    token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_no_permission_cluster_monitoring(token)


# runs last: it tears down and re-creates cluster monitoring
@pytest.mark.last
@if_test_rbac
def test_rbac_cluster_owner_control_cluster_monitoring():
    """The cluster owner can disable and re-enable cluster monitoring."""
    user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
    client = get_client_for_token(user_token)
    user_client, cluster = get_user_client_and_cluster(client)
    if cluster["enableClusterMonitoring"] is True:
        assert "disableMonitoring" in cluster.actions.keys()
        user_client.action(cluster, "disableMonitoring")
        # sleep 10 seconds to wait for all apps removed
        time.sleep(10)
    cluster = user_client.reload(cluster)
    assert "enableMonitoring" in cluster.actions.keys()
    user_client.action(cluster, "enableMonitoring",
                       answers=C_MONITORING_ANSWERS,
                       version=MONITORING_VERSION)
    validate_cluster_monitoring_apps()
@pytest.fixture(scope="module", autouse="True")
def create_project_client(request):
    """Module-scoped setup: create a test project, resolve the monitoring
    chart version, enable cluster monitoring and register teardown."""
    global MONITORING_VERSION
    rancher_client, cluster = get_user_client_and_cluster()
    create_kubeconfig(cluster)
    project = create_project(rancher_client, cluster,
                             random_test_name("p-monitoring"))
    system_project = rancher_client.list_project(clusterId=cluster.id,
                                                 name="System").data[0]
    sys_proj_client = get_project_client_for_token(system_project, USER_TOKEN)
    namespace["cluster"] = cluster
    namespace["project"] = project
    namespace["system_project"] = system_project
    namespace["system_project_client"] = sys_proj_client
    # fall back to the chart's default version when none was given via env
    monitoring_template = rancher_client.list_template(
        id=MONITORING_TEMPLATE_ID).data[0]
    if MONITORING_VERSION == "":
        MONITORING_VERSION = monitoring_template.defaultVersion
    print("MONITORING_VERSION=" + MONITORING_VERSION)
    # enable the cluster monitoring
    if cluster["enableClusterMonitoring"] is False:
        rancher_client.action(cluster, "enableMonitoring",
                              answers=C_MONITORING_ANSWERS,
                              version=MONITORING_VERSION)
    validate_cluster_monitoring_apps()
    # wait 2 minute for all graphs to be available
    time.sleep(60 * 2)

    def fin():
        # teardown: remove the test project and disable cluster monitoring
        rancher_client.delete(project)
        cluster = rancher_client.reload(namespace["cluster"])
        if cluster["enableClusterMonitoring"] is True:
            rancher_client.action(cluster, "disableMonitoring")

    request.addfinalizer(fin)
def check_data(source, target_list):
    """Check that *source* holds exactly the graphs named in *target_list*.

    Args:
        source: graph-query response; must expose a `data` attribute that is
            a list of entries, each with a non-empty `series` list and a
            `graphID` of the form "<prefix>:<graph name>".
        target_list: list of graph names that must all be present.

    Returns:
        True when every expected graph is present with at least one data
        point and no unexpected graph shows up, False otherwise.
    """
    if not hasattr(source, "data"):
        return False
    data = source.get("data")
    if len(data) == 0:
        print("no graph is received")
        return False
    # BUG FIX: work on a copy and never sort the caller's list in place.
    missing = copy.deepcopy(target_list)
    actual = []
    extra = []
    for item in data:
        if not hasattr(item, "series"):
            return False
        if len(item.series) == 0:
            print("no data point")
            return False
        name = item.get("graphID").split(":")[1]
        actual.append(name)
        # BUG FIX: membership is tested against the not-yet-seen names, so a
        # duplicated graph in the response no longer crashes `remove()` --
        # the duplicate is reported as an extra graph instead.
        if name in missing:
            missing.remove(name)
        else:
            extra.append(name)
    print("target graphs : {}".format(sorted(target_list)))
    print("actual graphs : {}".format(sorted(actual)))
    print("missing graphs: {}".format(sorted(missing)))
    print("extra graphs  : {}".format(sorted(extra)))
    return len(missing) == 0 and len(extra) == 0
def validate_cluster_graph(action_query, resource_type, timeout=10):
    """Poll the graph query until every expected graph reports data.

    Raises AssertionError when the graph set mapped to *resource_type* is
    not fully available within *timeout* seconds.
    """
    expected_graphs = copy.deepcopy(name_mapping.get(resource_type))
    rancher_client, cluster = get_user_client_and_cluster()
    # special case: a cluster with a single etcd node exposes no
    # etcd-peer-traffic graph
    if resource_type == "etcd":
        if len(get_etcd_nodes(cluster, rancher_client)) == 1:
            expected_graphs.remove("etcd-peer-traffic")
    deadline = time.time() + timeout
    while True:
        response = rancher_client.action(**action_query)
        if check_data(response, expected_graphs):
            return
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for all graphs to be available")
        time.sleep(2)
def wait_for_target_up(token, cluster, project, job):
    """wait for a job's state to be up in Prometheus

    Polls the project Prometheus `/api/v1/targets` endpoint (reached through
    the rancher cluster proxy) until an active target labelled with *job*
    reports health "up", or DEFAULT_MONITORING_TIMEOUT elapses.
    """
    project_client = get_project_client_for_token(project, token)
    app = project_client.list_app(name=PROJECT_MONITORING_APP).data[0]
    url = CATTLE_TEST_URL + '/k8s/clusters/' + cluster.id \
        + '/api/v1/namespaces/' + app.targetNamespace \
        + '/services/http:access-prometheus:80/proxy/api/v1/targets'
    headers1 = {'Authorization': 'Bearer ' + token}
    start = time.time()
    while True:
        t = requests.get(headers=headers1, url=url, verify=False).json()
        for item in t["data"]["activeTargets"]:
            if "job" in item["labels"].keys():
                if item["labels"]["job"] == job and item["health"] == "up":
                    return
        if time.time() - start > DEFAULT_MONITORING_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for target to be up")
        time.sleep(5)
def validate_cluster_monitoring_apps():
    """Wait until both cluster-monitoring apps reach the active state."""
    client = namespace["system_project_client"]
    for app_name in (CLUSTER_MONITORING_APP, MONITORING_OPERATOR_APP):
        wait_for_app_to_active(client, app_name)
def validate_no_permission_cluster_monitoring(user_token):
    """Assert the token's user cannot toggle or edit cluster monitoring."""
    client = get_client_for_token(user_token)
    _, cluster = get_user_client_and_cluster(client)
    available = cluster.actions.keys()
    for action in ("enableMonitoring", "disableMonitoring", "editMonitoring"):
        assert action not in available
def validate_no_permission_project_monitoring(user_token):
    """Assert the token's user cannot toggle or edit project monitoring."""
    user_client = get_client_for_token(user_token)
    project = user_client.reload(rbac_get_project())
    available = project.actions.keys()
    for action in ("enableMonitoring", "disableMonitoring", "editMonitoring"):
        assert action not in available
def enable_project_monitoring(project, token):
    """Enable monitoring on *project* (if not already on) and wait until the
    monitoring app, the operator and both Prometheus scrape targets are up.

    :param project: the target project
    :param token: token of the user performing the action
    """
    client = get_client_for_token(token)
    user_client, cluster = get_user_client_and_cluster(client)
    system_project_client = namespace["system_project_client"]
    project = user_client.reload(project)
    project_client = get_project_client_for_token(project, token)
    # enable the project monitoring
    if project["enableProjectMonitoring"] is False:
        assert "enableMonitoring" in project.actions.keys()
        user_client.action(project, "enableMonitoring",
                           answers=P_MONITORING_ANSWER,
                           version=MONITORING_VERSION)
    wait_for_app_to_active(project_client, PROJECT_MONITORING_APP)
    wait_for_app_to_active(system_project_client, MONITORING_OPERATOR_APP)
    # wait for targets to be up
    wait_for_target_up(token, cluster, project, "expose-prometheus-metrics")
    wait_for_target_up(token, cluster, project, "expose-grafana-metrics")
def disable_project_monitoring(project, token):
    """Disable monitoring on *project* and wait (30s max) until its
    monitoring app and both monitoring workloads are gone."""
    user_client = get_client_for_token(token)
    project = user_client.reload(project)
    p_client = get_project_client_for_token(project, token)
    # the action must be offered to this user
    assert "disableMonitoring" in project.actions.keys()
    user_client.action(project, "disableMonitoring")
    deadline = time.time() + 30
    while True:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for disabling project monitoring")
        monitoring_gone = (
            len(p_client.list_app(name=PROJECT_MONITORING_APP).data) == 0
            and len(p_client.list_workload(
                name=PROMETHEUS_PROJECT_MONITORING).data) == 0
            and len(p_client.list_workload(
                name=GRAFANA_PROJECT_MONITORING).data) == 0)
        if monitoring_gone:
            break
def validate_project_prometheus(project, token):
    """
    This function deploys a workload which exposes a metrics
    in the target project, and validate if the metrics is scraped
    by the project prometheus.

    Raises AssertionError when the custom metric never shows up within
    DEFAULT_MONITORING_TIMEOUT seconds.
    """
    cluster = namespace["cluster"]
    project_client = get_project_client_for_token(project, token)
    # deploy a workload to test project monitoring
    cluster_client = get_cluster_client_for_token(cluster, token)
    ns = create_ns(cluster_client, cluster, project, random_name())
    port = {"containerPort": 8080,
            "type": "containerPort",
            "kind": "NodePort",
            "protocol": "TCP"}
    # workloadMetrics tells monitoring to scrape /metrics on port 8080
    metrics = [{"path": "/metrics",
                "port": 8080,
                "schema": "HTTP"}]
    con = [{"name": "test-web",
            "image": "loganhz/web",
            "ports": [port]}]
    wl_name = random_name()
    workload = project_client.create_workload(name=wl_name,
                                              containers=con,
                                              namespaceId=ns.id,
                                              workloadMetrics=metrics)
    wait_for_wl_to_active(project_client, workload)
    app = project_client.list_app(name=PROJECT_MONITORING_APP).data[0]
    # query the workload's custom metric through the prometheus proxy
    url = CATTLE_TEST_URL + '/k8s/clusters/' + cluster.id \
        + '/api/v1/namespaces/' + app.targetNamespace \
        + '/services/http:access-prometheus:80/proxy/api/v1/' \
        + 'query?query=web_app_online_user_count'
    headers1 = {'Authorization': 'Bearer ' + USER_TOKEN}
    start = time.time()
    while True:
        result = requests.get(headers=headers1, url=url, verify=False).json()
        if len(result["data"]["result"]) > 0:
            project_client.delete(workload)
            return
        if time.time() - start > DEFAULT_MONITORING_TIMEOUT:
            project_client.delete(workload)
            raise AssertionError(
                "Timed out waiting for the graph data available in Prometheus")
        time.sleep(5)
def check_permission_project_graph(project, workload, token, permission=True):
    """
    check if the user has the permission to see graphs in the project
    :param project: the target project where graphs are from
    :param workload: the target workload in the project
    :param token: the user's token
    :param permission: the user can see graphs if permission is True
    :return: None
    """
    user_client = get_client_for_token(token)
    project_id = project["id"]
    graph_obj = user_client.list_project_monitor_graph(projectId=project_id)
    # With permission the full set of project graphs is listed, otherwise none.
    expected_graphs = NUM_PROJECT_MONITOR_GRAPH if permission else 0
    assert len(graph_obj.get("data")) == expected_graphs
    query = copy.deepcopy(cluster_query_template)
    query["obj"] = graph_obj
    query["filters"]["projectId"] = project_id
    query["filters"]["resourceType"] = "workload"
    query["metricParams"]["workloadName"] = workload.get("id")
    res = user_client.action(**query)
    if not permission:
        # Unauthorized users must not receive any graph data at all.
        assert "data" not in res.keys()
        return
    # Graph data can lag behind; poll until it is available or we time out.
    deadline = time.time() + DEFAULT_TIMEOUT
    while "data" not in res.keys() and time.time() < deadline:
        time.sleep(10)
        res = user_client.action(**query)
    assert "data" in res.keys()
    assert len(res.get("data")) > 0
def check_permission_cluster_graph(cluster, token, permission=True):
    """
    check if the user has the permission to see graphs in the cluster
    :param cluster: the target cluster where graphs are from
    :param token: the user's token
    :param permission: the user can see graphs if permission is True
    :return: None
    """
    user_client = get_client_for_token(token)
    cluster_id = cluster["id"]
    graph_obj = user_client.list_cluster_monitor_graph(clusterId=cluster_id)
    # With permission the full set of cluster graphs is listed, otherwise none.
    expected_graphs = NUM_CLUSTER_MONITOR_GRAPH if permission else 0
    assert len(graph_obj.get("data")) == expected_graphs
    query = copy.deepcopy(cluster_query_template)
    query["obj"] = graph_obj
    query["filters"]["clusterId"] = cluster.id
    query["filters"]["resourceType"] = "cluster"
    res = user_client.action(**query)
    if not permission:
        # Unauthorized users must not receive any graph data at all.
        assert "data" not in res.keys()
        return
    # Graph data can lag behind; poll until it is available or we time out.
    deadline = time.time() + DEFAULT_TIMEOUT
    while "data" not in res.keys() and time.time() < deadline:
        time.sleep(10)
        res = user_client.action(**query)
    assert "data" in res.keys()
    assert len(res.get("data")) > 0
def validate_project_monitoring(project, token):
    """End-to-end check: enable monitoring on the project, verify the
    project Prometheus scrapes a test workload's metric, then disable
    monitoring again."""
    enable_project_monitoring(project, token)
    validate_project_prometheus(project, token)
    disable_project_monitoring(project, token)
| 38.094612 | 79 | 0.693446 |
f4709b4fa73fd04ac14b56f70a848ee104668d76 | 2,618 | py | Python | src/extraction.py | glemaitre/bow-sklearn-example | 202b1e388596d922319a0e5586d3b96bbabf359a | [
"MIT"
] | null | null | null | src/extraction.py | glemaitre/bow-sklearn-example | 202b1e388596d922319a0e5586d3b96bbabf359a | [
"MIT"
] | null | null | null | src/extraction.py | glemaitre/bow-sklearn-example | 202b1e388596d922319a0e5586d3b96bbabf359a | [
"MIT"
] | null | null | null | from scipy.misc import imread
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.decomposition import PCA
import numpy as np
import os
from joblib import Parallel, delayed
import multiprocessing
def proc_image(path_image, patch_size=(9, 9), max_patches=10000):
    """Extract random patches from one image and flatten them.

    Parameters
    ----------
    path_image : str
        Path of the image to read.
    patch_size : tuple of int, default (9, 9)
        Height and width of the extracted patches.
    max_patches : int, default 10000
        Maximum number of random patches to draw.

    Returns
    -------
    ndarray of shape (n_patches, n_features)
        One flattened patch per row; n_features is
        patch_height * patch_width * n_channels.
    """
    # Fixed seed so the random patch selection is reproducible.
    rng = np.random.RandomState(0)
    im = imread(path_image)
    patch = extract_patches_2d(im, patch_size=patch_size,
                               max_patches=max_patches, random_state=rng)
    # Flatten each patch. Using patch.shape[0] and -1 instead of the previous
    # hard-coded (max_patches, prod(patch_size) * im.ndim) is robust when
    # fewer than max_patches patches can be drawn, and is correct for
    # grayscale images (im.ndim == 2 is not the channel count).
    return patch.reshape((patch.shape[0], -1))
def image_extraction_projection(path_image, dict_PCA, patch_size=(9, 9)):
    """Extract every patch of an image and project it with a fitted PCA.

    Parameters
    ----------
    path_image : str
        Path of the image to read.
    dict_PCA : fitted PCA model
        Dictionary learned on training patches; its ``transform`` is applied
        to each flattened patch.
    patch_size : tuple of int, default (9, 9)
        Height and width of the extracted patches.

    Returns
    -------
    ndarray of shape (n_patches, n_components)
        PCA projection of every patch of the image.
    """
    # max_patches=None -> extract all patches (deterministic, rng unused then).
    max_patches = None
    rng = np.random.RandomState(0)
    im = imread(path_image)
    patch = extract_patches_2d(im, patch_size=patch_size,
                               max_patches=max_patches, random_state=rng)
    # -1 lets numpy infer the flattened patch length; the previous
    # prod(patch_size) * im.ndim expression was only correct for
    # 3-channel images.
    return dict_PCA.transform(patch.reshape((patch.shape[0], -1)))
############### Script starts here ###############
# NOTE: this is a Python 2 script (bare print statements below).
# Parameters for the script
patch_size = (9, 9)
max_patches = 100
n_jobs = -1
n_components = 9
# Define the path name for the data
data_path = '../data/PNGImages'
# Recursively collect every .png file below data_path.
png_files = [os.path.join(root, name)
             for root, dirs, files in os.walk(data_path)
             for name in files
             if name.endswith((".png"))]
############### Dictionary learning through PCA ###############
# Extract the patch
# (one array of max_patches flattened random patches per image, in parallel)
patch_arr = Parallel(n_jobs=n_jobs)(delayed(proc_image)(path_im, patch_size, max_patches)
                                    for path_im in png_files)
print 'Extracted patch to build dictionary'
# Create a plain matrix to apply the PCA decomposition
# (stack all images' patches into one (n_images * max_patches, dim) matrix)
patch_arr = np.array(patch_arr)
patch_arr = patch_arr.reshape((patch_arr.shape[0] * patch_arr.shape[1],
                               patch_arr.shape[2]))
# Build a PCA model
dict_PCA = PCA(n_components=n_components)
dict_PCA.fit(patch_arr)
print 'Built the PCA dictionary'
############### Feature extraction and projection ################
# Extract and project all the image feature
patch_arr = Parallel(n_jobs=n_jobs)(delayed(image_extraction_projection)
                                    (path_im, dict_PCA, patch_size)
                                    for path_im in png_files)
print 'Extracted and projected patches for image classification'
| 31.542169 | 90 | 0.640565 |
0dc8816eceeefa2e4ec18026e1d0e59c283fa0d9 | 1,278 | py | Python | assign_gt_distribution/save_txt.py | SunshineOnLeft/share | cca0f32fbedb935e5a338ddfcb2694701049f907 | [
"Apache-2.0"
] | 1 | 2020-04-28T11:42:04.000Z | 2020-04-28T11:42:04.000Z | assign_gt_distribution/save_txt.py | SunshineOnLeft/mmdetection | cca0f32fbedb935e5a338ddfcb2694701049f907 | [
"Apache-2.0"
] | null | null | null | assign_gt_distribution/save_txt.py | SunshineOnLeft/mmdetection | cca0f32fbedb935e5a338ddfcb2694701049f907 | [
"Apache-2.0"
] | null | null | null |
def save_txt(name, pos_gt_bboxes_size_max, gt_bboxes_size_mean, gt_bboxes_size_std, initiallize_csv):
    """Append one row of assignment statistics to three text files under
    assign_gt_distribution/ (<name>_all.txt, <name>_mean.txt, <name>_std.txt),
    creating them with a 'p3,p4,p5,p6,p7' header row when initiallize_csv is
    truthy.

    Raises Exception("finish collection") once more than 100 lines have been
    written to the mean file; otherwise returns the (possibly reset)
    initiallize_csv flag.
    """
    print("gt_bboxes_size_mean", gt_bboxes_size_mean)
    print("gt_bboxes_size_std", gt_bboxes_size_std)
    all_path = 'assign_gt_distribution/%s_all.txt' % name
    mean_path = 'assign_gt_distribution/%s_mean.txt' % name
    std_path = 'assign_gt_distribution/%s_std.txt' % name
    if initiallize_csv:
        # First call: (re)create all three files with the header row.
        initiallize_csv = 0
        header_row = ",".join(['p3', 'p4', 'p5', 'p6', 'p7']) + "\n"
        for path in (all_path, mean_path, std_path):
            with open(path, 'w') as f:
                f.write(header_row)
    # Note the different separator for the "all" file ('|' vs ',').
    with open(all_path, 'a') as f:
        f.write("|".join(pos_gt_bboxes_size_max) + "\n")
    with open(mean_path, 'a') as f:
        f.write(",".join(gt_bboxes_size_mean) + "\n")
    with open(std_path, 'a') as f:
        f.write(",".join(gt_bboxes_size_std) + "\n")
    print("-----------------------------------------------------")
    with open(mean_path, 'r') as f:
        if len(f.readlines()) > 100:
            raise Exception("finish collection")
    return initiallize_csv
3f18470344020d83e7dd1d65e0f43d7adef00e38 | 579 | py | Python | trinity/components/builtin/metrics/registry.py | AYCH-Inc/aych.eth.client | 1c8be83cebffd889c1c98d48605bba741743f31d | [
"MIT"
] | null | null | null | trinity/components/builtin/metrics/registry.py | AYCH-Inc/aych.eth.client | 1c8be83cebffd889c1c98d48605bba741743f31d | [
"MIT"
] | null | null | null | trinity/components/builtin/metrics/registry.py | AYCH-Inc/aych.eth.client | 1c8be83cebffd889c1c98d48605bba741743f31d | [
"MIT"
] | null | null | null | import time
from types import ModuleType
from typing import Dict, Any
from pyformance import MetricsRegistry
class HostMetricsRegistry(MetricsRegistry):
    """A MetricsRegistry that stamps every dumped metric with the host it
    was collected on, so consumers can filter metrics per host."""

    def __init__(self, host: str, clock: ModuleType = time) -> None:
        super().__init__(clock)
        self.host = host

    def dump_metrics(self) -> Dict[str, Dict[str, Any]]:
        """Return the base registry's metrics with a 'host' field added to
        each metric dict."""
        dumped = super().dump_metrics()
        for metric in dumped.values():
            metric['host'] = self.host
        return dumped
| 26.318182 | 98 | 0.66494 |
a00f00222d693191cf1f99690c0d3a6a91f89010 | 1,727 | py | Python | PythonEngine/VanillaClass.py | marioharper182/OptionsPricing | 0212a1e421380e7a903439aaef93d99373e71fc6 | [
"Apache-2.0"
] | null | null | null | PythonEngine/VanillaClass.py | marioharper182/OptionsPricing | 0212a1e421380e7a903439aaef93d99373e71fc6 | [
"Apache-2.0"
] | null | null | null | PythonEngine/VanillaClass.py | marioharper182/OptionsPricing | 0212a1e421380e7a903439aaef93d99373e71fc6 | [
"Apache-2.0"
] | null | null | null | __author__ = 'HarperMain'
import numpy as np
from numpy import exp, log, sqrt
from scipy.stats import norm
class Vanilla(object):
    """European (vanilla) option priced with the Black-Scholes formula.

    The price is computed once in __init__ via BlackSholes() and cached on
    self.Vanilla; d1 (computed without a dividend adjustment) is cached on
    self.d1 and used by GetDelta().
    """

    def __init__(self, flag, S, K, r, v, T, div):
        # 'c' prices a call, anything else a put.
        self.Vanilla = self.BlackSholes(flag, float(S),
                                        float(K), float(r), float(v),
                                        float(T), float(div))
        self.GetValue()
        self.EuroD1(S, K, r, v, T)
        self.GetDelta()

    def BlackSholes(self, CallPutFlag, S, K, r, v, T, div):
        """Black-Scholes price of a European call ('c') or put with
        continuous dividend yield `div`."""
        vol_sqrt_t = v * sqrt(T)
        d1 = (log(S / K) + (r - div + v * v / 2.) * T) / vol_sqrt_t
        d2 = d1 - vol_sqrt_t
        spot_pv = S * exp(-div * T)
        strike_pv = K * exp(-r * T)
        if CallPutFlag == 'c':
            return spot_pv * self.norm_cdf(d1) - strike_pv * self.norm_cdf(d2)
        return strike_pv * self.norm_cdf(-d2) - spot_pv * self.norm_cdf(-d1)

    def norm_pdf(self, x):
        """Standard normal probability density function"""
        return exp(-0.5 * x * x) * (1.0 / ((2 * np.pi) ** 0.5))

    def norm_cdf(self, x):
        """Polynomial approximation to the standard normal CDF:
        N(x) = \\frac{1}{sqrt(2*\\pi)} \\int^x_{-\\infty} e^{-\\frac{1}{2}s^2} ds"""
        if x < 0.0:
            # Symmetry: N(-x) = 1 - N(x).
            return 1.0 - self.norm_cdf(-x)
        k = 1.0 / (1.0 + 0.2316419 * x)
        poly = k * (0.319381530 + k * (-0.356563782 + k * (1.781477937 + k * (-1.821255978 + 1.330274429 * k))))
        return 1.0 - (1.0 / ((2 * np.pi) ** 0.5)) * exp(-0.5 * x * x) * poly

    def GetValue(self):
        """Return the cached option price."""
        return self.Vanilla

    def EuroD1(self, spot, strike, rate, sigma, dt):
        """Compute and cache d1 (no dividend adjustment here)."""
        self.d1 = (log(spot / strike) + (rate + .5 * sigma * sigma) * dt) / (sigma * sqrt(dt))
        return self.d1

    def GetDelta(self):
        """N(d1) based on the cached d1 (uses scipy's exact normal CDF)."""
        return norm.cdf(self.d1)
97ba86725dc3ce37cfe7f39dcc4726faf5873897 | 711 | py | Python | ditto/pinboard/migrations/0018_auto_20160414_1637.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 54 | 2016-08-15T17:32:41.000Z | 2022-02-27T03:32:05.000Z | ditto/pinboard/migrations/0018_auto_20160414_1637.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 229 | 2015-07-23T12:50:47.000Z | 2022-03-24T10:33:20.000Z | ditto/pinboard/migrations/0018_auto_20160414_1637.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 8 | 2015-09-10T17:10:35.000Z | 2022-03-25T13:05:01.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: alters Bookmark.latitude and
    # Bookmark.longitude to nullable DecimalField(max_digits=9,
    # decimal_places=6) columns.
    dependencies = [
        # Must run after migration 0017 of the pinboard app.
        ("pinboard", "0017_auto_20160413_0906"),
    ]
    operations = [
        migrations.AlterField(
            model_name="bookmark",
            name="latitude",
            field=models.DecimalField(
                decimal_places=6, blank=True, null=True, max_digits=9
            ),
        ),
        migrations.AlterField(
            model_name="bookmark",
            name="longitude",
            field=models.DecimalField(
                decimal_places=6, blank=True, null=True, max_digits=9
            ),
        ),
    ]
| 24.517241 | 69 | 0.561181 |
5ae0f79be99bc946cfed14af3f17815f12a23bb1 | 2,079 | py | Python | python/kserve/test/test_v1beta1_explainer_config.py | ittus/kserve | 922a9b7e8a9a86b5ae65faf4ce863927873fd456 | [
"Apache-2.0"
] | 1,146 | 2019-03-27T21:14:34.000Z | 2021-09-22T08:36:46.000Z | python/kserve/test/test_v1beta1_explainer_config.py | ittus/kserve | 922a9b7e8a9a86b5ae65faf4ce863927873fd456 | [
"Apache-2.0"
] | 1,803 | 2019-03-27T22:16:02.000Z | 2021-09-22T15:27:44.000Z | python/kserve/test/test_v1beta1_explainer_config.py | ittus/kserve | 922a9b7e8a9a86b5ae65faf4ce863927873fd456 | [
"Apache-2.0"
] | 573 | 2019-03-27T21:14:58.000Z | 2021-09-20T21:15:52.000Z | # Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kserve
from kserve.models.v1beta1_explainer_config import V1beta1ExplainerConfig # noqa: E501
from kserve.rest import ApiException
class TestV1beta1ExplainerConfig(unittest.TestCase):
    """Unit test stubs for V1beta1ExplainerConfig (generated scaffolding)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a V1beta1ExplainerConfig for testing.

        include_optional selects between required-only and
        required-plus-optional construction; for this model both variants
        currently pass the same two fields, so a single construction path
        suffices.
        """
        return V1beta1ExplainerConfig(
            default_image_version='0',
            image='0',
        )

    def testV1beta1ExplainerConfig(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 29.7 | 95 | 0.688312 |
1b753960b369bc636d817a9a157a0c0200787288 | 11,445 | py | Python | src/cranet/nn/modules/module.py | shizuku/cranet | 4c86ad16029ed76a74e22b5e5e4c21267d6b9996 | [
"MIT"
] | 4 | 2021-10-31T13:31:13.000Z | 2021-12-11T08:45:36.000Z | src/cranet/nn/modules/module.py | Azathoth1729/cranet | 4c86ad16029ed76a74e22b5e5e4c21267d6b9996 | [
"MIT"
] | null | null | null | src/cranet/nn/modules/module.py | Azathoth1729/cranet | 4c86ad16029ed76a74e22b5e5e4c21267d6b9996 | [
"MIT"
] | 2 | 2021-10-31T13:34:28.000Z | 2021-11-21T09:11:46.000Z | from __future__ import annotations
from cranet import Tensor
from ..parameter import Parameter
from typing import (
Iterator,
Iterable,
Optional,
List,
Dict,
Set,
Union,
Tuple,
Callable,
)
from collections import OrderedDict
class Module:
    """Base class for all neural-network modules (cranet's analogue of a
    torch-style Module).

    Parameters, buffers and sub-modules assigned as attributes are
    intercepted by __setattr__ and stored in internal OrderedDicts so they
    can be enumerated (parameters(), named_modules(), ...), serialized
    (state_dict()) and switched between train/eval mode recursively.
    """
    # Bumped when the state_dict layout changes; recorded per module in
    # the state_dict metadata.
    _version = 1
    # True while in training mode; toggled by train()/eval().
    training: bool
    def __init__(self):
        self.training = True
        # Registries keyed by attribute name; populated via register_* or
        # plain attribute assignment (see __setattr__).
        self._parameters: Dict[str, Optional[Parameter]] = OrderedDict()
        self._buffers: Dict[str, Optional[Tensor]] = OrderedDict()
        # Names of buffers excluded from state_dict.
        self._non_persistent_buffers_set: Set[str] = set()
        self._modules: Dict[str, Optional[Module]] = OrderedDict()
    def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool = True) -> None:
        r"""
        Register a tensor that is part of the module's state but should not
        be considered a model parameter.
        Args:
            name (string): name of the buffer. The buffer can be accessed
                from this module using the given name
            tensor (Tensor): buffer to be registered.
            persistent (bool): whether the buffer is part of this module's
                :attr:`state_dict`.
        Raises:
            KeyError: if the name is empty, contains '.', or clashes with
                an existing non-buffer attribute.
        """
        if name == "":
            raise KeyError("key can't be empty string")
        if '.' in name:
            raise KeyError("key can't contain '.'")
        if hasattr(self, name) and name not in self._buffers:
            raise KeyError(f"attribute {name} already exist")
        self._buffers[name] = tensor
        # Keep the persistence set in sync with the requested flag.
        if persistent:
            self._non_persistent_buffers_set.discard(name)
        else:
            self._non_persistent_buffers_set.add(name)
    def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
        """add a parameter to the module

        The name must be non-empty and dot-free; `param` must be a
        Parameter or None, otherwise TypeError is raised.
        """
        if name == "":
            raise KeyError("key can't be empty string")
        if '.' in name:
            raise KeyError("key can't contain '.'")
        if hasattr(self, name) and name not in self._parameters:
            raise KeyError(f"attribute {name} already exist")
        if param is None:
            # A None placeholder is allowed (e.g. for optional parameters).
            self._parameters[name] = None
        elif not isinstance(param, Parameter):
            raise TypeError("cranet.nn.Parameter or None required")
        else:
            self._parameters[name] = param
    def register_module(self, name: str, module: Optional[Module]) -> None:
        """Register a child module under `name` (None is allowed)."""
        if name == "":
            raise KeyError("key can't be empty string")
        if '.' in name:
            raise KeyError("key can't contain '.'")
        if hasattr(self, name) and name not in self._modules:
            raise KeyError(f"attribute {name} already exist")
        self._modules[name] = module
    def get_name(self) -> str:
        """Return the concrete class name of this module."""
        return self.__class__.__name__
    def zero_grad(self):
        """Reset the gradient of every parameter (recursively)."""
        for p in self.parameters():
            p.zero_grad()
    def forward(self, *args, **kwargs) -> Tensor:
        """Compute the module's output; subclasses must override this."""
        raise NotImplementedError
    def __call__(self, *args, **kwargs) -> Tensor:
        # Calling a module simply dispatches to forward().
        return self.forward(*args, **kwargs)
    def __setstate__(self, state):
        # Unpickling hook: restore the instance dict and backfill the
        # persistence set for objects pickled by older versions.
        self.__dict__.update(state)
        if '_non_persistent_buffers_set' not in self.__dict__:
            self._non_persistent_buffers_set = set()
    def __getattr__(self, name: str):
        # Only called when normal attribute lookup fails: resolve registered
        # parameters, buffers and sub-modules as attributes.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return _parameters[name]
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return _buffers[name]
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return modules[name]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
    def __setattr__(self, name: str, value: Union[Tensor, Module]) -> None:
        """Route Parameter/Module/buffer assignments into their registries;
        everything else falls through to plain attribute assignment."""
        def remove_from(*dicts_or_sets):
            # Drop `name` from any competing registry so the same name never
            # lives in two registries at once.
            for d in dicts_or_sets:
                if name in d:
                    if isinstance(d, dict):
                        del d[name]
                    else:
                        d.discard(name)
        params = self.__dict__.get('_parameters')
        if isinstance(value, Parameter):
            if params is None:
                raise AttributeError(
                    "cannot assign parameters before Module.__init__() call")
            remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set)
            self.register_parameter(name, value)
        elif params is not None and name in params:
            # The name is already a parameter slot: only None may overwrite it.
            if value is not None:
                raise TypeError(f"cannot assign '{type(value)}' as parameter '{name}' "
                                "(torch.nn.Parameter or None expected)")
            self.register_parameter(name, value)
        else:
            modules = self.__dict__.get('_modules')
            if isinstance(value, Module):
                if modules is None:
                    raise AttributeError("cannot assign module before Module.__init__() call")
                remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set)
                modules[name] = value
            elif modules is not None and name in modules:
                # Existing sub-module slot: only None may overwrite it.
                if value is not None:
                    raise TypeError(f"cannot assign '{type(value)}' as child module '{name}' "
                                    "(torch.nn.Module or None expected)")
                modules[name] = value
            else:
                buffers = self.__dict__.get('_buffers')
                if buffers is not None and name in buffers:
                    # Existing buffer slot: only a Tensor or None may overwrite it.
                    if value is not None and not isinstance(value, Tensor):
                        raise TypeError(f"cannot assign '{type(value)}' as buffer '{name}' "
                                        "(torch.Tensor or None expected)")
                    buffers[name] = value
                else:
                    object.__setattr__(self, name, value)
    def __delattr__(self, name):
        # Remove the name from whichever registry holds it, falling back to
        # normal attribute deletion.
        if name in self._parameters:
            del self._parameters[name]
        elif name in self._buffers:
            del self._buffers[name]
            self._non_persistent_buffers_set.discard(name)
        elif name in self._modules:
            del self._modules[name]
        else:
            object.__delattr__(self, name)
    def _named_members(self, get_members_fn: Callable[[Module], Iterable], prefix='', recurse=True) -> Iterator:
        r"""Helper method for yielding various names + members of modules.

        Deduplicates by member identity and joins the module prefix and the
        member name with '.'.
        """
        memo = set()
        modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
        for module_prefix, module in modules:
            members = get_members_fn(module)
            for k, v in members:
                if v is None or v in memo:
                    continue
                memo.add(v)
                name = module_prefix + ('.' if module_prefix else '') + k
                yield name, v
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Parameter]]:
        """Yield (qualified_name, parameter) pairs, recursing into
        sub-modules when `recurse` is True; shared parameters are yielded
        only once."""
        memo: Set[Parameter] = set()
        modules: List[Tuple[str, Module]] = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
        for module_prefix, module in modules:
            members = module._parameters.items()
            for k, v in members:
                if v is None or v in memo:
                    continue
                memo.add(v)
                name = module_prefix + ('.' if module_prefix else '') + k
                yield name, v
    def named_modules(self, memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True):
        """Yield (qualified_name, module) pairs for this module and all of
        its descendants; `memo` prevents revisiting shared sub-modules when
        `remove_duplicate` is True."""
        if memo is None:
            memo = set()
        if self not in memo:
            if remove_duplicate:
                memo.add(self)
            yield prefix, self
            for name, module in self._modules.items():
                if module is None:
                    continue
                submodule_prefix = prefix + ('.' if prefix else '') + name
                for m in module.named_modules(memo, submodule_prefix, remove_duplicate):
                    yield m
    def children(self) -> Iterator['Module']:
        r"""Returns an iterator over immediate children modules.
        Yields:
            Module: a child module
        """
        for name, module in self.named_children():
            yield module
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:
        r"""Returns an iterator over immediate children modules, yielding both
        the name of the module as well as the module itself.
        Yields:
            (string, Module): Tuple containing a name and child module
        """
        memo = set()
        for name, module in self._modules.items():
            if module is not None and module not in memo:
                memo.add(module)
                yield name, module
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
        """Yield the module's parameters (recursively by default)."""
        for name, param in self.named_parameters(recurse=recurse):
            yield param
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Collect all persistent parameters/buffers into an OrderedDict
        keyed by qualified name; `_metadata` records the serialization
        version per module prefix."""
        if destination is None:
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        destination._metadata[prefix[:-1]] = dict(version=self._version)
        self._save_to_state_dict(destination, prefix, keep_vars)
        for name, module in self._modules.items():
            if module is not None:
                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)
        return destination
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Write this module's own parameters and persistent buffers into
        `destination`; tensors are detached unless keep_vars is True."""
        for name, param in self._parameters.items():
            if param is not None:
                destination[prefix + name] = param if keep_vars else param.detach()
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                destination[prefix + name] = buf if keep_vars else buf.detach()
    def train(self, mode: bool = True):
        """Set training mode on this module and all children; returns self."""
        self.training = mode
        for m in self.children():
            m.train(mode)
        return self
    def eval(self):
        """Switch to evaluation mode (train(False))."""
        return self.train(False)
    def extra_repr(self) -> str:
        """Extra information appended to __repr__; subclasses may override."""
        return ''
    def _get_name(self):
        return self.__class__.__name__
    def __repr__(self) -> str:
        """Render 'ClassName(...)' with extra_repr() and indented child
        modules, mirroring the familiar torch module printout."""
        extra_lines = []
        extra_repr = self.extra_repr()
        if extra_repr:
            extra_lines = extra_repr.split('\n')
        child_lines = []
        for key, module in self._modules.items():
            mod_str = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append('(' + key + '): ' + mod_str)
        lines = extra_lines + child_lines
        main_str = self._get_name() + '('
        if lines:
            # A lone extra_repr line stays on one line; otherwise indent all.
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += '\n  ' + '\n  '.join(lines) + '\n'
        main_str += ')'
        return main_str
def _addindent(s_, num_spaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
| 38.023256 | 113 | 0.571865 |
be0f55848f464a912443f5b7d114789d41bdcd34 | 542 | py | Python | example.py | efrontier-io/gnosis-extension | 7212e92cc0902ace0f2800762e1d8c5784dddea8 | [
"MIT"
] | null | null | null | example.py | efrontier-io/gnosis-extension | 7212e92cc0902ace0f2800762e1d8c5784dddea8 | [
"MIT"
] | null | null | null | example.py | efrontier-io/gnosis-extension | 7212e92cc0902ace0f2800762e1d8c5784dddea8 | [
"MIT"
] | null | null | null | import os
from extensions.gnosis_extension import GnosisExchange
def main():
    """Fetch and print the Gnosis balance for the key in GN_PRIVATE on the
    rinkeby network; print usage and return 1 when the variable is unset."""
    private_key = os.getenv('GN_PRIVATE')
    if not private_key:
        print("""
Usage:
    place your private key to GN_PRIVATE:
    export GN_PRIVATE
    run example:
    python example.py
    """)
        return 1
    exchange = GnosisExchange({'secret': private_key, 'network': 'rinkeby'})
    balance = exchange.fetch_balance()
    print(balance)
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    exit(main())
| 22.583333 | 86 | 0.564576 |
c7d6c5f5c9315ecea1c73cc08a574e0cfd032eff | 1,487 | py | Python | nipype/interfaces/fsl/tests/test_auto_CopyGeom.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 7 | 2017-02-17T08:54:26.000Z | 2022-03-10T20:57:23.000Z | nipype/interfaces/fsl/tests/test_auto_CopyGeom.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 1 | 2016-04-25T15:07:09.000Z | 2016-04-25T15:07:09.000Z | nipype/interfaces/fsl/tests/test_auto_CopyGeom.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import CopyGeom
def test_CopyGeom_inputs():
    """Check that the CopyGeom interface's input spec traits match this
    generated metadata map (auto-generated by tools/checkspecs.py)."""
    # Expected trait metadata for every input of CopyGeom.
    input_map = dict(
        args=dict(argstr='%s', ),
        dest_file=dict(
            argstr='%s',
            copyfile=True,
            mandatory=True,
            name_source='dest_file',
            name_template='%s',
            output_name='out_file',
            position=1,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_dims=dict(
            argstr='-d',
            # NOTE(review): position is the *string* '-1' here while other
            # positions are ints -- presumably a generator artifact; confirm
            # against the CopyGeom input spec.
            position='-1',
        ),
        ignore_exception=dict(
            deprecated='1.0.0',
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            argstr='%s',
            mandatory=True,
            position=0,
        ),
        output_type=dict(),
        terminal_output=dict(
            deprecated='1.0.0',
            nohash=True,
        ),
    )
    inputs = CopyGeom.input_spec()
    # Every (trait, metakey) pair above must be present with the same value.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_CopyGeom_outputs():
    """Check that CopyGeom's output spec exposes the expected 'out_file'
    trait (auto-generated by tools/checkspecs.py)."""
    output_map = dict(out_file=dict(), )
    outputs = CopyGeom.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 27.537037 | 67 | 0.535306 |
0568e1e5e39136520add27310c9317e27cd461cb | 975 | py | Python | src/main.py | ericponce/assassin-server | f7e87af00ce784b33a3985d7115caba8e5b7c359 | [
"MIT"
] | null | null | null | src/main.py | ericponce/assassin-server | f7e87af00ce784b33a3985d7115caba8e5b7c359 | [
"MIT"
] | null | null | null | src/main.py | ericponce/assassin-server | f7e87af00ce784b33a3985d7115caba8e5b7c359 | [
"MIT"
] | null | null | null | #!/bin/python
import email_util
import database
import sqlite3
if __name__ == "__main__":
    # Read the mailbox credentials and database name from email_config.cfg
    # (line 1: username, line 2: password, line 3: database).
    f = open('email_config.cfg', 'r')
    username = f.readline().strip()
    password = f.readline().strip()
    dbase = f.readline().strip()
    f.close()
    database.init_table(dbase);
    # Poll the mailbox forever: mails with subject "add" register a user,
    # subject "quit" removes one. (Python 2 script: bare print statements.)
    while(1):
        num, sender, body = email_util.receive_email_subj("add", username, password)
        if (num != -1):
            print "Received new user email requesting add!"
            # Extract the bare address from a "Name <addr>" From header.
            sender = sender[sender.index("<") + 1:-1]
            body = body.split();
            if not body:
                # Empty body: use the sender address as the user's name.
                database.add_user(sender, sender, dbase)
            else:
                # Otherwise the first word of the body is the user's name.
                database.add_user(body[0], sender, dbase)
            email_util.delete_email(num, username, password)
        num, sender, body = email_util.receive_email_subj("quit", username, password)
        if (num != -1):
            print "Received new user email requesting removal!"
            sender = sender[sender.index("<") + 1:-1]
            body = body.split();
            database.remove_user(sender, dbase)
            email_util.delete_email(num, username, password)
| 27.857143 | 79 | 0.685128 |
51c3b090aab506c5c5b7f7161d5fe8e2f7c5d35f | 12,997 | py | Python | quid/quid/training/data.py | hovinhthinh/Qsearch | ed450efbb0eebe1a5ad625422edb435b402b096e | [
"Apache-2.0"
] | 7 | 2022-01-26T16:37:56.000Z | 2022-02-19T09:31:55.000Z | quid/quid/training/data.py | hovinhthinh/Qsearch | ed450efbb0eebe1a5ad625422edb435b402b096e | [
"Apache-2.0"
] | null | null | null | quid/quid/training/data.py | hovinhthinh/Qsearch | ed450efbb0eebe1a5ad625422edb435b402b096e | [
"Apache-2.0"
] | null | null | null | import gzip
import json
import math
import multiprocessing as mp
import os
import time
from functools import lru_cache
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from quantity.kb import parse
from util.iterutils import chunk
from util.mongo import get_collection, open_client, close_client
from util.monitor import CounterMonitor
_STEMMER = PorterStemmer()
_QT_TOKEN = '__QT__'
_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
def _get_all_ids():
    """Return the ids of every document in the 'enwiki-09-2021' collection."""
    cursor = get_collection('enwiki-09-2021').find({}, {'_id': 1})
    return [record['_id'] for record in cursor]
_get_content_collection = None
# Bounded memoization: the previous bare @lru_cache (maxsize=None) kept every
# fetched document alive for the life of the process, which grows without
# limit when iterating the whole Wikipedia dump.
@lru_cache(maxsize=65536)
def _get_content(doc_id):
    """Fetch one Wikipedia document by id, lazily opening the Mongo client.

    The collection handle is cached per process in _get_content_collection;
    results are memoized so repeated lookups of the same doc_id hit the
    cache instead of Mongo.
    """
    global _get_content_collection
    if _get_content_collection is None:
        open_client()
        _get_content_collection = get_collection('enwiki-09-2021')
    return _get_content_collection.find_one({'_id': doc_id})
def _convert_to_train_doc(qt, passages):
content = '\n'.join(passages)
return {
'qt': qt,
'text': '{}{}{}'.format(content[:qt['span'][0]], _QT_TOKEN, content[qt['span'][1]:]),
}
def index_wikipedia():
    """Load the standardized enwiki dump into the 'enwiki-09-2021' Mongo
    collection (dropping any existing data), inserting in batches of 128.

    Each stored doc: {_id: curid, title, source, passages: list[str]}.
    """
    open_client()
    m = CounterMonitor(name='IndexWikipedia')
    m.start()
    collection = get_collection('enwiki-09-2021')
    collection.drop()
    buffer = []
    buffer_size = 128
    # `with` ensures the gzip handle is closed even if a line fails to parse.
    with gzip.open('/GW/D5data-14/hvthinh/enwiki-09-2021/standardized.json.gz', 'rt') as dump:
        for line in dump:
            o = json.loads(line.strip())
            buffer.append({
                # The page id is the 'curid' query parameter of the source URL.
                '_id': o['source'].split('?curid=')[-1],
                'title': o['title'],
                'source': o['source'],
                'passages': o['content'].split('\n'),
            })
            m.inc()
            if len(buffer) == buffer_size:
                collection.insert_many(buffer)
                buffer.clear()
    if buffer:
        # insert_many raises on an empty document list, so only flush a
        # non-empty tail batch.
        collection.insert_many(buffer)
    m.shutdown()
def _qt_recog_func(doc_id):
    """Run the quantity recognizer over every passage of one document and
    return a summary dict {doc_id, source, qts}."""
    content = _get_content(doc_id)
    passages = content['passages']
    recognized = []
    for passage_idx, passage in enumerate(passages):
        for match in parse(passage):
            kb_unit = match.kb_unit
            recognized.append({
                'p_idx': passage_idx,
                'span': match.span,
                'surface': match.surface,
                'value': match.value,
                'unit': str(match.unit),
                # Keep kb_unit as None when no KB entry was linked.
                'kb_unit': None if kb_unit is None else {
                    'entity': kb_unit.entity,
                    'wd_entry': kb_unit.wd_entry,
                    'si_unit': kb_unit.si_unit,
                    'conversion_to_si': kb_unit.conversion_to_si,
                },
            })
    return {
        'doc_id': content['_id'],
        'source': content['source'],
        'qts': recognized,
    }
def _export_quantities_to_mongo(quantity_file):
    """Group the quantities produced by _recognize_quantities by unit
    ("domain") and export every large domain (>= 10000 mentions) into its
    own Mongo collection 'quantities.<domain>', sorted by the SI-normalized
    value."""
    m = CounterMonitor(name='LoadQuantities')
    m.start()
    domain_2_qts = {}
    with gzip.open(quantity_file, 'rt') as f:
        for line in f:
            m.inc()
            try:
                # Each record line ends with a trailing ',' written by
                # _recognize_quantities; strip it before parsing.
                doc = json.loads(line.strip()[:-1])
            except:
                # Non-record lines (e.g. the '[' / ']' wrappers) land here.
                print('Err:', line)
                continue
            for qt in doc['qts']:
                unit = qt['kb_unit']
                if unit is not None:
                    # Prefer the SI unit as the domain key, else the raw entity.
                    unit = unit['si_unit'] if unit['si_unit'] is not None else unit['entity']
                export_qt = {
                    'doc_id': doc['doc_id'],
                    'source': doc['source']
                }
                export_qt.update(qt)
                if unit not in domain_2_qts:
                    domain_2_qts[unit] = []
                domain_2_qts[unit].append(export_qt)
    m.shutdown()
    # Report all domains in decreasing order of mention count.
    domain_2_qts = list(domain_2_qts.items())
    domain_2_qts.sort(reverse=True, key=lambda k: len(k[1]))
    print('Domain stats:')
    for l in domain_2_qts:
        print(l[0], len(l[1]))
    open_client()
    def _export_domain_to_mongo(domain, qts):
        # Normalize every value to SI units (scale 1 when no conversion known).
        for qt in qts:
            scale = 1 if qt['kb_unit'] is None or qt['kb_unit']['conversion_to_si'] is None \
                else qt['kb_unit']['conversion_to_si']
            qt['n_value'] = qt['value'] * scale
        qts.sort(key=lambda k: k['n_value'])
        collection = get_collection('.'.join(['quantities', str(domain)]))
        collection.drop()
        collection.create_index('_id')
        m = CounterMonitor('ExportQuantitiesToMongo-{}'.format(domain), len(qts))
        m.start()
        # Insert in batches of 128 for throughput.
        for ch in chunk(qts, 128):
            collection.insert_many(ch)
            m.inc(len(ch))
    for domain, qts in domain_2_qts:
        if len(qts) >= 10000:
            _export_domain_to_mongo(domain, qts)
def _recognize_quantities(output_file):
    """Run quantity recognition over every Wikipedia document using 256
    worker processes and stream the results into a gzipped file.

    NOTE on the output format: every record -- including the last one -- is
    followed by ',\\n', so the file is *not* strictly valid JSON. The reader
    (_export_quantities_to_mongo) relies on this and strips the trailing
    comma per line; keep the format unchanged.
    """
    open_client()
    ids = _get_all_ids()
    close_client()
    # `with` guarantees the output file is closed even if a worker fails
    # (the original left the handle open on exceptions).
    with gzip.open(output_file, 'wt') as f:
        f.write('[\n')
        n_written = 0
        last_report = time.time()
        with mp.Pool(256) as pool:
            for doc in pool.imap_unordered(_qt_recog_func, ids):
                f.write(json.dumps(doc))
                n_written += 1
                # Trailing comma after every record (see format note above).
                f.write(',\n')
                if time.time() > last_report + 10:
                    # Throttled progress report (at most every 10 seconds).
                    print('\rProcessed: {}'.format(n_written))
                    last_report = time.time()
        f.write(']\n')
# augmented frequency: tf = 0.5 + 0.5 * (f / max_f)
def _tf_set(content):
    """Map each stemmed token of `content` to its augmented term frequency
    tf = 0.5 + 0.5 * (count / max_count); empty input yields {}."""
    counts = {}
    for token in word_tokenize(content):
        stem = _STEMMER.stem(token)
        counts[stem] = counts.get(stem, 0) + 1
    if counts:
        top = max(counts.values())
        for stem in counts:
            counts[stem] = 0.5 + 0.5 * counts[stem] / top
    return counts
def _wordset_stemming_func(doc_id):
    """Return the distinct stemmed tokens of one document's full text.

    Note: pops 'passages' off the (cached) content dict, matching the
    original behavior.
    """
    passages = _get_content(doc_id).pop('passages')
    return list(_tf_set(' '.join(passages)).keys())
def _create_df_wikipedia():
    """Compute document frequencies over all Wikipedia articles using 256
    worker processes and dump them as JSON into df_wiki.gz next to this
    module.

    The resulting dict maps stemmed word -> number of docs containing it,
    plus the special key '_N_DOC' holding the corpus size.
    """
    open_client()
    ids = _get_all_ids()
    close_client()
    count = {
        '_N_DOC': len(ids)
    }
    n_printed = 0
    start = time.time()
    with mp.Pool(256) as pool:
        # Each worker returns the distinct stemmed tokens of one document.
        for doc in pool.imap_unordered(_wordset_stemming_func, ids):
            for w in doc:
                if w not in count:
                    count[w] = 1
                else:
                    count[w] += 1
            n_printed += 1
            if time.time() > start + 10:
                # Throttled progress report (at most every 10 seconds).
                print('\rProcessed: {}'.format(n_printed))
                start = time.time()
    with gzip.open(os.path.join(os.path.dirname(__file__), 'df_wiki.gz'), 'wt') as f:
        f.write(json.dumps(count))
class IDF:
    """Inverse-document-frequency tables loaded from the df_wiki.gz dump.

    Call _load_idf_wikipedia() once before using the getters.  Two weightings
    are kept: the standard log(N / (df + 1)) and the Robertson-Sparck Jones
    probabilistic idf log10((N - df + 0.5) / (df + 0.5)).  Out-of-vocabulary
    words fall back to the df = 0 value unless allow_oov is False.
    """
    _DEFAULT_IDF = None
    _ROBERTSON_IDF = None
    _OOV_DEFAULT_IDF = None
    _OOV_ROBERTSON_IDF = None
    _MIN_IDF = 1e-6  # floor so weights never reach zero/negative

    @staticmethod
    def _load_idf_wikipedia():
        """Populate the class-level idf tables from the df dump."""
        IDF._DEFAULT_IDF = {}
        IDF._ROBERTSON_IDF = {}
        df = json.loads(gzip.open(os.path.join(os.path.dirname(__file__), 'df_wiki.gz'), 'rt').read())
        n_doc = df['_N_DOC']
        for w, f in df.items():
            if w == '_N_DOC':
                continue
            IDF._DEFAULT_IDF[w] = max(IDF._MIN_IDF, math.log(n_doc / (f + 1)))
            # BUG FIX: the ratio belongs *inside* the log, matching the
            # Robertson-Sparck Jones formula and the OOV value below; the
            # original computed log10(n_doc - f + 0.5) / (f + 0.5), i.e.
            # divided the logarithm by f + 0.5.
            IDF._ROBERTSON_IDF[w] = max(IDF._MIN_IDF, math.log10((n_doc - f + 0.5) / (f + 0.5)))
        IDF._OOV_DEFAULT_IDF = max(IDF._MIN_IDF, math.log(n_doc / (0 + 1)))  # df = 0
        IDF._OOV_ROBERTSON_IDF = max(IDF._MIN_IDF, math.log10((n_doc - 0 + 0.5) / (0 + 0.5)))  # df = 0

    @staticmethod
    def get_default_idf(word, allow_oov=True, stemming=True):
        """Standard idf of *word*; pass stemming=False for pre-stemmed input."""
        idf = IDF._DEFAULT_IDF.get(_STEMMER.stem(word) if stemming else word)
        if idf is not None:
            return idf
        else:
            return IDF._OOV_DEFAULT_IDF if allow_oov else IDF._MIN_IDF

    @staticmethod
    def get_robertson_idf(word, allow_oov=True, stemming=True):
        """Robertson-Sparck Jones idf of *word*; stemming=False skips stemming."""
        idf = IDF._ROBERTSON_IDF.get(_STEMMER.stem(word) if stemming else word)
        if idf is not None:
            return idf
        else:
            return IDF._OOV_ROBERTSON_IDF if allow_oov else IDF._MIN_IDF
def tdidf_doc_sim(content_1, content_2):
    """Cosine similarity between the tf-idf vectors of two text strings.

    Returns a float in [0, 1].  ROBUSTNESS FIX: returns 0.0 when either text
    tokenizes to nothing (the original raised ZeroDivisionError on a
    zero-length vector).
    """
    tf_1 = _tf_set(content_1)
    tf_2 = _tf_set(content_2)
    for w in tf_1:
        # tokens are already stemmed by _tf_set, hence stemming=False
        tf_1[w] *= IDF.get_default_idf(w, stemming=False)
    for w in tf_2:
        tf_2[w] *= IDF.get_default_idf(w, stemming=False)
    len_1 = math.sqrt(sum([v ** 2 for v in tf_1.values()]))
    len_2 = math.sqrt(sum([v ** 2 for v in tf_2.values()]))
    if len_1 == 0 or len_2 == 0:
        return 0.0
    dot_prod = sum([tf_1.get(w, 0) * tf_2.get(w, 0) for w in set.union(set(tf_1.keys()), set(tf_2.keys()))])
    return dot_prod / len_1 / len_2
def _numeric_dist(a, b):
return 0 if a == b == 0 else abs(a - b) / max(abs(a), abs(b))
# Module-level holder for the quantity list being paired.  Set by
# _generate_positive_training_pairs before the pool is created so that
# forked workers running _qt_pair_eval_func can read it without it being
# pickled per task.
_QTS = None
def _qt_pair_eval_func(input):
    """Evaluate one cluster [l, r) of the global _QTS for positive pairs.

    *input* is a tuple ``(l, r, thresholds)`` indexing into _QTS.  Two
    quantities form a candidate pair when they come from different documents,
    their values are close enough, and both their paragraphs and whole
    documents are tf-idf-similar above the given thresholds.

    NOTE(review): the early ``break`` assumes the cluster is sorted ascending
    by 'n_value' (quantities are sorted before export) — confirm against
    _export_domain_to_mongo.

    Returns a list of training-pair dicts.
    """
    l, r, thresholds = input
    output = []
    for i in range(l, r):
        ca = _get_content(_QTS[i]['doc_id'])['passages']
        for j in range(i + 1, r):
            rel_dist = _numeric_dist(_QTS[i]['n_value'], _QTS[j]['n_value'])
            if rel_dist > thresholds['max_candidate_relative_qt_dist']:
                # values sorted ascending: every later j is even further away
                break
            if _QTS[i]['doc_id'] == _QTS[j]['doc_id']:
                # never pair a document with itself
                continue
            cb = _get_content(_QTS[j]['doc_id'])['passages']
            # TF-IDF par sim (similarity of the two containing paragraphs)
            par_sim = tdidf_doc_sim(ca[_QTS[i]['p_idx']], cb[_QTS[j]['p_idx']])
            if par_sim < thresholds['min_tfidf_par_sim']:
                continue
            # TF-IDF doc sim (similarity of the two whole documents)
            doc_sim = tdidf_doc_sim(' '.join(ca), ' '.join(cb))
            if doc_sim < thresholds['min_tfidf_doc_sim']:
                continue
            output.append({
                'doc_1': _convert_to_train_doc(_QTS[i], ca),
                'doc_2': _convert_to_train_doc(_QTS[j], cb),
                'tfidf_doc_sim': doc_sim,
                'tfidf_par_sim': par_sim,
                'qt_dist': rel_dist,
                'cl_size': r - l,
            })
    return output
def _generate_positive_training_pairs(domain,
                                      max_cluster_size=100,
                                      max_onchain_relative_qt_dist=0,
                                      min_tfidf_doc_sim=0.1,
                                      min_tfidf_par_sim=0.1,
                                      max_candidate_relative_qt_dist=0,
                                      ):
    """Mine likely-positive training pairs of near-equal quantities for *domain*.

    Loads the extracted quantities for unit *domain* (e.g. '<Metre>') from
    mongo, chains consecutive quantities whose relative value distance stays
    within `max_onchain_relative_qt_dist` into clusters, evaluates every
    cluster of size 2..max_cluster_size in parallel with _qt_pair_eval_func,
    and stores accepted pairs in the 'train.positive.<domain>' collection.

    NOTE(review): the chaining assumes 'quantities.<domain>' is stored in
    ascending 'n_value' order — confirm (the exporter sorts by n_value).
    """
    open_client()
    qts = [doc for doc in get_collection('.'.join(['quantities', domain])).find({})]
    close_client()
    for qt in qts:
        # drop mongo's _id so pair documents get fresh sequential ids below
        qt.pop('_id')
    global _QTS
    _QTS = qts  # shared with forked pool workers via the module global
    eval_input = []
    r = 0
    for l in range(len(qts)):
        if l < r:
            # l falls inside the previously found cluster; skip ahead
            continue
        r = l + 1
        while r < len(qts) and _numeric_dist(qts[r - 1]['n_value'], qts[r]['n_value']) <= max_onchain_relative_qt_dist:
            r += 1
        if r - l > max_cluster_size or r - l == 1:
            # singletons yield no pairs; oversized clusters are too noisy
            continue
        eval_input.append((l, r, {
            'min_tfidf_doc_sim': min_tfidf_doc_sim,
            'min_tfidf_par_sim': min_tfidf_par_sim,
            'max_candidate_relative_qt_dist': max_candidate_relative_qt_dist,
        }))
    m = CounterMonitor('GeneratePositivePairs-{}'.format(domain), len(eval_input))
    m.start()
    cnt = 0
    with mp.Pool(256) as pool:
        pool_output = pool.imap_unordered(_qt_pair_eval_func, eval_input)
        open_client()
        collection = get_collection('.'.join(['train', 'positive', domain]))
        collection.drop()
        collection.create_index('_id')
        for pos_samples in pool_output:
            for sample in pos_samples:
                cnt += 1
                sample['_id'] = cnt  # sequential id across all clusters
            if len(pos_samples) > 0:
                collection.insert_many(pos_samples)
            m.inc()
def _sample_collection(source, destination,
                       min_tdidf_doc_sim=0,
                       min_tdidf_par_sim=0,
                       n=50):
    """Copy a random sample of up to *n* docs from *source* into *destination*.

    Only docs whose tfidf_doc_sim / tfidf_par_sim meet the given minimums are
    eligible; *destination* is dropped first.
    """
    open_client()
    source = get_collection(source)
    destination = get_collection(destination)
    destination.drop()
    filtered = list(source.aggregate([
        {'$match': {'tfidf_doc_sim': {'$gte': min_tdidf_doc_sim},
                    'tfidf_par_sim': {'$gte': min_tdidf_par_sim}}},
        {'$sample': {'size': n}}
    ]))
    # ROBUSTNESS FIX: pymongo's insert_many raises InvalidOperation when
    # handed an empty sequence, which happens whenever the $match stage
    # filters everything out.
    if filtered:
        destination.insert_many(filtered)
def _calculate_precision(collection):
    """Print labelling precision (positives / labelled) for *collection*."""
    open_client()
    total = 0
    labeled = 0
    positive = 0
    for doc in get_collection(collection).find():
        total += 1
        if 'ok' in doc:
            labeled += 1
            if doc['ok']:
                positive += 1
    precision = None if labeled == 0 else positive / labeled
    print('Precision: {} ({}/{}) -- from Total: {}'.format(precision, positive,
                                                           labeled, total))
if __name__ == '__main__':
    # Pipeline stages, run one at a time (earlier stages stay commented out
    # once their artifacts exist):
    # index_wikipedia()
    # _create_df_wikipedia()
    # _recognize_quantities('/GW/D5data-14/hvthinh/quid/wikipedia_quantities.gz')
    # _export_quantities_to_mongo('/GW/D5data-14/hvthinh/quid/wikipedia_quantities.gz')
    # IDF._load_idf_wikipedia()
    # _generate_positive_training_pairs('<Metre>')
    _sample_collection('train.positive.<Metre>', 'train.positive.<Metre>.shuf')
    # print(_calculate_precision('train.positive.<Metre>.shuf'))
| 31.019093 | 134 | 0.556898 |
58e7949850053c3cd8529f45ac0d6d66594a7f04 | 1,493 | py | Python | polyaxon/signals/utils.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/signals/utils.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/signals/utils.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | from db.models.bookmarks import Bookmark
from libs.paths.data_paths import validate_persistence_data
from libs.paths.outputs_paths import validate_persistence_outputs
from schemas.environments import PersistenceConfig
def set_tags(instance):
    """Default an instance's tags from its specification when none are set."""
    tags_missing = not instance.tags
    if tags_missing and instance.specification:
        instance.tags = instance.specification.tags
def set_persistence(instance, default_persistence_data=None, default_persistence_outputs=None):
    """Populate ``instance.persistence`` from its specification, with fallbacks.

    No-op when persistence is already set.  Data/outputs come from the
    instance's specification when present, otherwise from the given defaults;
    both are validated and stored as a serialized PersistenceConfig dict.
    """
    if instance.persistence:
        return
    data = None
    outputs = None
    spec = instance.specification
    if spec and spec.persistence:
        data = spec.persistence.data
        outputs = spec.persistence.outputs
    if default_persistence_data and not data:
        data = default_persistence_data
    if default_persistence_outputs and not outputs:
        outputs = default_persistence_outputs
    data = validate_persistence_data(persistence_data=data)
    outputs = validate_persistence_outputs(persistence_outputs=outputs)
    config = PersistenceConfig(data=data, outputs=outputs)
    instance.persistence = config.to_dict()
def remove_bookmarks(object_id, content_type):
    """Delete every Bookmark pointing at the object (*content_type* model
    name, *object_id* pk), e.g. when the bookmarked object is removed."""
    # Remove any bookmark
    Bookmark.objects.filter(content_type__model=content_type, object_id=object_id).delete()
| 41.472222 | 95 | 0.816477 |
8f41f982cdf2b79153994102ddea19d2d3273072 | 94,671 | py | Python | plotly_study/graph_objs/histogram/marker/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/histogram/marker/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/histogram/marker/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color`is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets themarker.linecolor. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to histogram.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly_study.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color`is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on plot.ly for width .
The 'widthsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram.marker"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on plot.ly for width .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.marker.Line
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on plot.ly for width .
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.marker.Line
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.marker.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram.marker import line as v_line
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_line.AutocolorscaleValidator()
self._validators["cauto"] = v_line.CautoValidator()
self._validators["cmax"] = v_line.CmaxValidator()
self._validators["cmid"] = v_line.CmidValidator()
self._validators["cmin"] = v_line.CminValidator()
self._validators["color"] = v_line.ColorValidator()
self._validators["coloraxis"] = v_line.ColoraxisValidator()
self._validators["colorscale"] = v_line.ColorscaleValidator()
self._validators["colorsrc"] = v_line.ColorsrcValidator()
self._validators["reversescale"] = v_line.ReversescaleValidator()
self._validators["width"] = v_line.WidthValidator()
self._validators["widthsrc"] = v_line.WidthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
_v = arg.pop("widthsrc", None)
self["widthsrc"] = widthsrc if widthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.colorbar.Tickfont
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly_study.graph_objs.histogram.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly_study.graph_objs.histogram.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly_study.graph_objs.histogram.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.histogram.mark
er.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
histogram.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.colorbar.Tickformatstop
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly_study.graph_objs.histogram.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on plot.ly for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on plot.ly for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.colorbar.Title
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly_study.graph_objs.histogram.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram.marker"
# Self properties description
# ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text summary of every property of this object.
        # NOTE(review): presumably consumed by the base plotly machinery for
        # generated docstrings/help text — confirm against the base class.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format And for
            dates see: https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format We add
            one item to d3's date formatter: "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of plotly_study.graph_objects.histogram.marker.colorb
            ar.Tickformatstop instances or dicts with compatible
            properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.histog
            ram.marker.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            histogram.marker.colorbar.tickformatstops
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on plot.ly for ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on plot.ly for tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            plotly_study.graph_objects.histogram.marker.colorbar.Title
            instance or dict with compatible properties
        titlefont
            Deprecated: Please use
            histogram.marker.colorbar.title.font instead. Sets this
            color bar's title font. Note that the title's font used
            to be set by the now deprecated `titlefont` attribute.
        titleside
            Deprecated: Please use
            histogram.marker.colorbar.title.side instead.
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
    self,
    arg=None,
    bgcolor=None,
    bordercolor=None,
    borderwidth=None,
    dtick=None,
    exponentformat=None,
    len=None,
    lenmode=None,
    nticks=None,
    outlinecolor=None,
    outlinewidth=None,
    separatethousands=None,
    showexponent=None,
    showticklabels=None,
    showtickprefix=None,
    showticksuffix=None,
    thickness=None,
    thicknessmode=None,
    tick0=None,
    tickangle=None,
    tickcolor=None,
    tickfont=None,
    tickformat=None,
    tickformatstops=None,
    tickformatstopdefaults=None,
    ticklen=None,
    tickmode=None,
    tickprefix=None,
    ticks=None,
    ticksuffix=None,
    ticktext=None,
    ticktextsrc=None,
    tickvals=None,
    tickvalssrc=None,
    tickwidth=None,
    title=None,
    titlefont=None,
    titleside=None,
    x=None,
    xanchor=None,
    xpad=None,
    y=None,
    yanchor=None,
    ypad=None,
    **kwargs
):
    """
    Construct a new ColorBar object

    NOTE: this module is auto-generated from the Plotly schema; the
    parameter `len` intentionally mirrors the schema name and shadows
    the built-in `len` inside this constructor.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        plotly_study.graph_objs.histogram.marker.ColorBar
    bgcolor
        Sets the color of padded area.
    bordercolor
        Sets the axis line color.
    borderwidth
        Sets the width (in px) or the border enclosing this
        color bar.
    dtick
        Sets the step in-between ticks on this axis. Use with
        `tick0`. Must be a positive number, or special strings
        available to "log" and "date" axes. If the axis `type`
        is "log", then ticks are set every 10^(n*dtick) where n
        is the tick number. For example, to set a tick mark at
        1, 10, 100, 1000, ... set dtick to 1. To set tick marks
        at 1, 100, 10000, ... set dtick to 2. To set tick marks
        at 1, 5, 25, 125, 625, 3125, ... set dtick to
        log_10(5), or 0.69897000433. "log" has several special
        values; "L<f>", where `f` is a positive number, gives
        ticks linearly spaced in value (but not position). For
        example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
        at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
        small digits between, use "D1" (all digits) or "D2"
        (only 2 and 5). `tick0` is ignored for "D1" and "D2".
        If the axis `type` is "date", then you must convert the
        time to milliseconds. For example, to set the interval
        between ticks to one day, set `dtick` to 86400000.0.
        "date" also has special values "M<n>" gives ticks
        spaced by a number of months. `n` must be a positive
        integer. To set ticks on the 15th of every third month,
        set `tick0` to "2000-01-15" and `dtick` to "M3". To set
        ticks every 4 years, set `dtick` to "M48"
    exponentformat
        Determines a formatting rule for the tick exponents.
        For example, consider the number 1,000,000,000. If
        "none", it appears as 1,000,000,000. If "e", 1e+9. If
        "E", 1E+9. If "power", 1x10^9 (with 9 in a super
        script). If "SI", 1G. If "B", 1B.
    len
        Sets the length of the color bar This measure excludes
        the padding of both ends. That is, the color bar length
        is this length minus the padding on both ends.
    lenmode
        Determines whether this color bar's length (i.e. the
        measure in the color variation direction) is set in
        units of plot "fraction" or in *pixels. Use `len` to
        set the value.
    nticks
        Specifies the maximum number of ticks for the
        particular axis. The actual number of ticks will be
        chosen automatically to be less than or equal to
        `nticks`. Has an effect only if `tickmode` is set to
        "auto".
    outlinecolor
        Sets the axis line color.
    outlinewidth
        Sets the width (in px) of the axis line.
    separatethousands
        If "true", even 4-digit integers are separated
    showexponent
        If "all", all exponents are shown besides their
        significands. If "first", only the exponent of the
        first tick is shown. If "last", only the exponent of
        the last tick is shown. If "none", no exponents appear.
    showticklabels
        Determines whether or not the tick labels are drawn.
    showtickprefix
        If "all", all tick labels are displayed with a prefix.
        If "first", only the first tick is displayed with a
        prefix. If "last", only the last tick is displayed with
        a suffix. If "none", tick prefixes are hidden.
    showticksuffix
        Same as `showtickprefix` but for tick suffixes.
    thickness
        Sets the thickness of the color bar This measure
        excludes the size of the padding, ticks and labels.
    thicknessmode
        Determines whether this color bar's thickness (i.e. the
        measure in the constant color direction) is set in
        units of plot "fraction" or in "pixels". Use
        `thickness` to set the value.
    tick0
        Sets the placement of the first tick on this axis. Use
        with `dtick`. If the axis `type` is "log", then you
        must take the log of your starting tick (e.g. to set
        the starting tick to 100, set the `tick0` to 2) except
        when `dtick`=*L<f>* (see `dtick` for more info). If the
        axis `type` is "date", it should be a date string, like
        date data. If the axis `type` is "category", it should
        be a number, using the scale where each category is
        assigned a serial number from zero in the order it
        appears.
    tickangle
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the
        tick labels vertically.
    tickcolor
        Sets the tick color.
    tickfont
        Sets the color bar's tick label font
    tickformat
        Sets the tick label formatting rule using d3 formatting
        mini-languages which are very similar to those in
        Python. For numbers, see:
        https://github.com/d3/d3-3.x-api-
        reference/blob/master/Formatting.md#d3_format And for
        dates see: https://github.com/d3/d3-3.x-api-
        reference/blob/master/Time-Formatting.md#format We add
        one item to d3's date formatter: "%{n}f" for fractional
        seconds with n digits. For example, *2016-10-13
        09:15:23.456* with tickformat "%H~%M~%S.%2f" would
        display "09~15~23.46"
    tickformatstops
        A tuple of plotly_study.graph_objects.histogram.marker.colorb
        ar.Tickformatstop instances or dicts with compatible
        properties
    tickformatstopdefaults
        When used in a template (as layout.template.data.histog
        ram.marker.colorbar.tickformatstopdefaults), sets the
        default property values to use for elements of
        histogram.marker.colorbar.tickformatstops
    ticklen
        Sets the tick length (in px).
    tickmode
        Sets the tick mode for this axis. If "auto", the number
        of ticks is set via `nticks`. If "linear", the
        placement of the ticks is determined by a starting
        position `tick0` and a tick step `dtick` ("linear" is
        the default value if `tick0` and `dtick` are provided).
        If "array", the placement of the ticks is set via
        `tickvals` and the tick text is `ticktext`. ("array" is
        the default value if `tickvals` is provided).
    tickprefix
        Sets a tick label prefix.
    ticks
        Determines whether ticks are drawn or not. If "", this
        axis' ticks are not drawn. If "outside" ("inside"),
        this axis' are drawn outside (inside) the axis lines.
    ticksuffix
        Sets a tick label suffix.
    ticktext
        Sets the text displayed at the ticks position via
        `tickvals`. Only has an effect if `tickmode` is set to
        "array". Used with `tickvals`.
    ticktextsrc
        Sets the source reference on plot.ly for ticktext .
    tickvals
        Sets the values at which ticks on this axis appear.
        Only has an effect if `tickmode` is set to "array".
        Used with `ticktext`.
    tickvalssrc
        Sets the source reference on plot.ly for tickvals .
    tickwidth
        Sets the tick width (in px).
    title
        plotly_study.graph_objects.histogram.marker.colorbar.Title
        instance or dict with compatible properties
    titlefont
        Deprecated: Please use
        histogram.marker.colorbar.title.font instead. Sets this
        color bar's title font. Note that the title's font used
        to be set by the now deprecated `titlefont` attribute.
    titleside
        Deprecated: Please use
        histogram.marker.colorbar.title.side instead.
        Determines the location of color bar's title with
        respect to the color bar. Note that the title's
        location used to be set by the now deprecated
        `titleside` attribute.
    x
        Sets the x position of the color bar (in plot
        fraction).
    xanchor
        Sets this color bar's horizontal position anchor. This
        anchor binds the `x` position to the "left", "center"
        or "right" of the color bar.
    xpad
        Sets the amount of padding (in px) along the x
        direction.
    y
        Sets the y position of the color bar (in plot
        fraction).
    yanchor
        Sets this color bar's vertical position anchor This
        anchor binds the `y` position to the "top", "middle" or
        "bottom" of the color bar.
    ypad
        Sets the amount of padding (in px) along the y
        direction.

    Returns
    -------
    ColorBar
    """
    super(ColorBar, self).__init__("colorbar")

    # Validate arg
    # ------------
    # `arg` may be None, another ColorBar instance, or a plain dict;
    # anything else is rejected.  A shallow copy is taken so the
    # caller's dict is not consumed by the pops below.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly_study.graph_objs.histogram.marker.ColorBar
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.marker.ColorBar"""
        )

    # Handle skip_invalid
    # -------------------
    # When True, invalid properties are silently dropped instead of raising.
    self._skip_invalid = kwargs.pop("skip_invalid", False)

    # Import validators
    # -----------------
    from plotly_study.validators.histogram.marker import colorbar as v_colorbar

    # Initialize validators
    # ---------------------
    self._validators["bgcolor"] = v_colorbar.BgcolorValidator()
    self._validators["bordercolor"] = v_colorbar.BordercolorValidator()
    self._validators["borderwidth"] = v_colorbar.BorderwidthValidator()
    self._validators["dtick"] = v_colorbar.DtickValidator()
    self._validators["exponentformat"] = v_colorbar.ExponentformatValidator()
    self._validators["len"] = v_colorbar.LenValidator()
    self._validators["lenmode"] = v_colorbar.LenmodeValidator()
    self._validators["nticks"] = v_colorbar.NticksValidator()
    self._validators["outlinecolor"] = v_colorbar.OutlinecolorValidator()
    self._validators["outlinewidth"] = v_colorbar.OutlinewidthValidator()
    self._validators["separatethousands"] = v_colorbar.SeparatethousandsValidator()
    self._validators["showexponent"] = v_colorbar.ShowexponentValidator()
    self._validators["showticklabels"] = v_colorbar.ShowticklabelsValidator()
    self._validators["showtickprefix"] = v_colorbar.ShowtickprefixValidator()
    self._validators["showticksuffix"] = v_colorbar.ShowticksuffixValidator()
    self._validators["thickness"] = v_colorbar.ThicknessValidator()
    self._validators["thicknessmode"] = v_colorbar.ThicknessmodeValidator()
    self._validators["tick0"] = v_colorbar.Tick0Validator()
    self._validators["tickangle"] = v_colorbar.TickangleValidator()
    self._validators["tickcolor"] = v_colorbar.TickcolorValidator()
    self._validators["tickfont"] = v_colorbar.TickfontValidator()
    self._validators["tickformat"] = v_colorbar.TickformatValidator()
    self._validators["tickformatstops"] = v_colorbar.TickformatstopsValidator()
    self._validators[
        "tickformatstopdefaults"
    ] = v_colorbar.TickformatstopValidator()
    self._validators["ticklen"] = v_colorbar.TicklenValidator()
    self._validators["tickmode"] = v_colorbar.TickmodeValidator()
    self._validators["tickprefix"] = v_colorbar.TickprefixValidator()
    self._validators["ticks"] = v_colorbar.TicksValidator()
    self._validators["ticksuffix"] = v_colorbar.TicksuffixValidator()
    self._validators["ticktext"] = v_colorbar.TicktextValidator()
    self._validators["ticktextsrc"] = v_colorbar.TicktextsrcValidator()
    self._validators["tickvals"] = v_colorbar.TickvalsValidator()
    self._validators["tickvalssrc"] = v_colorbar.TickvalssrcValidator()
    self._validators["tickwidth"] = v_colorbar.TickwidthValidator()
    self._validators["title"] = v_colorbar.TitleValidator()
    self._validators["x"] = v_colorbar.XValidator()
    self._validators["xanchor"] = v_colorbar.XanchorValidator()
    self._validators["xpad"] = v_colorbar.XpadValidator()
    self._validators["y"] = v_colorbar.YValidator()
    self._validators["yanchor"] = v_colorbar.YanchorValidator()
    self._validators["ypad"] = v_colorbar.YpadValidator()

    # Populate data dict with properties
    # ----------------------------------
    # Explicit keyword arguments take precedence over values in `arg`.
    _v = arg.pop("bgcolor", None)
    self["bgcolor"] = bgcolor if bgcolor is not None else _v
    _v = arg.pop("bordercolor", None)
    self["bordercolor"] = bordercolor if bordercolor is not None else _v
    _v = arg.pop("borderwidth", None)
    self["borderwidth"] = borderwidth if borderwidth is not None else _v
    _v = arg.pop("dtick", None)
    self["dtick"] = dtick if dtick is not None else _v
    _v = arg.pop("exponentformat", None)
    self["exponentformat"] = exponentformat if exponentformat is not None else _v
    _v = arg.pop("len", None)
    self["len"] = len if len is not None else _v
    _v = arg.pop("lenmode", None)
    self["lenmode"] = lenmode if lenmode is not None else _v
    _v = arg.pop("nticks", None)
    self["nticks"] = nticks if nticks is not None else _v
    _v = arg.pop("outlinecolor", None)
    self["outlinecolor"] = outlinecolor if outlinecolor is not None else _v
    _v = arg.pop("outlinewidth", None)
    self["outlinewidth"] = outlinewidth if outlinewidth is not None else _v
    _v = arg.pop("separatethousands", None)
    self["separatethousands"] = (
        separatethousands if separatethousands is not None else _v
    )
    _v = arg.pop("showexponent", None)
    self["showexponent"] = showexponent if showexponent is not None else _v
    _v = arg.pop("showticklabels", None)
    self["showticklabels"] = showticklabels if showticklabels is not None else _v
    _v = arg.pop("showtickprefix", None)
    self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
    _v = arg.pop("showticksuffix", None)
    self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
    _v = arg.pop("thickness", None)
    self["thickness"] = thickness if thickness is not None else _v
    _v = arg.pop("thicknessmode", None)
    self["thicknessmode"] = thicknessmode if thicknessmode is not None else _v
    _v = arg.pop("tick0", None)
    self["tick0"] = tick0 if tick0 is not None else _v
    _v = arg.pop("tickangle", None)
    self["tickangle"] = tickangle if tickangle is not None else _v
    _v = arg.pop("tickcolor", None)
    self["tickcolor"] = tickcolor if tickcolor is not None else _v
    _v = arg.pop("tickfont", None)
    self["tickfont"] = tickfont if tickfont is not None else _v
    _v = arg.pop("tickformat", None)
    self["tickformat"] = tickformat if tickformat is not None else _v
    _v = arg.pop("tickformatstops", None)
    self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
    _v = arg.pop("tickformatstopdefaults", None)
    self["tickformatstopdefaults"] = (
        tickformatstopdefaults if tickformatstopdefaults is not None else _v
    )
    _v = arg.pop("ticklen", None)
    self["ticklen"] = ticklen if ticklen is not None else _v
    _v = arg.pop("tickmode", None)
    self["tickmode"] = tickmode if tickmode is not None else _v
    _v = arg.pop("tickprefix", None)
    self["tickprefix"] = tickprefix if tickprefix is not None else _v
    _v = arg.pop("ticks", None)
    self["ticks"] = ticks if ticks is not None else _v
    _v = arg.pop("ticksuffix", None)
    self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
    _v = arg.pop("ticktext", None)
    self["ticktext"] = ticktext if ticktext is not None else _v
    _v = arg.pop("ticktextsrc", None)
    self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
    _v = arg.pop("tickvals", None)
    self["tickvals"] = tickvals if tickvals is not None else _v
    _v = arg.pop("tickvalssrc", None)
    self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
    _v = arg.pop("tickwidth", None)
    self["tickwidth"] = tickwidth if tickwidth is not None else _v
    _v = arg.pop("title", None)
    self["title"] = title if title is not None else _v
    # Deprecated properties are only written when a value was supplied,
    # so that the deprecation machinery is not triggered needlessly.
    _v = arg.pop("titlefont", None)
    _v = titlefont if titlefont is not None else _v
    if _v is not None:
        self["titlefont"] = _v
    _v = arg.pop("titleside", None)
    _v = titleside if titleside is not None else _v
    if _v is not None:
        self["titleside"] = _v
    _v = arg.pop("x", None)
    self["x"] = x if x is not None else _v
    _v = arg.pop("xanchor", None)
    self["xanchor"] = xanchor if xanchor is not None else _v
    _v = arg.pop("xpad", None)
    self["xpad"] = xpad if xpad is not None else _v
    _v = arg.pop("y", None)
    self["y"] = y if y is not None else _v
    _v = arg.pop("yanchor", None)
    self["yanchor"] = yanchor if yanchor is not None else _v
    _v = arg.pop("ypad", None)
    self["ypad"] = ypad if ypad is not None else _v

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
# Public API of this generated module.  The `colorbar` subpackage is
# imported after the class definitions above (presumably to avoid a
# circular import — TODO confirm against the generator template).
__all__ = ["ColorBar", "Line", "colorbar"]

from plotly_study.graph_objs.histogram.marker import colorbar
| 37.883553 | 108 | 0.574706 |
86956dd638793dd23a4857313ce9bcc56abb5cbb | 15,278 | py | Python | shellpartylib/lib/log.py | satoshichain/satoshiparty-lib | 87b99678e5498b1f536f9be589b1b0cde6f21ad0 | [
"MIT"
] | null | null | null | shellpartylib/lib/log.py | satoshichain/satoshiparty-lib | 87b99678e5498b1f536f9be589b1b0cde6f21ad0 | [
"MIT"
] | null | null | null | shellpartylib/lib/log.py | satoshichain/satoshiparty-lib | 87b99678e5498b1f536f9be589b1b0cde6f21ad0 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
import decimal
D = decimal.Decimal
import binascii
import collections
import json
import time
from datetime import datetime
from dateutil.tz import tzlocal
import os
from colorlog import ColoredFormatter
from shellpartylib.lib import config
from shellpartylib.lib import exceptions
from shellpartylib.lib import util
def set_up(logger, verbose=False, logfile=None):
    """Attach colored console logging (and optionally rotating file logging)
    to `logger`, and quieten the `requests`/`urllib3` loggers.

    Args:
        logger: `logging.Logger` to configure; handlers are appended.
        verbose: when True log at DEBUG, otherwise INFO.
        logfile: optional path; when given, a rotating file handler
            (20 MB per file, 5 backups) is attached at DEBUG level.
    """
    # `logging.handlers` is a submodule and is NOT guaranteed to be an
    # attribute of `logging` unless explicitly imported; the module header
    # only does `import logging`, so import it here before use.
    import logging.handlers

    log_level = logging.DEBUG if verbose else logging.INFO
    logger.setLevel(log_level)

    # Console Logging
    console = logging.StreamHandler()
    console.setLevel(log_level)
    LOGFORMAT = '%(log_color)s[%(levelname)s] %(message)s%(reset)s'
    LOGCOLORS = {'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red'}
    formatter = ColoredFormatter(LOGFORMAT, log_colors=LOGCOLORS)
    console.setFormatter(formatter)
    logger.addHandler(console)

    # File Logging
    if logfile:
        max_log_size = 20 * 1024 * 1024  # 20 MB
        if os.name == 'nt':
            # Windows: use the sanitized handler (rotation on Windows has
            # file-locking quirks the plain handler does not cope with).
            from shellpartylib.lib import util_windows
            fileh = util_windows.SanitizedRotatingFileHandler(logfile, maxBytes=max_log_size, backupCount=5)
        else:
            fileh = logging.handlers.RotatingFileHandler(logfile, maxBytes=max_log_size, backupCount=5)
        fileh.setLevel(logging.DEBUG)
        LOGFORMAT = '%(asctime)s [%(levelname)s] %(message)s'
        formatter = logging.Formatter(LOGFORMAT, '%Y-%m-%d-T%H:%M:%S%z')
        fileh.setFormatter(formatter)
        logger.addHandler(fileh)

    # Quieten noisy libraries: same level, but propagation is cut so their
    # records never reach our handlers.
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(log_level)
    requests_log.propagate = False
    urllib3_log = logging.getLogger('urllib3')
    urllib3_log.setLevel(log_level)
    urllib3_log.propagate = False

    # Disable InsecureRequestWarning
    import requests
    requests.packages.urllib3.disable_warnings()
def curr_time():
    """Return the current Unix epoch time, truncated to an integer."""
    now = time.time()
    return int(now)
def isodt (epoch_time):
    """Render an epoch timestamp as a local-timezone ISO-8601 string.

    Returns the placeholder '<datetime>' when the platform cannot convert
    the timestamp (OSError from `fromtimestamp`, e.g. out-of-range values).
    """
    try:
        local_dt = datetime.fromtimestamp(epoch_time, tzlocal())
    except OSError:
        return '<datetime>'
    return local_dt.isoformat()
def message(db, block_index, command, category, bindings, tx_hash=None):
    """Append a row to the `messages` table describing a database event,
    and (for non-mempool blocks) emit a human-readable log line.

    Args:
        db: database connection providing `.cursor()` (sqlite-style rows).
        block_index: block the event belongs to; the sentinel
            `config.MEMPOOL_BLOCK_INDEX` marks unconfirmed events.
        command: 'insert' / 'update' (what happened to the row).
        category: table/category name the event refers to.
        bindings: dict of column values; mutated in place (mempool fields
            are deleted below).
        tx_hash: unused here; kept for caller compatibility.
    """
    cursor = db.cursor()

    # Get last message index.
    messages = list(cursor.execute('''SELECT * FROM messages
        WHERE message_index = (SELECT MAX(message_index) from messages)'''))
    if messages:
        assert len(messages) == 1
        message_index = messages[0]['message_index'] + 1
    else:
        # Empty table: start numbering at zero.
        message_index = 0

    # Not to be misleading…
    # Mempool events have no confirmed block/tx position yet, so strip
    # those fields before recording.
    if block_index == config.MEMPOOL_BLOCK_INDEX:
        try:
            del bindings['status']
            del bindings['block_index']
            del bindings['tx_index']
        except KeyError:
            pass

    # Handle binary data: hex-encode bytes values so the bindings can be
    # serialised to JSON deterministically (sorted key order).
    items = []
    for item in sorted(bindings.items()):
        if type(item[1]) == bytes:
            items.append((item[0], binascii.hexlify(item[1]).decode('ascii')))
        else:
            items.append(item)

    bindings_string = json.dumps(collections.OrderedDict(items))
    cursor.execute('insert into messages values(:message_index, :block_index, :command, :category, :bindings, :timestamp)',
                   (message_index, block_index, command, category, bindings_string, curr_time()))

    # Log only real transactions.
    if block_index != config.MEMPOOL_BLOCK_INDEX:
        log(db, command, category, bindings)

    cursor.close()
def log (db, command, category, bindings):
    """Emit a human-readable log line for a database event.

    Dispatches on (command, category) and formats the event's bindings
    into an English sentence via the module logger.  Asset quantities are
    rendered through the local `output` helper, which converts internal
    integer units to display units.
    """
    cursor = db.cursor()

    # Sanity pass over the bindings.  NOTE(review): iterating `.keys()` and
    # then indexing cannot raise KeyError for existing keys, so the except
    # branch looks unreachable as written — presumably the intent was to
    # guard against unstringifiable values; confirm before changing.
    for element in bindings.keys():
        try:
            str(bindings[element])
        except KeyError:
            bindings[element] = '<Error>'

    # Slow?!
    def output (quantity, asset):
        # Render `quantity` of `asset` for display; 'fraction'/'leverage'
        # are pseudo-assets whose name is not appended.
        try:
            if asset not in ('fraction', 'leverage'):
                return str(util.value_out(db, quantity, asset)) + ' ' + asset
            else:
                return str(util.value_out(db, quantity, asset))
        except exceptions.AssetError:
            return '<AssetError>'
        except decimal.DivisionByZero:
            return '<DivisionByZero>'
        except TypeError:
            return '<None>'

    if command == 'update':
        # Status transitions of existing rows.
        if category == 'order':
            logger.debug('Database: set status of order {} to {}.'.format(bindings['tx_hash'], bindings['status']))
        elif category == 'bet':
            logger.debug('Database: set status of bet {} to {}.'.format(bindings['tx_hash'], bindings['status']))
        elif category == 'order_matches':
            logger.debug('Database: set status of order_match {} to {}.'.format(bindings['order_match_id'], bindings['status']))
        elif category == 'bet_matches':
            logger.debug('Database: set status of bet_match {} to {}.'.format(bindings['bet_match_id'], bindings['status']))
        # TODO: elif category == 'balances':
            # logger.debug('Database: set balance of {} in {} to {}.'.format(bindings['address'], bindings['asset'], output(bindings['quantity'], bindings['asset']).split(' ')[0]))

    elif command == 'insert':
        # Newly recorded events, one branch per table.
        if category == 'credits':
            logger.debug('Credit: {} to {} #{}# <{}>'.format(output(bindings['quantity'], bindings['asset']), bindings['address'], bindings['action'], bindings['event']))

        elif category == 'debits':
            logger.debug('Debit: {} from {} #{}# <{}>'.format(output(bindings['quantity'], bindings['asset']), bindings['address'], bindings['action'], bindings['event']))

        elif category == 'sends':
            logger.info('Send: {} from {} to {} ({}) [{}]'.format(output(bindings['quantity'], bindings['asset']), bindings['source'], bindings['destination'], bindings['tx_hash'], bindings['status']))

        elif category == 'orders':
            logger.info('Order: {} ordered {} for {} in {} blocks, with a provided fee of {} {} and a required fee of {} {} ({}) [{}]'.format(bindings['source'], output(bindings['give_quantity'], bindings['give_asset']), output(bindings['get_quantity'], bindings['get_asset']), bindings['expiration'], bindings['fee_provided'] / config.UNIT, config.SCH, bindings['fee_required'] / config.UNIT, config.SCH, bindings['tx_hash'], bindings['status']))

        elif category == 'order_matches':
            logger.info('Order Match: {} for {} ({}) [{}]'.format(output(bindings['forward_quantity'], bindings['forward_asset']), output(bindings['backward_quantity'], bindings['backward_asset']), bindings['id'], bindings['status']))

        elif category == 'shellpays':
            logger.info('{} Payment: {} paid {} to {} for order match {} ({}) [{}]'.format(config.SCH, bindings['source'], output(bindings['shell_amount'], config.SCH), bindings['destination'], bindings['order_match_id'], bindings['tx_hash'], bindings['status']))

        elif category == 'issuances':
            if bindings['transfer']:
                logger.info('Issuance: {} transfered asset {} to {} ({}) [{}]'.format(bindings['source'], bindings['asset'], bindings['issuer'], bindings['tx_hash'], bindings['status']))
            elif bindings['locked']:
                logger.info('Issuance: {} locked asset {} ({}) [{}]'.format(bindings['issuer'], bindings['asset'], bindings['tx_hash'], bindings['status']))
            else:
                if bindings['divisible']:
                    divisibility = 'divisible'
                    unit = config.UNIT
                else:
                    divisibility = 'indivisible'
                    unit = 1
                try:
                    quantity = util.value_out(db, bindings['quantity'], None, divisible=bindings['divisible'])
                except Exception as e:
                    quantity = '?'
                logger.info('Issuance: {} created {} of {} asset {} ({}) [{}]'.format(bindings['issuer'], quantity, divisibility, bindings['asset'], bindings['tx_hash'], bindings['status']))

        elif category == 'broadcasts':
            if bindings['locked']:
                logger.info('Broadcast: {} locked his feed ({}) [{}]'.format(bindings['source'], bindings['tx_hash'], bindings['status']))
            else:
                logger.info('Broadcast: ' + bindings['source'] + ' at ' + isodt(bindings['timestamp']) + ' with a fee of {}%'.format(output(D(bindings['fee_fraction_int'] / 1e8) * D(100), 'fraction')) + ' (' + bindings['tx_hash'] + ')' + ' [{}]'.format(bindings['status']))

        elif category == 'bets':
            logger.info('Bet: {} against {}, by {}, on {}'.format(output(bindings['wager_quantity'], config.SHP), output(bindings['counterwager_quantity'], config.SHP), bindings['source'], bindings['feed_address']))

        elif category == 'bet_matches':
            placeholder = ''
            if bindings['target_value'] >= 0:    # Only non‐negative values are valid.
                placeholder = ' that ' + str(output(bindings['target_value'], 'value'))
            if bindings['leverage']:
                placeholder += ', leveraged {}x'.format(output(bindings['leverage'] / 5040, 'leverage'))
            logger.info('Bet Match: {} for {} against {} for {} on {} at {}{} ({}) [{}]'.format(util.BET_TYPE_NAME[bindings['tx0_bet_type']], output(bindings['forward_quantity'], config.SHP), util.BET_TYPE_NAME[bindings['tx1_bet_type']], output(bindings['backward_quantity'], config.SHP), bindings['feed_address'], isodt(bindings['deadline']), placeholder, bindings['id'], bindings['status']))

        elif category == 'dividends':
            logger.info('Dividend: {} paid {} per unit of {} ({}) [{}]'.format(bindings['source'], output(bindings['quantity_per_unit'], bindings['dividend_asset']), bindings['asset'], bindings['tx_hash'], bindings['status']))

        elif category == 'burns':
            logger.info('Burn: {} burned {} for {} ({}) [{}]'.format(bindings['source'], output(bindings['burned'], config.SCH), output(bindings['earned'], config.SHP), bindings['tx_hash'], bindings['status']))

        elif category == 'cancels':
            logger.info('Cancel: {} ({}) [{}]'.format(bindings['offer_hash'], bindings['tx_hash'], bindings['status']))

        elif category == 'rps':
            log_message = 'RPS: {} opens game with {} possible moves and a wager of {}'.format(bindings['source'], bindings['possible_moves'], output(bindings['wager'], 'SHP'))
            logger.info(log_message)

        elif category == 'rps_matches':
            log_message = 'RPS Match: {} is playing a {}-moves game with {} with a wager of {} ({}) [{}]'.format(bindings['tx0_address'], bindings['possible_moves'], bindings['tx1_address'], output(bindings['wager'], 'SHP'), bindings['id'], bindings['status'])
            logger.info(log_message)

        elif category == 'rpsresolves':
            if bindings['status'] == 'valid':
                rps_matches = list(cursor.execute('''SELECT * FROM rps_matches WHERE id = ?''', (bindings['rps_match_id'],)))
                assert len(rps_matches) == 1
                rps_match = rps_matches[0]
                log_message = 'RPS Resolved: {} is playing {} on a {}-moves game with {} with a wager of {} ({}) [{}]'.format(rps_match['tx0_address'], bindings['move'], rps_match['possible_moves'], rps_match['tx1_address'], output(rps_match['wager'], 'SHP'), rps_match['id'], rps_match['status'])
            else:
                log_message = 'RPS Resolved: {} [{}]'.format(bindings['tx_hash'], bindings['status'])
            logger.info(log_message)

        elif category == 'order_expirations':
            logger.info('Expired order: {}'.format(bindings['order_hash']))

        elif category == 'order_match_expirations':
            logger.info('Expired Order Match awaiting payment: {}'.format(bindings['order_match_id']))

        elif category == 'bet_expirations':
            logger.info('Expired bet: {}'.format(bindings['bet_hash']))

        elif category == 'bet_match_expirations':
            logger.info('Expired Bet Match: {}'.format(bindings['bet_match_id']))

        elif category == 'bet_match_resolutions':
            # DUPE
            cfd_type_id = util.BET_TYPE_ID['BullCFD'] + util.BET_TYPE_ID['BearCFD']
            equal_type_id = util.BET_TYPE_ID['Equal'] + util.BET_TYPE_ID['NotEqual']

            if bindings['bet_match_type_id'] == cfd_type_id:
                if bindings['settled']:
                    logger.info('Bet Match Settled: {} credited to the bull, {} credited to the bear, and {} credited to the feed address ({})'.format(output(bindings['bull_credit'], config.SHP), output(bindings['bear_credit'], config.SHP), output(bindings['fee'], config.SHP), bindings['bet_match_id']))
                else:
                    logger.info('Bet Match Force‐Liquidated: {} credited to the bull, {} credited to the bear, and {} credited to the feed address ({})'.format(output(bindings['bull_credit'], config.SHP), output(bindings['bear_credit'], config.SHP), output(bindings['fee'], config.SHP), bindings['bet_match_id']))

            elif bindings['bet_match_type_id'] == equal_type_id:
                logger.info('Bet Match Settled: {} won the pot of {}; {} credited to the feed address ({})'.format(bindings['winner'], output(bindings['escrow_less_fee'], config.SHP), output(bindings['fee'], config.SHP), bindings['bet_match_id']))

        elif category == 'rps_expirations':
            logger.info('Expired RPS: {}'.format(bindings['rps_hash']))

        elif category == 'rps_match_expirations':
            logger.info('Expired RPS Match: {}'.format(bindings['rps_match_id']))

        elif category == 'contracts':
            logger.info('New Contract: {}'.format(bindings['contract_id']))

        elif category == 'executions':
            """
            try:
                payload_hex = binascii.hexlify(bindings['payload']).decode('ascii')
            except TypeError:
                payload_hex = '<None>'
            try:
                output_hex = binascii.hexlify(bindings['output']).decode('ascii')
            except TypeError:
                output_hex = '<None>'
            logger.info('Execution: {} executed contract {}, funded with {}, at a price of {} (?), at a final cost of {}, reclaiming {}, and also sending {}, with a data payload of {}, yielding {} ({}) [{}]'.format(bindings['source'], bindings['contract_id'], output(bindings['gas_start'], config.SHP), bindings['gas_price'], output(bindings['gas_cost'], config.SHP), output(bindings['gas_remaining'], config.SHP), output(bindings['value'], config.SHP), payload_hex, output_hex, bindings['tx_hash'], bindings['status']))
            """
            if bindings['contract_id']:
                logger.info('Execution: {} executed contract {} ({}) [{}]'.format(bindings['source'], bindings['contract_id'], bindings['tx_hash'], bindings['status']))
            else:
                logger.info('Execution: {} created contract {} ({}) [{}]'.format(bindings['source'], bindings['output'], bindings['tx_hash'], bindings['status']))

        elif category == 'destructions':
            logger.info('Destruction: {} destroyed {} {} with tag ‘{}’({}) [{}]'.format(bindings['source'], bindings['quantity'], bindings['asset'], bindings['tag'], bindings['tx_hash'], bindings['status']))

    cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 54.177305 | 520 | 0.6133 |
b68f65ddbece6a49234e2dd5b0472a96cf419c32 | 8,359 | py | Python | darkhex/algorithms/pone_optimal.py | BedirT/dark-hex-tools | c4b803253e57d3c87edddde6e82740c643378dde | [
"MIT"
] | null | null | null | darkhex/algorithms/pone_optimal.py | BedirT/dark-hex-tools | c4b803253e57d3c87edddde6e82740c643378dde | [
"MIT"
] | 10 | 2022-01-16T05:55:09.000Z | 2022-03-09T20:21:21.000Z | darkhex/algorithms/pone_optimal.py | BedirT/darkhex | c4b803253e57d3c87edddde6e82740c643378dde | [
"MIT"
] | null | null | null | # PONE (Probability One)
#
# Finds all probability one wins for the players for the game Dark Hex
# Assuming both players are playing optimally.
#
# Uses pyspiel states to traverse the tree.
#
# Uses a dictionary to save the states that are definite win states.
# These data can be used to early stop when doing search on the tree.
import math
import numpy as np
import pyspiel
import darkhex.algorithms.gel_all_information_states as gel
from tqdm import tqdm
from darkhex.utils.cell_state import cellState
class PoneOptimal:
def __init__(self, num_rows, num_cols):
    """Set up a PONE search for a num_rows x num_cols Dark Hex board.

    Loads the imperfect-recall dark hex game from pyspiel, resets the
    search tables, and precomputes every reachable information state.
    """
    self.num_rows = num_rows
    self.num_cols = num_cols
    self.num_cells = self.num_rows * self.num_cols
    # Imperfect-recall ("ir") variant of dark hex.
    self.game = pyspiel.load_game(
        f"dark_hex_ir(num_cols={num_cols},num_rows={num_rows})")
    self.reset_data()
    print("Generating all states...")
    # All information states of the game (can be slow for larger boards).
    self.all_info_states = gel.get_all_information_states(
        self.game, num_rows, num_cols, True)
    print(f"All States gathered: {len(self.all_info_states)}")
def reset_data(self) -> None:
self.legal_actions = {}
self.legal_states = [{} for _ in range(self.num_cells)]
self.empty_states = [set() for _ in range(self.num_cells + 1)
] # pyspiel.State
def search(self, player) -> list:
    """
    Find all the legal positions, examine every position
    in depth for prob 1 wins for players. It fills the dictionary
    'state results'.
    """
    self.player = player
    self.opponent = 1 - player
    self.setup_legal_states()  # build the legal-state tables first
    max_hidden = math.ceil(self.num_cells / 2)
    for e in range(self.num_cells + 1):  # empty cells
        for h in range(max_hidden):  # hidden cells
            print(f"Examining {e} empty cells and {h} hidden cells")
            if e + h > self.num_cells:
                continue
            for info_state in tqdm(self.empty_states[e + h]):
                res = self.legal_states[h].get(info_state, -2)
                if res in (-1, 0, 1):
                    self.legal_states[h][info_state] = self.pone_search(
                        info_state, h)
    return self.legal_states
def pone_search(self, info_state: str, h: int) -> bool:
"""
Recursive algorithm to iterate through sub states and
determine if the given position is a prob 1 win for player
or not.
Args:
- info_state: Information state to check.
- h: Number of hidden stones.
Returns:
- True/False If given info_state and h is a definite win
"""
status = self.legal_states[h][info_state]
if status == self.player:
return status
if status == self.opponent:
return -3
if self.turn_info(info_state, h) != self.player:
if self.check_state(info_state, h + 1) != -2:
return self.pone_search(info_state, h + 1)
return -3
else:
legal_actions = self.legal_actions[(info_state, h)]
if h == 0:
for action in legal_actions:
n_state = self.update_state(info_state, action, self.player,
h)
if n_state in self.legal_states[h]:
if self.pone_search(n_state, h) == self.player:
self.legal_states[h][n_state] = self.player
return self.player
elif h > 0:
for action in legal_actions:
n_state_hW = self.update_state(info_state, action,
self.opponent, h - 1)
if n_state_hW not in self.legal_states[h - 1]:
continue
n_state_B = self.update_state(info_state, action,
self.player, h)
if n_state_B in self.legal_states[h]:
if self.pone_search(n_state_hW, h - 1) == self.player:
if self.pone_search(n_state_B, h) == self.player:
self.legal_states[h][n_state_B] = self.player
self.legal_states[h -
1][n_state_hW] = self.player
return self.player
return -3
def check_state(self, info_state: str, h: int) -> str:
"""If the given state is a legal state"""
return self.legal_states[h].get(info_state, -2)
def turn_info(self, info_state: str, h: int) -> str:
"""Which players turn is it in the given state"""
count_player = self.count(info_state, self.player)
count_opp = self.count(info_state, self.opponent) + h
if self.player == 0 and count_player <= count_opp:
return 0
elif self.player == 1 and count_opp <= count_player:
return 0
return 1
def update_state(self, info_state: str, action: int, player: int,
h: int) -> list:
"""New state after the given action"""
new_state = list(info_state)
new_state[
action] = cellState.kBlack if player == 0 else cellState.kWhite
new_state = "".join(new_state)
if self.check_state(new_state, h) != -2:
return new_state
return ""
def setup_legal_states(self) -> None:
"""
Setup the legal states for each h and e.
"""
for info_state_pair, data in self.all_info_states.items():
info_state = info_state_pair[self.player]
info_state = self.simplify_state(info_state)
h = self._num_hidden_stones(info_state_pair)
e = self._num_empty_cells(info_state)
self.legal_actions[(info_state, h)] = data[0][self.player]
if info_state in self.legal_states[h]:
if data[1] == self.opponent:
self.legal_states[h][info_state] = self.opponent
else:
self.legal_states[h][info_state] = data[1]
self.empty_states[e].add(info_state)
def _num_hidden_stones(self, info_state_pair: tuple) -> int:
"""
Get the number of hidden stones in the given state.
Args:
- state: State to get the number of hidden stones.
Returns:
- int: Number of hidden stones.
"""
player_perspective = self.simplify_state(info_state_pair[self.player])
# count the known opp stones and substract from the real num of opp stones
opp_perspective = self.simplify_state(info_state_pair[self.opponent])
opp_stone = cellState.kBlack if self.opponent == 0 else cellState.kWhite
num_hidden_stones = 0
for p_cell, o_cell in zip(player_perspective, opp_perspective):
if o_cell == opp_stone and p_cell != opp_stone:
num_hidden_stones += 1
return num_hidden_stones
def _num_empty_cells(self, p_info_state: str) -> int:
"""
Get the number of empty cells in the given state.
Args:
- state: State to get the number of empty cells.
Returns:
- int: Number of empty cells.
"""
return p_info_state.count('.')
@staticmethod
def count(info_state: str, player: int) -> int:
"""Count the player stones in the given info_state"""
# count cellState.black_pieces if player == cellState.kBlack
ct = 0
for c in info_state:
if player == 0 and c in cellState.black_pieces:
ct += 1
elif player == 1 and c in cellState.white_pieces:
ct += 1
return ct
@staticmethod
def simplify_state(info_state: str) -> str:
info_state = info_state[3:]
info_state = info_state.replace("\n", "")
for cell in cellState.black_pieces:
info_state = info_state.replace(cell, cellState.kBlack)
for cell in cellState.white_pieces:
info_state = info_state.replace(cell, cellState.kWhite)
return info_state
| 40.1875 | 82 | 0.567532 |
4e7c44d1048b71433e1784c3807957ae6cb3b73b | 1,592 | py | Python | src/apps/logs/models.py | COAStatistics/alss | e1b14cb13de7b8455fa6835587cb1ceb16e4595c | [
"MIT"
] | null | null | null | src/apps/logs/models.py | COAStatistics/alss | e1b14cb13de7b8455fa6835587cb1ceb16e4595c | [
"MIT"
] | null | null | null | src/apps/logs/models.py | COAStatistics/alss | e1b14cb13de7b8455fa6835587cb1ceb16e4595c | [
"MIT"
] | null | null | null | from django.db.models import (
Model,
IntegerField,
PositiveIntegerField,
ForeignKey,
DateTimeField,
CASCADE,
Q,
)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Restricts ReviewLog.content_type to the Survey models of the
# supported survey apps (2018/2019/2020/2022).
SURVEY_CHOICES = (
    Q(app_label="surveys18", model="survey")
    | Q(app_label="surveys19", model="survey")
    | Q(app_label="surveys20", model="survey")
    | Q(app_label="surveys22", model="survey")
)
class ReviewLog(Model):
    """Audit record of a user's review pass over one survey instance.

    The reviewed survey is referenced generically (content_type +
    object_id), limited by SURVEY_CHOICES to the supported survey apps.
    """
    # Reviewer; logs are deleted together with the user (CASCADE).
    user = ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=CASCADE,
        related_name="review_logs",
        verbose_name=_("User"),
    )
    # Generic FK: which survey model/row this log refers to.
    content_type = ForeignKey(
        ContentType, limit_choices_to=SURVEY_CHOICES, on_delete=CASCADE
    )
    object_id = PositiveIntegerField()
    content_object = GenericForeignKey("content_type", "object_id")
    # Error counters: snapshot at the start of review vs. current state.
    initial_errors = IntegerField(
        null=True, blank=True, verbose_name=_("Initialed Error Count")
    )
    current_errors = IntegerField(
        null=True, blank=True, verbose_name=_("Current Error Count")
    )
    exception_errors = IntegerField(
        default=0, verbose_name=_("Exception Error Count")
    )
    # Refreshed automatically on every save (auto_now=True).
    update_time = DateTimeField(
        auto_now=True,
        auto_now_add=False,
        null=True,
        blank=True,
        verbose_name=_("Updated"),
    )
    class Meta:
        verbose_name = _("ReviewLog")
        verbose_name_plural = _("ReviewLogs")
| 27.929825 | 71 | 0.670854 |
fe90ec972f67e365632b690120f82c991b7e3dd5 | 2,683 | py | Python | tests/io/epmc/test_client.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 29 | 2020-01-31T17:05:38.000Z | 2021-12-14T14:17:55.000Z | tests/io/epmc/test_client.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 342 | 2020-02-05T10:40:43.000Z | 2022-03-17T19:50:23.000Z | tests/io/epmc/test_client.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 9 | 2020-06-07T17:01:00.000Z | 2021-11-24T16:03:38.000Z | from unittest.mock import MagicMock
import pytest
from wellcomeml.io.epmc.client import EPMCClient
@pytest.fixture
def epmc_client():
    # Fresh EPMC client per test so MagicMock patches never leak between tests.
    return EPMCClient(
        max_retries=3
    )
def test_search(epmc_client):
    # search() must forward query/format/resultType/pageSize as a params
    # dict to the low-level executor, together with the only_first flag.
    epmc_client._execute_query = MagicMock()
    epmc_client.search(
        "session", "query", result_type="not core",
        page_size=15, only_first=False
    )
    expected_params = {
        "query": "query",
        "format": "json",
        "resultType": "not core",
        "pageSize": 15
    }
    epmc_client._execute_query.assert_called_with("session", expected_params, False)
def test_search_by_pmid(epmc_client):
    # PMID lookups delegate to search() with an "ext_id:" query.
    epmc_client.search = MagicMock(return_value="results")
    epmc_client.search_by_pmid("session", "pmid")
    epmc_client.search.assert_called_with("session", "ext_id:pmid", only_first=True)
def test_search_by_doi(epmc_client):
    # DOI lookups delegate to search() with a "doi:" query.
    epmc_client.search = MagicMock(return_value="results")
    epmc_client.search_by_doi("session", "doi")
    epmc_client.search.assert_called_with("session", "doi:doi", only_first=True)
def test_search_by_pmcid(epmc_client):
    # A well-formed id ("PMCID0") delegates to search() with a "pmcid:" query.
    epmc_client.search = MagicMock(return_value="results")
    epmc_client.search_by_pmcid("session", "PMCID0")
    epmc_client.search.assert_called_with("session", "pmcid:PMCID0", only_first=True)
def test_search_by_invalid_pmcid(epmc_client):
    # A lowercase "pmcid" is rejected with ValueError, unlike "PMCID0" in the
    # test above — presumably the client validates the PMC id format.
    epmc_client.search = MagicMock(return_value="results")
    with pytest.raises(ValueError):
        epmc_client.search_by_pmcid("session", "pmcid")
def test_get_full_text(epmc_client):
    # Full text is fetched from <api_endpoint>/<pmid>/fullTextXML.
    epmc_client._get_response_content = MagicMock(return_value="content")
    epmc_client.get_full_text("session", "pmid")
    epmc_endpoint = epmc_client.api_endpoint
    epmc_client._get_response_content.assert_called_with(
        "session",
        f"{epmc_endpoint}/pmid/fullTextXML"
    )
def test_get_references(epmc_client):
    # References are fetched from <api_endpoint>/MED/<pmid>/references with
    # json format and pagination params (page 1, 1000 per page).
    epmc_client._get_response_json = MagicMock(return_value={"references": []})
    epmc_client.get_references("session", "pmid")
    epmc_endpoint = epmc_client.api_endpoint
    params = {"format": "json", "page": 1, "pageSize": 1000}
    epmc_client._get_response_json.assert_called_with(
        "session",
        f"{epmc_endpoint}/MED/pmid/references",
        params
    )
def test_get_citations(epmc_client):
    # Citations use the same URL scheme and pagination as references,
    # but the /citations resource.
    epmc_client._get_response_json = MagicMock(return_value={"references": []})
    epmc_client.get_citations("session", "pmid")
    epmc_endpoint = epmc_client.api_endpoint
    params = {"format": "json", "page": 1, "pageSize": 1000}
    epmc_client._get_response_json.assert_called_with(
        "session",
        f"{epmc_endpoint}/MED/pmid/citations",
        params
    )
| 30.146067 | 85 | 0.714499 |
92df3ece3bbe871212b0aaf8e2edd13bf3d236d9 | 746 | py | Python | django-guestbook/apps/brounder/brounder/wsgi.py | ser0090/deployment-pm | 41d3174e0516d519aad5a248028f7c12fc84f27e | [
"MIT"
] | null | null | null | django-guestbook/apps/brounder/brounder/wsgi.py | ser0090/deployment-pm | 41d3174e0516d519aad5a248028f7c12fc84f27e | [
"MIT"
] | null | null | null | django-guestbook/apps/brounder/brounder/wsgi.py | ser0090/deployment-pm | 41d3174e0516d519aad5a248028f7c12fc84f27e | [
"MIT"
] | null | null | null | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the WSGI app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brounder.settings")
# WSGI callable picked up by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 32.434783 | 74 | 0.774799 |
87d0f52128e7c7e84243f57acee38471b5d4b396 | 651 | py | Python | src-server/app/handlers/apis/about_modules.py | mjmcconnell/sra | ff1c2563dead2fe4d81cda3d431482defd0f6a62 | [
"Apache-2.0"
] | null | null | null | src-server/app/handlers/apis/about_modules.py | mjmcconnell/sra | ff1c2563dead2fe4d81cda3d431482defd0f6a62 | [
"Apache-2.0"
] | null | null | null | src-server/app/handlers/apis/about_modules.py | mjmcconnell/sra | ff1c2563dead2fe4d81cda3d431482defd0f6a62 | [
"Apache-2.0"
] | null | null | null | """Api endpoints to interactions for the Image ndb model
"""
# local imports
from app.base.handlers import AdminAjaxHandler
from app.handlers.apis.mixins import ListCreateMixin
from app.handlers.apis.mixins import OrderMixin
from app.handlers.apis.mixins import RetrieveUpdateDeleteMixin
from app.forms.about_modules import AboutModuleForm
from app.models.about_modules import AboutModule
class AdminList(ListCreateMixin, OrderMixin, AdminAjaxHandler):
    # List/create AJAX endpoint for AboutModule, sorted by the 'order' field.
    form = AboutModuleForm
    model = AboutModule
    sort_order = 'order'
class AdminDetail(RetrieveUpdateDeleteMixin, AdminAjaxHandler):
    # Retrieve/update/delete AJAX endpoint for a single AboutModule.
    form = AboutModuleForm
    model = AboutModule
| 27.125 | 63 | 0.809524 |
226986db0791ced185b117db5de3e962f8feac47 | 3,306 | py | Python | examples/adspygoogle/dfa/v1_20/create_rotation_group.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfa/v1_20/create_rotation_group.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfa/v1_20/create_rotation_group.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | 2 | 2020-04-02T19:00:31.000Z | 2020-08-06T03:28:38.000Z | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a rotation group ad in a given campaign. To get ad types
run get_ad_types.py. Start and end date for the ad must be within campaign
start and end dates. To create creatives, run create_[type]_creative.py. To get
available placements, run get_placements.py. To get a size ID, run
get_size.py example.
Tags: ad.saveAd
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
# Placeholder values that must be filled in before running the example.
# NOTE: the int(...) casts below raise ValueError until the month/day
# placeholders are replaced with actual numbers.
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
SIZE_ID = 'INSERT_SIZE_ID_HERE'
CREATIVE_ID = 'INSERT_CREATIVE_ID_HERE'
PLACEMENT_ID = 'INSERT_PLACEMENT_ID_HERE'
AD_NAME = 'INSERT_AD_NAME_HERE'
START_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_START_YEAR_HERE',
    'month': int('INSERT_START_MONTH_HERE'),
    'day': int('INSERT_START_DAY_HERE')}
END_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_END_YEAR_HERE',
    'month': int('INSERT_END_MONTH_HERE'),
    'day': int('INSERT_END_DAY_HERE')}
def main(client, campaign_id, size_id, creative_id, placement_id, ad_name,
         start_date, end_date):
  """Creates and saves a rotation group ad with one creative and placement.

  Args:
    client: an initialized DfaClient.
    campaign_id: ID of the campaign the ad belongs to.
    size_id: ID of the ad size.
    creative_id: ID of the creative to assign.
    placement_id: ID of the placement to assign.
    ad_name: name for the new ad.
    start_date: ad start time (must fall within the campaign dates).
    end_date: ad end time (must fall within the campaign dates).
  """
  # Initialize appropriate service.
  ad_service = client.GetAdService(
      'https://advertisersapitest.doubleclick.net', 'v1.20')
  # Construct basic rotation group structure.
  rotation_group = {
      'xsi_type': 'RotationGroup',
      'name': ad_name,
      'active': 'true',
      'archived': 'false',
      'campaignId': campaign_id,
      'sizeId': size_id,
      'typeId': '1',
      'priority': '12',
      'ratio': '1',
      'rotationType': '1',
      'startTime': start_date,
      'endTime': end_date,
  }
  # Construct creative assignments and add them to the rotation group.
  creative_assignment = {
      'active': 'true',
      'creativeId': creative_id,
      'clickThroughUrl': {
          'defaultLandingPageUsed': 'true',
          'landingPageId': '0'
      }
  }
  rotation_group['creativeAssignments'] = [creative_assignment]
  # Construct placement assignments and add them to the rotation group.
  placement_assignment = {
      'active': 'true',
      'placementId': placement_id
  }
  rotation_group['placementAssignments'] = [placement_assignment]
  # Save the rotation group.
  result = ad_service.SaveAd(rotation_group)[0]
  # Display results.  (Python 2 print statement — this file targets py2.)
  print 'Ad with ID \'%s\' was created.' % result['id']
# Script entry point: build a client from the local config and run the example.
if __name__ == '__main__':
  # Initialize client object.
  client = DfaClient(path=os.path.join('..', '..', '..', '..'))
  main(client, CAMPAIGN_ID, SIZE_ID, CREATIVE_ID, PLACEMENT_ID, AD_NAME,
       START_DATE, END_DATE)
| 32.097087 | 80 | 0.692377 |
56d09c916a7cf0d7a2e5c88e24b522522d8d55cf | 1,691 | py | Python | ivi/agilent/agilentDSOX92004A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 161 | 2015-01-23T17:43:01.000Z | 2022-03-29T14:42:42.000Z | ivi/agilent/agilentDSOX92004A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 45 | 2015-01-15T13:35:04.000Z | 2021-06-03T01:58:55.000Z | ivi/agilent/agilentDSOX92004A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 87 | 2015-01-31T10:55:23.000Z | 2022-03-17T08:18:47.000Z | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSOX92004A(agilent90000):
    "Agilent Infiniium DSOX92004A IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # Set the instrument id before base-class init so identification
        # matches this model (unless a caller already set one).
        self.__dict__.setdefault('_instrument_id', 'DSOX92004A')
        super(agilentDSOX92004A, self).__init__(*args, **kwargs)
        # Model capabilities: 4 analog channels, no digital channels,
        # 20 GHz bandwidth.
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 20e9
        self._init_channels()
| 37.577778 | 86 | 0.755766 |
2b62a46c27dfc2ef57a7adef054319f893fa09b4 | 482 | py | Python | daemon/core/gui/graph/shapeutils.py | alehmannFRA-UAS/core | bcf74297851e40e383c279f1f0a7eff3257c258b | [
"BSD-2-Clause"
] | 463 | 2015-07-28T16:28:12.000Z | 2022-03-31T16:11:29.000Z | daemon/core/gui/graph/shapeutils.py | alehmannFRA-UAS/core | bcf74297851e40e383c279f1f0a7eff3257c258b | [
"BSD-2-Clause"
] | 392 | 2015-08-07T09:14:40.000Z | 2022-03-31T18:12:28.000Z | daemon/core/gui/graph/shapeutils.py | alehmannFRA-UAS/core | bcf74297851e40e383c279f1f0a7eff3257c258b | [
"BSD-2-Clause"
] | 163 | 2015-07-30T15:54:03.000Z | 2022-03-21T22:54:03.000Z | import enum
from typing import Set
class ShapeType(enum.Enum):
    """Annotation shape categories supported on the canvas."""

    MARKER = "marker"
    OVAL = "oval"
    RECTANGLE = "rectangle"
    TEXT = "text"


# Shapes rendered as geometric figures (as opposed to text or marker strokes).
SHAPES: Set[ShapeType] = {ShapeType.OVAL, ShapeType.RECTANGLE}


def is_draw_shape(shape_type: ShapeType) -> bool:
    """Return True when *shape_type* is a geometric (drawn) shape."""
    return shape_type in SHAPES


def is_shape_text(shape_type: ShapeType) -> bool:
    """Return True when *shape_type* is the text shape."""
    return shape_type is ShapeType.TEXT


def is_marker(shape_type: ShapeType) -> bool:
    """Return True when *shape_type* is the marker shape."""
    return shape_type is ShapeType.MARKER
625922d735eb5677c96ea74384b6bba56c2dce1b | 8,128 | py | Python | shgp/models/pgpr.py | GiovanniPasserello/SHGP | 7b8d06eaeb00cb745c4ad449524dfe97d404fd4e | [
"MIT"
] | null | null | null | shgp/models/pgpr.py | GiovanniPasserello/SHGP | 7b8d06eaeb00cb745c4ad449524dfe97d404fd4e | [
"MIT"
] | null | null | null | shgp/models/pgpr.py | GiovanniPasserello/SHGP | 7b8d06eaeb00cb745c4ad449524dfe97d404fd4e | [
"MIT"
] | null | null | null | from typing import Optional, Tuple
import numpy as np
import tensorflow as tf
from gpflow.config import default_float
from gpflow.covariances.dispatch import Kuf, Kuu
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import Kernel
from gpflow.mean_functions import MeanFunction
from gpflow.models.model import GPModel, MeanAndVariance
from gpflow.models.training_mixins import InputData, InternalDataTrainingLossMixin, RegressionData
from gpflow.models.util import data_input_to_tensor, inducingpoint_wrapper
from gpflow.utilities import to_default_float
from shgp.likelihoods.polya_gamma import PolyaGammaLikelihood
from shgp.robustness.linalg import robust_cholesky
tf.config.run_functions_eagerly(True)
class PGPR(GPModel, InternalDataTrainingLossMixin):
    """
    Collapsed implementation of Polya-Gamma GPR, based on the heteroscedastic
    implementation of SGPR with Polya-Gamma data augmentation.
    This model is one of the key contributions of the thesis - for a derivation
    of the below operations, see the thesis appendices.
    """
    def __init__(
        self,
        data: RegressionData,
        kernel: Kernel,
        *,
        inducing_variable: Optional[InducingPoints] = None,
        mean_function: Optional[MeanFunction] = None,
        num_latent_gps: Optional[int] = None
    ):
        """
        `data`: a tuple of (X, Y), where the inputs X has shape [N, D]
        and the outputs Y has shape [N, R].  Y must contain only 0/1
        labels; they are remapped to {-1, +1} internally.
        `kernel`, `mean_function` are appropriate GPflow objects.
        `inducing_variable`: an InducingPoints instance or a matrix of
        the pseudo inputs Z, of shape [M, D].
        """
        X_data, Y_data = data_input_to_tensor(data)
        # Y_data must be in (-1, +1), not (0, 1): assert binary labels,
        # then remap {0, 1} -> {-1, +1}.
        assert_y = tf.Assert(tf.reduce_all((Y_data == 0.0) | (Y_data == 1.0)), [Y_data])
        with tf.control_dependencies([assert_y]):
            Y_data = Y_data * 2.0 - 1.0
        self.data = X_data, Y_data
        self.num_data = X_data.shape[0]
        # Holds the per-datum Polya-Gamma variational parameters c_i.
        self.likelihood = PolyaGammaLikelihood(num_data=self.num_data, variance=0.1)
        num_latent_gps = Y_data.shape[-1] if num_latent_gps is None else num_latent_gps
        super().__init__(kernel, self.likelihood, mean_function, num_latent_gps=num_latent_gps)
        # Default: one inducing point per training input.
        if inducing_variable is None:
            self.inducing_variable = inducingpoint_wrapper(data[0].copy())
        else:
            self.inducing_variable = inducingpoint_wrapper(inducing_variable)
    def maximum_log_likelihood_objective(self, *args, **kwargs) -> tf.Tensor:
        # The training objective is the collapsed ELBO.
        return self.elbo()
    def optimise_ci(self, num_iters=10) -> None:
        """
        Iteratively update the local parameters, this forms a cycle between
        updating c_i and q_u which we iterate a number of times. Typically we
        can choose num_iters < 10.
        """
        for _ in range(num_iters):
            Fmu, Fvar = self.predict_f(self.data[0])
            self.likelihood.update_c_i(Fmu, Fvar)
    def elbo(self) -> tf.Tensor:
        """
        Computes a lower bound on the marginal likelihood of the PGPR model.
        """
        # metadata
        X_data, Y_data = self.data
        num_inducing = to_default_float(self.inducing_variable.num_inducing)
        output_dim = to_default_float(tf.shape(Y_data)[1])
        num_data = to_default_float(self.num_data)
        # compute initial matrices
        err = Y_data - self.mean_function(X_data)
        Kdiag = self.kernel(X_data, full_cov=False)
        kuf = Kuf(self.inducing_variable, self.kernel, X_data)
        kuu = Kuu(self.inducing_variable, self.kernel)
        L = robust_cholesky(kuu)
        # theta: Polya-Gamma variational means, transposed to broadcast
        # against the rows of kuf — presumably shape [1, N]; TODO confirm
        # against PolyaGammaLikelihood.compute_theta().
        theta = tf.transpose(self.likelihood.compute_theta())
        theta_sqrt = tf.sqrt(theta)
        theta_sqrt_inv = tf.math.reciprocal(theta_sqrt)
        # compute intermediate matrices
        A = tf.linalg.triangular_solve(L, kuf, lower=True) * theta_sqrt
        AAT = tf.matmul(A, A, transpose_b=True)
        B = AAT + tf.eye(num_inducing, dtype=default_float())
        LB = robust_cholesky(B)
        A_theta_sqrt_inv_err = tf.matmul(A * theta_sqrt_inv, err)
        c = 0.5 * tf.linalg.triangular_solve(LB, A_theta_sqrt_inv_err, lower=True)
        # compute log marginal bound
        bound = -output_dim * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
        bound += 0.5 * tf.reduce_sum(tf.square(c))
        bound -= 0.5 * output_dim * tf.reduce_sum(Kdiag * theta)
        bound += 0.5 * output_dim * tf.reduce_sum(tf.linalg.diag_part(AAT))
        bound -= num_data * np.log(2)
        bound -= self.likelihood.kl_term()
        return bound
    def predict_f(self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False) -> MeanAndVariance:
        """
        Compute the mean and variance of the latent function at some new points Xnew.
        """
        # metadata
        X_data, Y_data = self.data
        num_inducing = self.inducing_variable.num_inducing
        # compute initial matrices
        err = Y_data - self.mean_function(X_data)
        kuf = Kuf(self.inducing_variable, self.kernel, X_data)
        kuu = Kuu(self.inducing_variable, self.kernel)
        kus = Kuf(self.inducing_variable, self.kernel, Xnew)
        L = robust_cholesky(kuu)
        theta = tf.transpose(self.likelihood.compute_theta())
        theta_sqrt = tf.sqrt(theta)
        theta_sqrt_inv = tf.math.reciprocal(theta_sqrt)
        # compute intermediate matrices (same construction as elbo()).
        A = tf.linalg.triangular_solve(L, kuf, lower=True) * theta_sqrt
        AAT = tf.matmul(A, A, transpose_b=True)
        B = AAT + tf.eye(num_inducing, dtype=default_float())
        LB = robust_cholesky(B)
        A_theta_sqrt_inv_err = tf.matmul(A * theta_sqrt_inv, err)
        # (lower=True is the default for triangular_solve.)
        c = 0.5 * tf.linalg.triangular_solve(LB, A_theta_sqrt_inv_err)
        # compute predictive
        tmp1 = tf.linalg.triangular_solve(L, kus, lower=True)
        tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tmp2, c, transpose_a=True)
        if full_cov:
            var = (
                self.kernel(Xnew)
                + tf.matmul(tmp2, tmp2, transpose_a=True)
                - tf.matmul(tmp1, tmp1, transpose_a=True)
            )
            var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]
        else:
            var = (
                self.kernel(Xnew, full_cov=False)
                + tf.reduce_sum(tf.square(tmp2), 0)
                - tf.reduce_sum(tf.square(tmp1), 0)
            )
            var = tf.tile(var[:, None], [1, self.num_latent_gps])
        return mean + self.mean_function(Xnew), var
    def predict_y(self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False) -> MeanAndVariance:
        """
        Predict the mean and variance for unobserved values at some new points Xnew.
        """
        mean, var = self.predict_f(Xnew)
        return mean, var + self.likelihood.noise_variance(mean, var)
    def compute_qu(self) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Computes the mean and variance of q(u) = N(m,S), the variational distribution on inducing outputs.
        """
        # metadata
        X_data, Y_data = self.data
        # compute initial matrices
        kuf = Kuf(self.inducing_variable, self.kernel, X_data)
        kuu = Kuu(self.inducing_variable, self.kernel)
        theta = tf.transpose(self.likelihood.compute_theta())
        # compute intermediate matrices
        err = Y_data - self.mean_function(X_data)
        kuf_theta = kuf * theta
        # sig = Kuu + Kuf diag(theta) Kfu
        sig = kuu + tf.matmul(kuf_theta, kuf, transpose_b=True)
        sig_sqrt = robust_cholesky(sig)
        sig_sqrt_inv_kuu = tf.linalg.triangular_solve(sig_sqrt, kuu)
        kuf_err = tf.matmul(kuf, err)
        # compute distribution
        mu = 0.5 * (
            tf.matmul(
                sig_sqrt_inv_kuu,
                tf.linalg.triangular_solve(sig_sqrt, kuf_err),
                transpose_a=True
            )
        )
        cov = tf.matmul(sig_sqrt_inv_kuu, sig_sqrt_inv_kuu, transpose_a=True)
        return mu, cov
31bea9b848fc63ecddccce91c34c0f75e9e84d13 | 1,662 | py | Python | Fuzzy-C/fuzzy_c.py | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | Fuzzy-C/fuzzy_c.py | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | Fuzzy-C/fuzzy_c.py | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | from FCM import *
import logging
import numpy as np
def fcm_script(data):
    """Run stratified 5-fold fuzzy C-means on the CSV file *data*.

    The first CSV column is taken as the label, the rest as features.
    Returns (model, score, error): the FCM model, the per-fold accuracy
    list and the per-fold RMSE list.
    NOTE(review): relies on names (pd, sys, StratifiedKFold) pulled into
    scope by ``from FCM import *`` — confirm FCM exposes them.
    """
    # To be used if the column names are known a priori:
    #feat1=sys.argv[2]
    #feat2=sys.argv[3]
    #labels=sys.argv[4]
    dataset=pd.read_csv(data)
    # extract features and labels
    #X = dataset[[feat1, feat2]].values
    #y = dataset[labels].values
    X = dataset.iloc[:, 1:]
    y = dataset.iloc[:, 0]
    X = np.asarray(X)
    y = np.asarray(y)
    model = FCM()
    N_SPLITS = 5;
    N_CLUSTER = 2;
    error=[]
    score=[]
    # cross validation (stratified so each fold keeps the label balance)
    skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=1)
    for train_index, test_index in skf.split(X, y):
        #print("TRAIN:", train_index, "\nTEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        # NOTE(review): y_train is never used — fuzzy_train is unsupervised.
        y_train, y_test = y[train_index], y[test_index]
        # training
        train_membership, centers = model.fuzzy_train(X_train , N_CLUSTER , 2)
        # test
        test_membership = model.fuzzy_predict(X_test , N_CLUSTER , centers, 2)
        # With 2 clusters use the membership-based RMSE, otherwise the
        # plain RMSE.
        if(N_CLUSTER==2):
            error.append(model.RMSE_membership(test_membership, y_test))
        else:
            error.append(model.RMSE(test_membership, y_test))
        score.append(model.accuracy(test_membership, y_test))
    return model, score, error
def main():
    """CLI entry point: run fcm_script on the CSV given as sys.argv[1],
    log the per-fold results and print mean accuracy and mean RMSE."""
    logging.basicConfig(filename = 'esperimenti.log', format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
    # import data
    data=sys.argv[1]
    logging.info('Questo esperimento lavora sul dataset: %s', data)
    model, score, error = fcm_script(data)
    logging.info(f'{score} = array delle accuratezze')
    logging.info(f'{error} = array delle RMSE_membership \n')
    print(np.mean(score))
    print (np.mean(error))
# Script entry point.
if __name__ == '__main__':
    main()
| 23.083333 | 135 | 0.700963 |
aeb5ee7d0d39f54632891961fa10b710da214b40 | 57,466 | py | Python | teimedit.py | gmaterni/teimedpy | f9cf58a37ec30fde406dce677e160bcbb120f746 | [
"MIT"
] | null | null | null | teimedit.py | gmaterni/teimedpy | f9cf58a37ec30fde406dce677e160bcbb120f746 | [
"MIT"
] | null | null | null | teimedit.py | gmaterni/teimedpy | f9cf58a37ec30fde406dce677e160bcbb120f746 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
from pdb import set_trace
import platform
import argparse
import os
import pprint
import sys
import re
import tkinter as tk
import tkinter.filedialog as fdialog
import tkinter.messagebox as mbox
from lxml import etree
import teimedlib.check_teimed as chk
import teimedlib.pathutils as ptu
import teimedlib.teim_paths as tpth
from teimedlib.clean_text import clean_text
from teimedlib.edit_constants import *
from teimedlib.findfile import *
from teimedlib.listbox import open_listbox
from teimedlib.readovertags import read_over_tags
from teimedlib.teim_constants import *
from teimedlib.textedit import TextEdit
from teimedlib.textpad import TextPad
from teimedlib.ualog import Log
from teimedlib.uarc import UaRc
from teimedlib.tei_head import add_tei_head
from checkover import do_main as do_main_checkover
from checktxt import do_main as do_main_checktxt
from teimnote import do_main as do_main_note
from teimover import do_main as do_main_over
from teimsetid import do_main as do_main_setid
from teimxml import do_main as do_main_xml
from teixml2txt import do_main as do_main_xml2txt
__date__ = "30-03-2022"
__version__ = "1.2.4"
__author__ = "Marta Materni"
# -d paris (facoltativo)
# cerca a partire dalla directory "paris" tutti i testi
# che corrispondono al nome o alla wilcard dell'opzione -t
# se ne trova più di uno. mostra una lista per selezionare
# se ne trova uno, lo seleziona e procede
# se non ne trova nessuno, mostra un messaggio e procede
def HELP_RUN_OPS():
    """Return the CLI usage/help text (Italian) printed for the -x flag."""
    s = """
    teimedit.py -c ancestor_teimfcg -t <file_text -e
    Lancio dell'applicazionw
    teimedit.py -h
    lista delle opzioni
    teimedit.py -x
    Visualizza istruzioni
    -c ancestor_teimfcg (facoltativo)
    cerca a partire dalla directory "acncestor_teimfcg" la directory "teimcfg"
    se ne trova più di una, mostra la lista per selezionarne una.
    se ne trova una, lo seleziona e procede
    se non ne trova nessuna, esce dall'applicazione
    se non è settata cerca la dir "teimcfg" a partirre dalla dir corente
    -t testo.txt
    nome del file o wilcard dei files di testo da elaborare
    se si utilizza la wilcard, usare apici o virgolette:
    es.
    teimedit.py -t "a*.txt"
    restituisce tutti i file che iniziano in "a" e terminano in ".txt"
    E' possibile usare "$" senza gli apici.
    es:
    teimedit.py -t a$.txt
    - e <facoltativo>
    cancella la storia delle precedenti sessioni
    L' applicativo salva i parametri con i qualei
    è stato lanciato e posizioni e dimensioni delle finestre
    al momento della chiusura.
    Qundi lanciandolo sucessivamente senza alcun parametro usa
    quelli memorizzati.
    Il flag -e cancella la storia dela sessione.
    es.
    teimedit.py -c florimontr -t 'paris/*.txt'
    Cerca "teimfcg" nella dir florimont e visualizza una lista
    dir tutti i file della dir 'paris/' con estensione '.txt'.
    teimedit -t 'paris/e*.txt'
    Utilizza per 'teimfcg' la dir precedentemente settata
    visualizzate la lsita dei file nella dir 'paris'/ che iniziando
    con 'e' e terminano con '.txt'
    teimedit-py -t 'paris/e*.txt' -e
    Ceca i file di testo come nel caso precedente, ma cterca 'teimfcg'
    a partire dalla dir corrente e visualizza le finestre nelle
    posizioni e con le dimension iniziali.
    Tasti di controllo:
    Ctrl-q
    esce dall'applicazione.E' abilitato solo dalla finestra
    con la barra di menu.
    Ctrl-w
    quando ci si tova su una fieìnestra selezionata sopra a tutte
    le altre ristabilisce l'ordine di default permettendo l'accesso
    al menu e quindi a Ctrl-q.
    """
    return s
# Default Tk window geometries ("WxH+X+Y") for the four editor windows.
g_win0 = '1000x600+50+50'
g_win1 = '1000x600+150+150'
g_win2 = '1000x600+250+250'
g_win3 = '1000x600+350+350'
def pp(data, w=60):
    """Pretty-format *data* as a string, wrapping output at width *w*."""
    formatted = pprint.pformat(data, width=w)
    return formatted
# Keys used in the .teimeditrc session-persistence file.
RC_PARENT_TEIMCFG = "parent_teimcfg"
# RC_ROOT_DIR = "root_dir"
# RC_PATH_MATCH = "path_match"
RC_PATH_LAST = "path_last"
RC_RECENTI = "recenti"
###############
# Default search roots and the application error-log path.
PATH_ROOT_DIR = "."
PARENT_TEIMCFG = "."
TEIMEDIT_LOG = "log/teimedit.log"
# Module-level singletons: session store, error logger, lowercase OS name.
rc = UaRc('.teimeditrc')
log_err = Log("w")
os_name = platform.system().lower()
class TeimEdit(object):
def __init__(self,
             parent_teimcfg,
             root_dir,
             path_match):
    """Initialize editor state; no windows are created here (see open_win0).

    Args:
        parent_teimcfg (str, optional): parent dir of "teimcfg".
        root_dir (str, optional): parent dir of the text file(s).
        path_match (str, optional): wildcard for the text file(s).
    """
    # log_err: open the application error log (fresh each run).
    self.path_edit_err_p = ptu.str2path(TEIMEDIT_LOG)
    self.path_edit_err_s = ptu.path2str(self.path_edit_err_p)
    log_err.open(self.path_edit_err_s, 1)
    log_err.log('')
    # rc init: persist the teimcfg parent dir across sessions.
    self.parent_teimcfg_s = rc.upd(
        RC_PARENT_TEIMCFG, parent_teimcfg, PARENT_TEIMCFG)
    # x = rc.upd(RC_ROOT_DIR, root_dir)
    # self.root_dir_p = ptu.str2path(x)
    # self.path_match_s = rc.upd(RC_PATH_MATCH, path_match)
    self.path_match_s = path_match
    # rc.prn('init')
    ########################
    self.pwd = ptu.cwd()
    # In/out/log/err paths for every elaboration stage; all filled in by
    # set_paths() once a text file is selected.
    self.path_text_s = ''
    self.text_dir_s = ''
    self.path_note_csv_s = ''
    self.path_teim_in_s = ''
    self.path_teim_out_s = ''
    self.path_teim_log_s = ''
    self.path_teim_err_s = ''
    self.path_id_in_s = ''
    self.path_id_out_s = ''
    self.path_id_log_s = ''
    self.path_id_err_s = ''
    self.path_over_in_s = ''
    self.path_over_out_s = ''
    self.path_over_log_s = ''
    self.path_over_err_s = ''
    self.path_note_in_s = ''
    self.path_note_out_s = ''
    # self.path_note_log_s = ''
    self.path_note_err_s = ''
    self.path_xml_in_s = ''
    self.path_xml_out_s = ''
    self.path_xml_err_s = ''
    self.path_tei_in_s = ''
    self.path_tei_out_s = ''
    self.path_tei_err_s = ''
    self.path_xml2txt_in_s = ''
    self.path_xml2txt_out_s = ''
    self.path_xml2txt_err_s = ''
    self.path_checktxt_in_s = ''
    self.path_checktxt_out_s = ''
    self.path_checkover_in_s = ''
    self.path_checkover_out_s = ''
    ####################
    # Counter used to cross-check the number of highlight tags applied.
    self.tag_num_debug = 0
    # Filled after the cfg has been read (see set_teimcfg_paths).
    self.tag_over_lst = []
    # Widgets; created by open_win0..open_win3.
    self.text_edit = None
    self.win0 = None
    self.win1 = None
    self.txt1 = None
    self.win2 = None
    self.txt2 = None
    self.win3 = None
    self.txt3 = None
    # rc.get: restore saved window geometries (defaults g_win0..g_win3).
    self.geometry_win0 = rc.get('win0', g_win0)
    self.geometry_win1 = rc.get('win1', g_win1)
    self.geometry_win2 = rc.get('win2', g_win2)
    self.geometry_win3 = rc.get('win3', g_win3)
    #####################################
    # Overflow "Check" menu; populated once by add_mv_check().
    self.mv_check = None
    self.mv_check_filled = False
####################################
# UA gestione files
####################################
def write_file(self, path, text):
    """Write *text* to *path* (str or pathlib.Path).

    On IOError the error is logged, shown in an error box, and the
    method returns without raising.
    """
    # if isinstance(path, str):
    #     path = ptu.str2path(path)
    # path.write_text(text)
    if not isinstance(path, str):
        # Callers are expected to pass str paths; convert and warn.
        # NOTE(review): original indentation was lost in extraction — the
        # warning print is assumed to belong inside this branch; confirm.
        path = ptu.path2str(path)
        print(f"WARNING write_file {path}")
    try:
        with open(path, "w") as f:
            f.write(text)
    except IOError as e:
        log_err.log(f"ERROR write_file() {path}\n{e}")
        self.top_free()
        mbox.showerror("", f"{path}\n{e}")
        return
    # self.chmod(path)
    # try:
    #     ptu.chmod(path)
    # except Exception as e:
    #     log_err.log(f"ERROR {path}\n{e}")
    #     self.top_free()
    #     mbox.showerror("", f"{path}\n{e}")
def read_file(self, path):
    """Read and return the UTF-8 content of *path* (str or pathlib.Path).

    If the file does not exist, a message is shown in the LOG window
    and '' is returned.
    """
    if isinstance(path, str):
        path = ptu.str2path(path)
    if not path.exists():
        s = ptu.path2str(path)
        self.show_log_lift(f"File:{s} Not Found.A1", True)
        return ''
    with path.open('r', encoding='utf-8') as f:
        s = f.read()
    return s
# def chmod(self, path):
#     #AAA if isinstance(path, str):
#     #     path = ptu.str2path(path)
#     # try:
#     #     ptu.chmod(path)
#     try:
#         ptu.chmod(path)
#     except Exception as e:
#         log_err.log(f"ERROR chmod() {path}\n{e}")
#         self.top_free()
#         mbox.showerror("", f"{path}\n{e}")
def read_text_file(self):
    """Load the currently-selected text file into the editor widget.

    No-op if no file is selected; shows an error box if the file is
    missing or unreadable. On success also refreshes the path display,
    raises win0 and updates the rc "recent files" state.
    """
    if self.path_text_s == '':
        return
    if not self.path_text_p.exists():
        msg = f"{self.path_text_s} Not Found"
        self.top_free()
        mbox.showerror("", msg)
        return
    try:
        s = self.read_file(self.path_text_p)
    except Exception as e:
        msg = f"{self.path_text_s} Not Found"
        log_err.log(f'read_text_file()\n{msg}')
        self.top_free()
        mbox.showerror("", msg)
    else:
        self.text_edit.insert_text(s)
        # NOTE(review): "show_psths" looks like a typo for "show_paths";
        # the method is defined outside this chunk — confirm its name.
        self.show_psths()
        self.win0.lift()
        self.text_edit.focus_set()
        self.rc_update()
def read_log_file(self, path):
    """Show the content of log file *path* in the LOG window.

    Accepts str or pathlib.Path. If the file is missing, an info box
    is shown instead of raising.
    """
    if isinstance(path, str):
        path = ptu.str2path(path)
    if path.exists():
        s = self.read_file(path)
        self.show_log_top(s)
    else:
        self.top_free()
        # fixed user-facing typo: "Not Foud" -> "Not Found"
        mbox.showinfo("", f"{path} Not Found", parent=self.win0)
def rfind_teimcfg(self):
    """Recursively search for the "teimcfg" dir.

    The search starts from parent_teimcfg if set, otherwise from the
    current directory. If exactly one match is found it is used; with
    several matches a list box lets the user pick one; with none the
    application exits. On selection invokes:
        set_teimcfg_paths()
        find_file_text()
    """
    try:
        if self.parent_teimcfg_s is None:
            dir_s = PARENT_TEIMCFG
        else:
            dir_s = self.parent_teimcfg_s
        dir_p = ptu.str2path(dir_s)
        dir_p_lst = rfind_dir_lst(dir_p, TEIMCFG)
        dir_s_lst = ptu.pathlist2strlist(dir_p_lst)
        le = len(dir_p_lst)
        if le == 0:
            self.iconify()
            # fixed typo: "Not Foud" -> "Not Found"
            msg = f"{TEIMCFG} Not Found"
            mbox.showerror("", msg)
            sys.exit(msg)
        elif le == 1:
            path_teimcfg_s = dir_s_lst[0]
            self.set_teimcfg_paths(path_teimcfg_s)
            self.find_file_text()
        else:
            def on_select(n):
                # n < 0 means the selection was cancelled.
                if n < 0:
                    return
                path_teimcfg_s = dir_s_lst[n]
                self.set_teimcfg_paths(path_teimcfg_s)
                self.find_file_text()
            open_listbox(dir_s_lst, on_select, "find teimcfg")
            return
    except Exception as e:
        # fixed method name in log message (was "find_teimfcg")
        msg = f"ERROR rfind_teimcfg()\n{e}"
        log_err.log(msg)
        self.iconify()
        # fixed typo: "Not Foud" -> "Not Found"
        mbox.showerror("", f"{TEIMCFG}\nNot Found")
        sys.exit(msg)
def find_file_text(self):
    """Resolve the -t argument: a plain path or a wildcard.

    A plain existing path is loaded directly; a wildcard is expanded
    and, when it matches several files, a list box lets the user pick.
    """
    match_file = self.path_match_s
    if match_file is None:
        return
    # Not a wildcard: the path exists as given.
    if ptu.exists(match_file):
        # if self.is_path(match_file):
        self.set_paths(match_file)
        self.read_text_file()
        return
    # match_file is a wildcard
    try:
        file_p_lst = find_file_lst(None, match_file)
        file_s_lst = ptu.pathlist2strlist(file_p_lst)
        le = len(file_p_lst)
        if le == 0:
            self.top_free()
            msg = f"File: {match_file}\nNot Found"
            mbox.showinfo("", msg, parent=self.win0)
        elif le == 1:
            path_text_s = file_s_lst[0]
            self.set_paths(path_text_s)
            self.read_text_file()
        else:
            def load_file(n):
                # n < 0 means the selection was cancelled.
                if n < 0:
                    return
                path_text_s = file_s_lst[n]
                self.set_paths(path_text_s)
                self.read_text_file()
            open_listbox(file_s_lst, load_file, "find text")
    except Exception as e:
        msg = f"ERROR find_file_text()\n{match_file}\n{e}"
        log_err.log(msg)
        self.show_log_top(msg)
def get_edit_text(self):
s = self.text_edit.get('1.0', 'end')
return s.strip()
####################################
# UA gestione path
####################################
def set_teimcfg_paths(self, path_s):
    """Record the teimcfg dir and derive the config-file paths from it.

    Verifies that overflow.csv, teimtags.csv, teimxmlid.csv and the TEI
    header file all exist (exits the application otherwise), then loads
    the overflow table and populates the Check menu.
    """
    self.path_teimcfg_s = path_s
    try:
        path_p = ptu.str2path(path_s)
        self.path_teimcfg_p = path_p
        self.parent_teimcfg_p = path_p.parent
        self.parent_teimcfg_s = ptu.path2str(path_p.parent)
        self.path_entity_csv_p = ptu.join(path_p, TEIMTAGS_CSV)
        self.path_over_csv_p = ptu.join(path_p, TEIMOVERFLOW_CSV)
        self.path_xmlid_csv_p = ptu.join(path_p, TEIMXMLID_CSV)
        self.path_tei_head_p = ptu.join(path_p, TEI_HEAD)
        self.path_entity_csv_s = ptu.path2str(self.path_entity_csv_p)
        self.path_over_csv_s = ptu.path2str(self.path_over_csv_p)
        self.path_xmlid_csv_s = ptu.path2str(self.path_xmlid_csv_p)
        self.path_tei_head_s = ptu.path2str(self.path_tei_head_p)
        if not self.path_over_csv_p.exists():
            self.iconify()
            msg = f"overflow.csv:{self.path_over_csv_p} Not Found"
            mbox.showerror("", msg)
            sys.exit(msg)
        if not self.path_entity_csv_p.exists():
            self.iconify()
            msg = f"teimtags.csv:{self.path_entity_csv_p} Not Found"
            mbox.showerror("", msg)
            # self.show_log_top(msg)
            sys.exit(msg)
        if not self.path_xmlid_csv_p.exists():
            self.iconify()
            msg = f"teimxmlid.csv:{self.path_xmlid_csv_p} Not Found"
            mbox.showerror("", msg)
            sys.exit(msg)
        if not self.path_tei_head_p.exists():
            self.iconify()
            msg = f"tei.xml:{self.path_tei_head_p} Not Found"
            mbox.showerror("", msg)
            sys.exit(msg)
        # Read the overflow table...
        lst = read_over_tags(self.path_over_csv_p)
        # ...and build the table used by the Check menu.
        self.tag_over_lst = chk.fill_tag_over_lst(lst)
        self.add_mv_check()
    except Exception as e:
        msg = f"ERROR set_teimcfg_paths()\n {e}"
        log_err.log(msg)
        self.iconify()
        mbox.showerror("", f"ERROR \nset_teimcfg_paths() ")
        sys.exit(msg)
def set_paths(self, path_text_s):
    """Derive every per-stage in/out/log/err path from the text file path.

    Creates the ./log directory next to the text file. The comments
    show the naming scheme for each elaboration stage.
    """
    try:
        self.path_text_s = path_text_s
        self.path_text_p = ptu.str2path(path_text_s)
        # pathlib path definitions
        self.text_dir_p = self.path_text_p.parent
        self.text_dir_s = ptu.path2str(self.text_dir_p)
        self.log_dir_p = ptu.join(self.text_dir_p, "log")
        ptu.make_dir(self.log_dir_p)
        path_note_csv_p = ptu.join(self.text_dir_p, TEIMNOTE_CSV)
        self.path_note_csv_s = ptu.path2str(path_note_csv_p)
        # test.txt => ./log/testo_teim.txt
        self.path_teim_in_s = path_text_s
        self.path_teim_out_s = tpth.set_path_teim_out(path_text_s)
        self.path_teim_log_s = tpth.set_path_teim_log(path_text_s)
        self.path_teim_err_s = tpth.set_path_teim_err(path_text_s)
        # test.txt => ./log/testo_id.xml
        self.path_id_in_s = path_text_s
        self.path_id_out_s = tpth.set_path_id_out(path_text_s)
        self.path_id_log_s = tpth.set_path_id_log(path_text_s)
        self.path_id_err_s = tpth.set_path_id_err(path_text_s)
        # test.txt => ./log/testo_id_over.xml
        self.path_over_in_s = path_text_s
        self.path_over_out_s = tpth.set_path_over_out(path_text_s)
        self.path_over_log_s = tpth.set_path_over_log(path_text_s)
        self.path_over_err_s = tpth.set_path_over_err(path_text_s)
        # test.txt => ./log/testo_id_over_note.xml
        # test.txt => testo.xml
        self.path_note_in_s = path_text_s
        self.path_note_out_s = tpth.set_path_note_out(path_text_s)
        # self.path_note_log_s = tpth.set_path_note_log(path_text_s)
        self.path_note_err_s = tpth.set_path_note_err(path_text_s)
        # ./log/testo_id_over_note.xml => testo.xml
        self.path_xml_in_s = tpth.set_path_xml_in(path_text_s)
        self.path_xml_out_s = tpth.set_path_xml_out(path_text_s)
        self.path_xml_err_s = tpth.set_path_xml_err(path_text_s)
        self.path_tei_in_s = tpth.set_path_tei_in(path_text_s)
        self.path_tei_out_s = tpth.set_path_tei_out(path_text_s)
        self.path_tei_err_s = tpth.set_path_tei_err(path_text_s)
        # log/testo_id_over_note.xml => testo_text.txt
        self.path_xml2txt_in_s = self.path_xml_out_s
        path_xml2txt_out_s = path_text_s.replace('.txt', '_text.txt')
        self.path_xml2txt_out_s = path_xml2txt_out_s
        path_xml2txt_err_s = path_text_s.replace('.txt', '_text.ERR.log')
        self.path_xml2txt_err_s = path_xml2txt_err_s
        # test.txt => ./log/testo_checktxt.log
        self.path_checktxt_in_s = path_text_s
        self.path_checktxt_out_s = tpth.set_path_checktxt_out(path_text_s)
        # self.path_checktxt_err_s = tpth.set_path_checktxt_err( path_text_s)
        # test.txt => ./log/testo_checkover.log
        self.path_checkover_in_s = path_text_s
        self.path_checkover_out_s = tpth.set_path_checkover_out(
            path_text_s)
        # self.path_checkover_err_s = tpth.set_path_checkover_err( path_text_s)
    except Exception as e:
        msg = f"ERROR set_paths()\n{e}"
        log_err.log(msg)
        self.iconify()
        mbox.showerror("", f"ERROR set_paths ")
        sys.exit(msg)
###############################################
# UA GUI start
##############################################
# menu dei controlli ovwerflow
def add_mv_check(self):
    """Append one entry per overflow tag to the Check menu (idempotent).

    Each entry in tag_over_lst is [func_type, name, so, sc, po, pc];
    func_type 0 ({}-style tags) dispatches to find_form_to, anything
    else ([]-style tags) to find_over.
    """
    # lst.append([func_type, name, so, sc, po, pc])
    lst = self.tag_over_lst
    if lst is None:
        return
    # Already populated: adding again would duplicate the entries.
    if self.mv_check_filled:
        return
    # self.mv_check.delete(9, 20)
    for item in lst:
        t, name, so, sc, po, pc = item
        lbl = f'{name}: {so} {sc}'
        if t == 0:
            # tag with {} — default args bind po/pc per iteration
            self.mv_check.add_command(
                label=lbl,
                command=lambda x=po, y=pc, : self.find_form_to(x, y))
        else:
            # tag with []
            self.mv_check.add_command(
                label=lbl,
                command=lambda x=po, y=pc: self.find_over(x, y))
    self.mv_check.add_separator()
    self.mv_check_filled = True
def open_win0(self):
    """Create the main window, menu bar and secondary windows, then run Tk.

    Builds the File/Edit/Check/Elab./Log/Del./Help menus, the window
    selector entries, creates win1..win3, binds the global shortcuts,
    performs the initial teimcfg lookup and finally enters the Tk
    main loop (this call blocks until app_quit()).
    """
    def new_mv():
        # Build a drop-down menu with the application's shared styling.
        mv = tk.Menu(menu_bar, tearoff=0)
        mv.config(
            font=FONT_MENU,
            bg=BG_MENU,
            fg=FG_MENU,
            activebackground=BG2_MENU,
            activeforeground=FG2_MENU,
            relief=tk.SOLID)
        return mv
    self.win0 = tk.Tk()
    # self.win0.withdraw()
    self.win0.title("TeimEdit")
    self.win0.geometry(self.geometry_win0)
    self.win0.config(background=BG_WIN, pady=2)
    # Disable the WM close button; quitting goes through app_quit().
    self.win0.protocol("WM_DELETE_WINDOW", lambda: False)
    # self.lbl_path_var = tk.StringVar()
    # self.lbl_path_var.set("")
    self.text_edit = TextEdit(self.win0)
    self.text_edit.focus_set()
    self.text_edit.config(spacing1=2, spacing3=2)
    self.text_edit.pack(fill=tk.BOTH, expand=True)
    menu_bar = tk.Menu(self.win0, tearoff=0)
    menu_bar.config(
        font=FONT_MENU,
        bg=BG_MENU,
        fg=FG_MENU,
        activebackground=BG2_MENU,
        activeforeground=FG2_MENU,
        bd=2,
        relief=tk.SOLID)
    self.win0.config(menu=menu_bar)
    # --- File menu ---
    mv_file = new_mv()
    mv_file.add_command(
        label='Reload', command=self.read_text_file, underline=0)
    mv_file.add_command(label='Recent Files',
                        command=self.open_recenti, underline=0)
    mv_file.add_separator()
    mv_file.add_command(label='Open Ctrl-O', command=self.open_text)
    mv_file.add_separator()
    mv_file.add_command(label='Save Ctrl-S', command=self.save_text)
    mv_file.add_command(label='Save As... Ctrl-Shift-S',
                        command=self.save_text_as)
    mv_file.add_separator()
    mv_file.add_command(label='Quit',
                        command=self.app_quit,
                        underline=0,
                        background=BG_MENU_LBL,
                        foreground=FG_MENU_LBL,
                        activebackground=BG2_MENU_LBL,
                        activeforeground=FG2_MENU_LBL)
    mv_file.add_separator()
    self.text_edit.bind("<Control-o>", self.open_text)
    self.text_edit.bind("<Control-s>", self.save_text)
    self.text_edit.bind("<Control-Shift-S>", self.save_text_as)
    # --- Edit menu ---
    mv_edit = new_mv()
    # NOTE(review): the Undo entry has no command= — it appears inert;
    # the Ctrl-Z binding is presumably handled by the TextEdit widget.
    mv_edit.add_command(label="Undo Ctrl-Z")
    mv_edit.add_command(label="Redo Ctrl-Shift-Z",
                        command=self.text_edit.on_redo)
    mv_edit.add_separator()
    mv_edit.add_command(label="Cut", accelerator="Ctrl+X",
                        command=lambda: self.text_edit.event_generate('<Control-x>'))
    mv_edit.add_command(label="Copy", accelerator="Ctrl+C",
                        command=lambda: self.text_edit.event_generate('<Control-c>'))
    mv_edit.add_command(label="Paste", accelerator="Ctrl+V",
                        command=lambda: self.text_edit.event_generate('<Control-v>'))
    mv_edit.add_separator()
    mv_edit.add_command(label="Find&Replace Ctrl-F",
                        command=self.text_edit.find_replace)
    #########################################
    # --- Check menu (completed later by add_mv_check) ---
    self.mv_check = new_mv()
    self.mv_check.add_command(label='Check Entities Log',
                              command=self.elab_checktxt)
    self.mv_check.add_command(label='Check Overflow Log',
                              command=self.elab_checkover)
    self.mv_check.add_separator()
    self.mv_check.add_command(label='Entities Comma',
                              command=self.fin_entity_comma)
    self.mv_check.add_command(label='Entities Brackets',
                              command=self.find_entity_brackets)
    self.mv_check.add_command(label='Entities Undefined',
                              command=self.find_entity_undefined)
    self.mv_check.add_command(label='Clean', command=self.del_tags)
    self.mv_check.add_separator()
    #########################################
    # --- Elab. menu ---
    mv_elab = new_mv()
    mv_elab.add_command(label='Elab. Entities', command=self.elab_teimxml)
    mv_elab.add_command(label='Elab. Set ID', command=self.elab_teimsetid)
    mv_elab.add_command(label='Elab. Overflow', command=self.elab_teimover)
    mv_elab.add_command(label='Elab. Note', command=self.elab_teimnote)
    mv_elab.add_command(label='Elab. TEI', command=self.elab_tei)
    mv_elab.add_separator()
    mv_elab.add_command(label='XML => text', command=self.elab_xml2txt)
    mv_elab.add_separator()
    mv_elab.add_command(label='Elab. All', command=self.elab_all)
    # --- Log menu ---
    mv_log = new_mv()
    mv_log.add_command(label='Check Text Err.', command=self.show_checktxt)
    mv_log.add_command(label='Check Over Err.',
                       command=self.show_checkover)
    mv_log.add_separator()
    mv_log.add_command(label='Entity Out', command=self.show_entity_out)
    mv_log.add_command(label='Entity Log.', command=self.show_entity_log)
    mv_log.add_command(label='Entity Err.', command=self.show_entity_err)
    mv_log.add_separator()
    mv_log.add_command(label='Set ID Out', command=self.show_setwid_out)
    mv_log.add_command(label='Set ID Log.', command=self.show_setwid_log)
    mv_log.add_command(label='Set ID Err.', command=self.show_setwid_err)
    mv_log.add_separator()
    mv_log.add_command(label='Overflow Out', command=self.show_over_out)
    mv_log.add_command(label='Overflow Log.', command=self.show_over_log)
    mv_log.add_command(label='Overflow Err.', command=self.show_over_err)
    mv_log.add_separator()
    mv_log.add_command(label='Note Out', command=self.show_note_out)
    # mv_log.add_command(label='Note Log.', command=self.show_note_log)
    mv_log.add_command(label='Note Err.', command=self.show_note_err)
    mv_log.add_separator()
    mv_log.add_command(label='XML ', command=self.show_xml_out)
    mv_log.add_command(label='XML Err.', command=self.show_xml_err)
    mv_log.add_command(label='XML-TEI', command=self.show_tei_out)
    mv_log.add_command(label='XML-TEI Err.', command=self.show_tei_err)
    mv_log.add_separator()
    mv_log.add_command(label='xml2text Out', command=self.show_xml2txt_out)
    mv_log.add_command(label='xml2text Err.',
                       command=self.show_xml2txt_err)
    mv_log.add_separator()
    mv_log.add_command(label='TeimEdit Err.', command=self.show_edit_err)
    mv_log.add_separator()
    mv_log.add_command(label='Read Log', command=self.open_log)
    # --- Del. menu ---
    mv_del = new_mv()
    mv_del.add_command(label='Entities', command=self.delete_txt1)
    mv_del.add_command(label='XML', command=self.delete_txt2)
    mv_del.add_command(label='Log', command=self.delete_txt3)
    mv_del.add_command(label='All', command=self.delete_txt_all)
    mv_del.add_separator()
    mv_del.add_command(label='Remove log files', command=self.remove_log)
    # --- Help menu ---
    mv_help = new_mv()
    mv_help.add_command(label='Files & Directory', command=self.help_paths)
    mv_help.add_command(label='run options', command=self.help_options)
    # Horizontal menu bar.
    menu_bar.add_cascade(label='File', menu=mv_file, underline=0)
    menu_bar.add_cascade(label='Edit', menu=mv_edit, underline=0)
    menu_bar.add_cascade(label='Check', menu=self.mv_check, underline=0)
    menu_bar.add_cascade(label='Elab.', menu=mv_elab, underline=1)
    menu_bar.add_cascade(label='Log', menu=mv_log, underline=0)
    menu_bar.add_cascade(label='Del.', menu=mv_del, underline=0)
    # Window-raising shortcuts ("W: 1 2 3 4").
    s = f"W:"
    menu_bar.add_command(label=s, activeforeground=FG_MENU,
                         activebackground=BG_MENU),
    menu_bar.add_command(label=' 1', command=self.top_w0,)
    menu_bar.add_command(label=' 2', command=self.top_w1)
    menu_bar.add_command(label=' 3', command=self.top_w2)
    menu_bar.add_command(label=' 4 ', command=self.top_w3)
    menu_bar.add_command(label='1234 ', command=self.top_free)
    menu_bar.add_command(label='Tidy', command=self.top_order)
    s = f" "
    menu_bar.add_command(label=s, activeforeground=FG_MENU,
                         activebackground=BG_MENU),
    menu_bar.add_cascade(label='Help', menu=mv_help)
    s = f" {__version__} "
    menu_bar.add_command(label=s, activeforeground=FG_MENU,
                         activebackground=BG_MENU),
    self.open_win1()
    self.open_win2()
    self.open_win3()
    # Global control keys.
    self.text_edit.bind("<Control-q>", self.app_quit)
    # self.txt1.bind("<Control-q>", self.app_quit)
    # self.txt2.bind("<Control-q>", self.app_quit)
    # self.txt3.bind("<Control-q>", self.app_quit)
    self.txt1.bind("<Control-w>", lambda x: self.top_free())
    self.txt2.bind("<Control-w>", lambda x: self.top_free())
    self.txt3.bind("<Control-w>", lambda x: self.top_free())
    self.show_win1("")
    self.show_win2("")
    self.show_win3("")
    ################################
    # Look for the teimcfg dir starting from -c teimcfg_dir, or from
    # the current dir if unset; exits if not found.
    # Invokes:
    #     set_file_path
    #     read_over_tags
    self.rfind_teimcfg()
    ##################################
    self.add_mv_check()
    self.top_free()
    self.text_edit.focus()
    tk.mainloop()
###############################
def open_win1(self):
    """Create the ENTITY window (win1) and its text pad (txt1)."""
    win = tk.Toplevel(self.win0)
    win.title('ENTITY')
    # Ignore the WM close button: windows are managed by the application.
    win.protocol("WM_DELETE_WINDOW", lambda: False)
    win.rowconfigure(0, weight=1)
    win.columnconfigure(0, weight=1)
    win.geometry(self.geometry_win1)
    pad = TextPad(win)
    pad.config(spacing1=2, spacing3=2)
    pad.grid(sticky='nsew')
    self.win1 = win
    self.txt1 = pad
def open_win2(self):
    """Create the XML window (win2) and its text pad (txt2)."""
    win = tk.Toplevel(self.win0)
    win.title('XML')
    # Ignore the WM close button: windows are managed by the application.
    win.protocol("WM_DELETE_WINDOW", lambda: False)
    win.rowconfigure(0, weight=1)
    win.columnconfigure(0, weight=1)
    win.geometry(self.geometry_win2)
    pad = TextPad(win)
    pad.config(spacing1=2, spacing3=2)
    pad.grid(sticky='nsew')
    self.win2 = win
    self.txt2 = pad
def open_win3(self):
    """Create the LOG window (win3) and its text pad (txt3)."""
    win = tk.Toplevel(self.win0)
    # Ignore the WM close button: windows are managed by the application.
    win.protocol("WM_DELETE_WINDOW", lambda: False)
    win.title('LOG')
    win.rowconfigure(0, weight=1)
    win.columnconfigure(0, weight=1)
    win.geometry(self.geometry_win3)
    pad = TextPad(win)
    pad.config(spacing1=1, spacing3=1)
    pad.grid(sticky='nsew')
    pad.configure(font=FONT_LOG, bg=BG_LOG, fg=FG_LOG)
    self.win3 = win
    self.txt3 = pad
def show_win1(self, s):
s = '' if s is None else s
self.txt1.delete('1.0', tk.END)
self.txt1.insert('1.0', s)
def show_win2(self, s):
s = '' if s is None else s
self.txt2.delete('1.0', tk.END)
self.txt2.insert('1.0', s)
def show_win3(self, s):
s = '' if s is None else s
self.txt3.delete('1.0', tk.END)
self.txt3.insert('1.0', s)
def app_quit(self, *args):
    """Save the session state (rc) and stop the Tk main loop."""
    # yn = mbox.askyesno("", "Quit ?", parent=self.win0)
    # if not yn:
    #     return
    # self.win0.deiconify()
    self.rc_save()
    self.win0.quit()
    # sys.exit(0)
########################
# UA manu mgr
########################
def top_order(self):
    """"Tidy": reset all windows to default geometry and stacking order.

    Raises win3..win0 in turn so win0 ends up on top.
    """
    self.top_not()
    self.win0.geometry(g_win0)
    self.win1.geometry(g_win1)
    self.win2.geometry(g_win2)
    self.win3.geometry(g_win3)
    self.win3.lift()
    self.win2.lift()
    self.win1.lift()
    self.win0.lift()
def top_free(self):
self.top_not()
self.win3.lower(self.win2)
self.win2.lower(self.win1)
self.win1.lower(self.win0)
self.win0.lift()
def top_w0(self):
    """Keep the editor window (win0) always on top."""
    self.top_not()
    self.win0.attributes("-topmost", True)

def top_w1(self):
    """Keep the ENTITY window (win1) always on top."""
    self.top_not()
    self.win1.attributes("-topmost", True)

def top_w2(self):
    """Keep the XML window (win2) always on top."""
    self.top_not()
    self.win2.attributes("-topmost", True)

def top_w3(self):
    """Keep the LOG window (win3) always on top."""
    self.top_not()
    self.win3.attributes("-topmost", True)
def top_not(self):
self.win0.attributes("-topmost", False)
self.win1.attributes("-topmost", False)
self.win2.attributes("-topmost", False)
self.win3.attributes("-topmost", False)
def iconify(self):
self.win0.iconify()
self.win1.iconify()
self.win2.iconify()
self.win3.iconify()
########################
# UA menu file
########################
def open_text(self, *args):
    """File > Open: ask for a text file, then load it into the editor.

    The chosen path is made relative to the start-up directory before
    the per-stage paths are derived.
    """
    self.top_free()
    path_text_s = fdialog.askopenfilename(title='file',
                                          initialdir=self.text_dir_s,
                                          filetypes=[("text", "*.txt"),
                                                     ("*.*", "*.*")])
    # Empty string means the dialog was cancelled.
    if len(path_text_s) < 1:
        return
    if not ptu.exists(path_text_s):
        return
    path_text_s = ptu.relative_to(path_text_s, self.pwd)
    self.set_paths(path_text_s)
    self.read_text_file()
def open_recenti(self):
    """File > Recent Files: pick a recently-used file from a list box."""
    try:
        file_lst_s = self.rc_get_recenti()
        if len(file_lst_s) < 1:
            return
        self.top_free()

        def load_file(n):
            # n < 0 means the selection was cancelled.
            if n < 0:
                return
            path_text_s = file_lst_s[n]
            self.set_paths(path_text_s)
            self.read_text_file()
        open_listbox(file_lst_s, load_file, "find text")
    except Exception as e:
        # fixed typo in log message: "oepen_recenti" -> "open_recenti"
        log_err.log(f"ERROR open_recenti() \n{e}")
        self.top_free()
        mbox.showerror("", f"ERROR open_recenti() ")
def save_text(self, *args):
    """File > Save: write the editor content back to the current file."""
    s = self.get_edit_text()
    self.write_file(self.path_text_p, s)
    # Refresh the recent-files / session state.
    self.rc_update()
def save_text_as(self, *args):
    """File > Save As: ask for a new name, save and switch to that file.

    Also re-derives the per-stage paths and updates the window title.
    """
    self.top_free()
    init_dir = self.text_dir_s
    path_text_s = fdialog.asksaveasfilename(title='Save as Name',
                                            initialdir=init_dir)
    # Empty/None means the dialog was cancelled.
    if path_text_s is None or len(path_text_s) < 1:
        return ""
    text = self.get_edit_text()
    path_text_s = ptu.relative_to(path_text_s, self.pwd)
    self.set_paths(path_text_s)
    self.write_file(self.path_text_p, text)
    self.rc_update()
    title = f"TEXT: {path_text_s} "
    self.win0.title(title)
##########################
# menu edit
##########################
#################
# UA menu check
################
def elab_checktxt(self):
    """Check menu: run the entity-check on the current editor text.

    Saves the editor content first, then shows the check log on top.
    """
    s = self.get_edit_text()
    self.write_file(self.path_checktxt_in_s, s)
    do_main_checktxt(self.path_checktxt_in_s)
    s = self.read_file(self.path_checktxt_out_s)
    self.show_log_top(s, True)
def elab_checkover(self):
    """Check menu: run the overflow-check on the current editor text.

    Saves the editor content first, then shows the check log on top.
    """
    s = self.get_edit_text()
    self.write_file(self.path_checkover_in_s, s)
    do_main_checkover(self.path_checkover_in_s,
                      self.path_over_csv_s)
    s = self.read_file(self.path_checkover_out_s)
    self.show_log_top(s, True)
##########################################
# chek edit funizoni di controllo su teimed
###########################################
def del_tags(self):
    """Remove both highlight tag styles from the editor widget."""
    self.text_edit.tag_delete(FIND_TAGS[0])
    self.text_edit.tag_delete(FIND_TAGS[1])

def config_tags(self):
    """(Re)define the colors of the two highlight tag styles."""
    self.text_edit.tag_config(FIND_TAGS[0],
                              background=BG_TAG,
                              foreground=FG_TAG)
    self.text_edit.tag_config(FIND_TAGS[1],
                              background=BG2_TAG,
                              foreground=FG2_TAG)
def next_idx(self, idx, n):
    """Return the Tk text index *n* characters after *idx* ("row.col")."""
    row, col = idx.split('.')
    new_col = int(col) + n
    return f"{row}.{new_col}"
def add_tags(self, m_lst):
    """Highlight every match of *m_lst* in the editor and scroll to the first.

    Each item is a dict with 's' (the matched string) and 't' (0 or 1,
    selecting which FIND_TAGS style to apply). Also updates
    tag_num_debug with the number of tags actually placed.
    """
    # BUG FIX: was "self.nuum_debug = 0" (typo), so the counter was never
    # reset here; reset the real counter as add_tags_from_to() does.
    self.tag_num_debug = 0
    self.del_tags()
    idx = '1.0'
    for mtch in m_lst:
        # matched string
        s = mtch['s']
        # match type (tag style index)
        t = mtch['t']
        idx = self.text_edit.search(s,
                                    idx,
                                    regexp=False,
                                    stopindex=tk.END)
        # Empty index: no further occurrence found.
        if idx == '':
            break
        idx_end = self.next_idx(idx, len(s))
        self.text_edit.tag_add(FIND_TAGS[t], idx, idx_end)
        idx = idx_end
        self.tag_num_debug += 1
    self.config_tags()
    # Use the first tag range to scroll the text to its position.
    tag_lst = self.text_edit.tag_ranges(FIND_TAGS[0])
    t0 = tag_lst[0] if len(tag_lst) > 0 else "1.0"
    self.text_edit.see(t0)
def add_tags_from_to(self, m_lst):
    """Highlight open/close tag pairs of *m_lst* in the editor.

    Each item has 'so' (opening string), 'sc' (closing string, may be
    '') and 't' (tag style index). The highlight spans from the opening
    string to the end of the closing one. If fewer tags are placed than
    matches were supplied, the mismatch is reported in the LOG window.
    """
    self.del_tags()
    idxo = '1.0'
    self.tag_num_debug = 0
    # print("=========")
    for mtch in m_lst:
        # opening tag string
        so = mtch['so']
        # closing tag string
        sc = mtch['sc']
        # match type (tag style index)
        t = mtch['t']
        # print("")
        # print(f"A0 {idxo} {so} {sc} * ")
        idxo = self.text_edit.search(so,
                                     idxo,
                                     regexp=False,
                                     stopindex=tk.END)
        # print(f"A1 {idxo} {so} {sc} *")
        if idxo == '':
            break
        self.tag_num_debug += 1
        idx_end = self.next_idx(idxo, len(so))
        # print(f"A2 {idxo} {idx_end} {so} {sc} *")
        # A closing tag exists: extend the highlight up to it.
        if sc != "":
            idxc = self.text_edit.search(sc,
                                         idxo,
                                         regexp=False,
                                         stopindex=tk.END)
            # print(f"A3 {idxo} {idx_end} {idxc }{so} {sc} *")
            # Closing tag found.
            if idxc != '':
                idx_end = self.next_idx(idxc, len(sc))
        # print(f"A4 {idxo} {idx_end} {so} {sc} *")
        self.text_edit.tag_add(FIND_TAGS[t], idxo, idx_end)
        idxo = idx_end
        # print(f"A5 {idxo} {idx_end} {t} {so} {sc}")
    self.config_tags()
    # Use the first tag range to scroll the text to its position.
    tag_lst = self.text_edit.tag_ranges(FIND_TAGS[0])
    t0 = tag_lst[0] if len(tag_lst) > 0 else "1.0"
    self.text_edit.see(t0)
    ###############
    # Tag count cross-check: report any matches that were not placed.
    # print(f"tag a:{t0} {len(tag_lst)}")
    n = len(m_lst)
    if self.tag_num_debug != n:
        log_err.log("Error")
        for x in m_lst:
            xs = x['s']
            le = min(60, len(xs))
            txt = xs[0:le]
            s = f"{txt.strip()}"
            # log_err.log(s)
            self.show_log_lift(s, True)
        # log_err.log(f"{self.tag_num_debug}")
        self.show_log_lift(f"{self.tag_num_debug}", True)
def fin_entity_comma(self):
    """Check menu "Entities Comma": highlight all entity matches.

    NOTE(review): the name looks like a typo for "find_entity_comma",
    but the Check menu binds to this exact name, so it must stay.
    """
    text = self.text_edit.get('1.0', tk.END)
    txt_wrk = clean_text(text)
    m_lst = chk.check_entitys(txt_wrk)
    self.add_tags(m_lst)

def find_entity_brackets(self):
    """Check menu "Entities Brackets": highlight bracketed entities."""
    text = self.text_edit.get('1.0', tk.END)
    txt_wrk = clean_text(text)
    m_lst = chk.check_entity_brackets(txt_wrk)
    self.add_tags(m_lst)
def read_teimtags_set(self):
"""Lettura set di tags da teimtag.csv
"""
DELIMITER='|'
lst=[]
rows=self.path_entity_csv_p.open().readlines()
for row in rows:
if row.strip() == '':
continue
if row[0] == '#':
continue
cols=row.split(DELIMITER)
if len(cols) < 3:
continue
tag_name=cols[1].strip()
lst.append(tag_name)
tag_set=set(lst)
return tag_set
def find_entity_undefined(self):
    """Check menu "Entities Undefined": highlight unknown entities.

    Every entity match whose bare name (without '&' and ';') is not in
    teimtags.csv is re-tagged with style 1 and highlighted.
    """
    text = self.text_edit.get('1.0', tk.END)
    txt_wrk = clean_text(text)
    m_lst = chk.check_entitys(txt_wrk)
    tag_set = self.read_teimtags_set()
    err_lst = []
    for item in m_lst:
        # Strip the entity delimiters to get the bare tag name.
        tag = item.get('s', '').replace('&', '').replace(';', '').strip()
        if tag in tag_set:
            continue
        item['t'] = 1
        err_lst.append(item)
    self.add_tags(err_lst)
def find_over(self, po, pc, *args):
    """Highlight overflow matches for []-style tags (single-span tags)."""
    # FIXME: checks tags [] and [_ _]
    text = self.text_edit.get('1.0', tk.END)
    txt_wrk = clean_text(text)
    m_lst = chk.check_overflow(txt_wrk, po, pc)
    self.add_tags(m_lst)

def find_form_to(self, po, pc):
    """Highlight overflow matches for {}-style tags (open/close spans)."""
    text = self.text_edit.get('1.0', tk.END)
    txt_wrk = clean_text(text)
    m_lst = chk.check_overflow(txt_wrk, po, pc)
    self.add_tags_from_to(m_lst)
#############
# UA menu elab
#############
def elab_teimxml(self):
    """Elab. menu "Entities": save the text and run the entity stage.

    On success shows the stage log in win1 and a summary in the LOG
    window; on SystemExit from the stage the error is logged and shown.
    """
    msg = self.get_edit_text()
    self.write_file(self.path_teim_in_s, msg)
    try:
        do_main_xml(self.path_teim_in_s,
                    self.path_entity_csv_s)
    except SystemExit as e:
        msg = f"ERROR Elab entities\n{e}"
        log_err.log(msg)
        self.show_log_top(msg, True)
        return
    msg = self.read_file(self.path_teim_log_s)
    self.show_win1(msg)
    ls = ["Elab. entity",
          f"{self.path_teim_in_s}",
          f"{self.path_teim_out_s}",
          f"{self.path_entity_csv_s}"]
    self.show_log_lift(os.linesep.join(ls), True)
def elab_teimsetid(self):
    """Elab. menu "Set ID": run the set-id stage on the entity output.

    Requires the Entities stage to have produced its input file first;
    otherwise the user is told to run it.
    """
    if not ptu.exists(self.path_id_in_s):
        self.top_free()
        mbox.showinfo("", f"Before Elab. Entity", parent=self.win0)
        return
    try:
        last = do_main_setid(self.path_id_in_s, self.path_xmlid_csv_s)
    except SystemExit as e:
        # fixed typo in error message: "Errro" -> "Error"
        s = f"Error in set id{os.linesep} {e}"
        log_err.log(s)
        self.show_log_top(s, True)
        return
    ls = ["Elab. Set id",
          f"{self.path_id_in_s}",
          f"{self.path_id_out_s}",
          f"{self.path_xmlid_csv_s}",
          last]
    self.show_log_lift(os.linesep.join(ls), True)
def elab_teimover(self):
    """Elab. menu "Overflow": run the overflow stage.

    Requires the Set ID stage to have produced its input file first.
    """
    if not ptu.exists(self.path_over_in_s):
        self.top_free()
        mbox.showinfo("", f"Before Elab. Set id", parent=self.win0)
        return
    try:
        do_main_over(self.path_over_in_s, self.path_over_csv_s)
    except SystemExit as e:
        msg = f"Elab. Overflow {os.linesep} {e}"
        log_err.log(msg)
        self.show_log_top(msg, True)
        return
    ls = ["Elab. overflow",
          f"{self.path_over_in_s}",
          f"{self.path_over_out_s}",
          f"{self.path_over_csv_s}"]
    self.show_log_lift(os.linesep.join(ls), True)
def elab_teimnote(self):
    """Elab. menu "Note": run the note stage, then pretty-print the XML.

    Requires the Overflow stage to have produced its input file first.
    On success the resulting XML is formatted into the XML window and
    written to path_xml_out_s (errors go to path_xml_err_s).
    """
    if not ptu.exists(self.path_note_in_s):
        self.top_free()
        mbox.showinfo("", f"Before Elab. Overflow", parent=self.win0)
        return
    try:
        do_main_note(self.path_note_in_s,
                     self.path_note_csv_s)
    except SystemExit as e:
        msg = f"Elab. note {os.linesep} {e}"
        log_err.log(msg)
        self.show_log_top(msg, True)
        return
    ls = ["Elab. Note",
          f"{self.path_note_in_s}",
          f"{self.path_note_out_s}",
          f"{self.path_note_csv_s}"]
    self.show_log_lift(os.linesep.join(ls), True)
    # Format the XML produced by the note stage.
    if not ptu.exists(self.path_xml_in_s):
        self.top_free()
        mbox.showinfo("", f"Error Elab. Note", parent=self.win0)
        return
    src = self.read_file(self.path_xml_in_s)
    self.format_xml(src,
                    self.path_xml_out_s,
                    self.path_xml_err_s,
                    True)
def elab_tei(self):
    """Elab. menu "TEI": wrap the XML with the TEI header and format it.

    Requires the Note stage to have produced its input file first. The
    result is formatted (no extra <div> wrapper) into path_tei_out_s.
    """
    if not ptu.exists(self.path_tei_in_s):
        self.top_free()
        mbox.showinfo("", f"Before Elab. Note", parent=self.win0)
        return
    try:
        # tei_head = self.read_file(self.path_tei_head_s)
        xml = self.read_file(self.path_tei_in_s)
        # src_xml_tei = tei_head.replace(XML_MANO, xml)
        xml_tei = add_tei_head(xml)
    except SystemExit as e:
        msg = f"Elab. XML-TEI {os.linesep} {e}"
        log_err.log(msg)
        self.show_log_top(msg, True)
        return
    ls = [" Elab. XML-TEI",
          f"{self.path_tei_in_s}",
          f"{self.path_tei_out_s}"]
    self.show_log_lift(os.linesep.join(ls), True)
    # self.write_file(self.path_tei_out_s, xml_tei)
    self.format_xml(xml_tei,
                    self.path_tei_out_s,
                    self.path_tei_err_s,
                    False)
def format_xml(self, src, path_out, path_err, add_div=True):
    """Pretty-print XML string *src* into the XML window and *path_out*.

    Args:
        src (str): the XML source.
        path_out: destination for the formatted XML.
        path_err: destination for an annotated error dump on parse error.
        add_div (bool): wrap *src* in <div>...</div> before parsing
            (used for fragments without a single root element).
    """
    def make_xml_err(xml, err):
        # Number every line and splice the parser error message next to
        # the offending line (extracted from "line N," in *err*).
        m = re.search(r'(line )([0-9]+)(,)', err)
        if m is not None:
            s = m.group(2)
            n = int(s)
        else:
            n = -1
        rows = xml.split(os.linesep)
        for i, row in enumerate(rows):
            rows[i] = f'{i+1}){row}'
            if i+1 == n:
                # fixed typo: "ERRROR" -> "ERROR"
                rows[i] = f'\nERROR\n{rows[i]}\n{err}\n'
        xml_num = os.linesep.join(rows)
        xml_err = "ERROR xml\n" + err + "\n\n" + xml_num
        return xml_err
    try:
        if add_div:
            src = f'<div>{src}</div>'
        src_bytes = src.encode(encoding='utf-8')
        parser = etree.XMLParser(remove_blank_text=True)
        root = etree.XML(src_bytes, parser)
        xml = etree.tostring(root,
                             method='xml',
                             xml_declaration=False,
                             encoding='unicode',
                             with_tail=True,
                             pretty_print=True,
                             standalone=None,
                             doctype=None,
                             exclusive=False,
                             inclusive_ns_prefixes=None,
                             strip_text=False)
    except etree.ParseError as e:
        msg = f"ParseError format_xml()\n{e}"
        # removed stray "set_trace" debugger leftover (bare name — would
        # raise NameError when this handler ran)
        log_err.log(msg)
        xml_err = make_xml_err(src, str(e))
        self.show_log_top(xml_err, False)
        self.write_file(path_err, xml_err)
        return
    except Exception as e:
        msg = f"ERROR format_xml()\n{e}"
        log_err.log(msg)
        self.show_log_top(msg, False)
        return
    self.show_win2(xml)
    self.write_file(path_out, xml)
def elab_xml2txt(self):
    """Elab. menu "XML => text": extract plain text from the formatted XML.

    Requires the formatted XML (path_xml2txt_in_s) to exist first.
    """
    if not ptu.exists(self.path_xml2txt_in_s):
        mbox.showinfo("", f"Before Elab. Note", parent=self.win0)
        return
    try:
        do_main_xml2txt(self.path_xml2txt_in_s,
                        self.path_xml2txt_out_s)
    except Exception as e:
        msg = f"ERROR Elab. xml2txt()\n{e} "
        log_err.log(msg)
        # NOTE(review): other handlers call show_log_top/show_log_lift;
        # "show_log" is not defined in this chunk — confirm it exists.
        self.show_log(msg, True)
        mbox.showerror("", f"{e}")
        return
    ls = ["XML => text",
          f"{self.path_xml2txt_in_s}",
          f"{self.path_xml2txt_out_s}"]
    self.show_log_lift(os.linesep.join(ls), True)
    # text = self.read_file(self.path_xml2txt_out_s)
    # self.show_win2(text)
def elab_all(self):
self.remove_log()
self.elab_teimxml()
self.elab_teimsetid()
self.elab_teimover()
self.elab_teimnote()
self.elab_tei()
self.elab_xml2txt()
    ##############
    # UA menu del
    ##############
    def delete_txt_all(self):
        """Clear the contents of all three text widgets."""
        self.delete_txt1()
        self.delete_txt2()
        self.delete_txt3()
    def delete_txt1(self):
        # Guard against the widget not having been created yet.
        if self.txt1 is not None:
            self.txt1.delete('1.0', tk.END)
    def delete_txt2(self):
        if self.txt2 is not None:
            self.txt2.delete('1.0', tk.END)
    def delete_txt3(self):
        if self.txt3 is not None:
            self.txt3.delete('1.0', tk.END)
    def remove_log(self):
        """Delete every file found in the log directory."""
        path_lst=ptu.list_path(self.log_dir_p)
        for p in path_lst:
            p.unlink()
    ##############
    # UA menu log
    ##############
    # Each of the following simply loads one of the pipeline's output /
    # log / error files into the log viewer; the *_format variants show
    # the pretty-printed copy produced next to the raw output file.
    def show_checktxt(self):
        self.read_log_file(self.path_checktxt_out_s)
    def show_checkover(self):
        self.read_log_file(self.path_checkover_out_s)
    #
    def show_entity_out(self):
        self.read_log_file(self.path_teim_out_s)
    def show_entity_log(self):
        self.read_log_file(self.path_teim_log_s)
    def show_entity_err(self):
        self.read_log_file(self.path_teim_err_s)
    #
    def show_setwid_out(self):
        # Show the formatted copy ("_id_format.xml") instead of the raw one.
        format_path=self.path_id_out_s.replace("_id.xml", "_id_format.xml")
        # AAA self.read_log_file(self.path_id_out_s)
        self.read_log_file(format_path)
    def show_setwid_log(self):
        self.read_log_file(self.path_id_log_s)
    def show_setwid_err(self):
        self.read_log_file(self.path_id_err_s)
    #
    def show_over_out(self):
        format_path=self.path_over_out_s.replace(
            "_over.xml", "_over_format.xml")
        # AAA self.read_log_file(self.path_over_out_s)
        self.read_log_file(format_path)
    def show_over_log(self):
        self.read_log_file(self.path_over_log_s)
    def show_over_err(self):
        self.read_log_file(self.path_over_err_s)
    #
    def show_note_out(self):
        format_path=self.path_note_out_s.replace(
            "_note.xml", "_note_format.xml")
        # AAA self.read_log_file(self.path_note_out_s)
        self.read_log_file(format_path)
    # def show_note_log(self):
    #     self.read_log_file(self.path_note_log_s)
    def show_note_err(self):
        self.read_log_file(self.path_note_err_s)
    #
    def show_xml_out(self):
        self.read_log_file(self.path_xml_out_s)
    def show_xml_err(self):
        self.read_log_file(self.path_xml_err_s)
    #
    def show_tei_out(self):
        self.read_log_file(self.path_tei_out_s)
    def show_tei_err(self):
        self.read_log_file(self.path_tei_err_s)
    #
    def show_xml2txt_out(self):
        self.read_log_file(self.path_xml2txt_out_s)
    def show_xml2txt_err(self):
        self.read_log_file(self.path_xml2txt_err_s)
    #
    def show_edit_err(self):
        self.read_log_file(self.path_edit_err_s)
def open_log(self):
self.top_free()
path=fdialog.askopenfilename(
parent=self.win0,
title='log',
initialdir=self.log_dir_p,
filetypes=[("all", "*.*"),
("log", "*.log"),
("text", "*.txt"),
("xml", "*.xml")])
if len(path) < 1:
return
# controllo probabilmente inutile
path=ptu.str2path(path)
if path.exists():
s=self.read_file(path)
self.show_log_top(s)
else:
self.top_free()
mbox.showinfo("", f"Not Foud", parent=self.win0)
    #############
    # UA menu help
    ############
    def help_paths(self):
        """Show the path summary and raise the log window."""
        self.show_psths()
        self.top_w3()
def show_psths(self):
try:
wrk_dir=ptu.path2str(self.pwd)
# parent_teimcfg = self.parent_teimcfg_p.absolute()
teimcfg=ptu.path2str(self.path_teimcfg_p.absolute())
# root_dir = self.root_dir_p.absolute()
info=[
"===========================",
f"FILE TEXT : {self.path_text_s}",
"===========================",
f"match : {self.path_match_s}",
f"work dir : {wrk_dir} ",
f"teimcfg : {teimcfg}",
"---------------------------",
f"teimed tags : {self.path_entity_csv_s}",
f"overflow tags : {self.path_over_csv_s}",
f"xmlid tags : {self.path_xmlid_csv_s}",
f"note : {self.path_note_csv_s}",
"---------------------------",
f"chek txt : {self.path_checktxt_out_s}",
f"check over : {self.path_checkover_out_s}",
"",
f"elab entity : {self.path_teim_out_s}",
f"log entity : {self.path_teim_log_s}",
f"ERR entity : {self.path_teim_err_s}",
"",
f"elab set id : {self.path_id_out_s}",
f"log set id : {self.path_id_log_s}",
f"ERR set id : {self.path_id_err_s}",
"",
f"elab over : {self.path_over_out_s}",
f"log over : {self.path_over_log_s}",
f"ERR over : {self.path_over_err_s}",
"",
f"elab note : {self.path_note_out_s}",
f"ERR note : {self.path_note_err_s}",
"",
f"elab XML : {self.path_xml_out_s}",
f"ERR XML : {self.path_xml_err_s}",
"===========================",
f"elab XML-TEI : {self.path_tei_out_s}",
f"ERR XML-TEI : {self.path_tei_err_s}",
"===========================",
f"elab text : {self.path_xml2txt_out_s}",
f"err text : {self.path_xml2txt_err_s}",
"---------------------------",
]
s=os.linesep.join(info)
self.show_log(s)
except Exception as e:
log_err.log(e)
raise(Exception(f"ERROR show_paths()\{e}"))
    def help_options(self):
        """Show the command-line options help text in the log window."""
        s=HELP_RUN_OPS()
        self.show_log_top(s)
    def show_log_top(self, msg, append=False):
        """Write *msg* to the log widget, then bring its window on top."""
        self.show_log(msg, append)
        self.top_w3()
    def show_log_lift(self, msg, append=False):
        """Write *msg* to the log widget, then lift the log window."""
        self.show_log(msg, append)
        self.win3.lift()
    def show_log(self, msg, append=False):
        """Write *msg* into the log text widget.

        :param msg: text to show (None is treated as the empty string).
        :param append: when True append at the end (and scroll there),
            otherwise replace the whole widget content.
        """
        msg="" if msg is None else msg
        if append:
            # Two leading newlines visually separate consecutive entries.
            r=os.linesep.join(["", "", msg])
            self.txt3.insert(tk.END, r)
            self.txt3.see(tk.END)
        else:
            r=os.linesep.join(["", msg])
            self.txt3.delete('1.0', tk.END)
            self.txt3.insert('1.0', r)
    #################################
    # UA rc
    ################################
    def rc_get_recenti(self):
        """Return the recent-files list stored in the rc settings."""
        lst=rc.get(RC_RECENTI, [])
        return lst
def rc_update(self):
path=self.path_text_s
lst=rc.get(RC_RECENTI, [])
lst.append(path)
lst=list(set(lst))
rc.set('recenti', lst)
rc.set(RC_PATH_LAST, self.path_text_s)
# rc.prn('update')
    def rc_save(self):
        """Persist window geometries, the teimcfg parent and the recent
        files list to the rc settings, then save them to disk."""
        # 3 73
        # 1000x600+56+196",
        # 1000 500+50+196
        # widthxheight*X*Y
        def geometry(win):
            # Decompose tk's "WxH+X+Y" geometry string.
            wg=win.winfo_geometry()
            wd, hxy=wg.split('x')
            he, x, y=hxy.split('+')
            if os_name == 'linux':
                # Compensate for the window-manager frame offset observed
                # on Linux (3 px horizontally, 73 px vertically) — see the
                # notes above; presumably WM-specific, TODO confirm.
                x=str(int(x)-3)
                y=str(int(y)-73)
            return f'{wd}x{he}+{x}+{y}'
        rc.set('win0', geometry(self.win0))
        rc.set('win1', geometry(self.win1))
        rc.set('win2', geometry(self.win2))
        rc.set('win3', geometry(self.win3))
        rc.set(RC_PARENT_TEIMCFG, self.parent_teimcfg_s)
        self.rc_update()
        rc.save()
        # rc.prn("save")
def do_main(parent_teimcfg, root_dir, path_match):
    """Fill in missing arguments from the saved rc history, then start
    the editor GUI. Does nothing when neither a teimcfg ancestor nor a
    file match can be resolved."""
    rc.load()
    if parent_teimcfg is None:
        parent_teimcfg=rc.get_val(RC_PARENT_TEIMCFG)
    if path_match is None:
        path_match=rc.get_val(RC_PATH_LAST)
    if parent_teimcfg is None and path_match is None:
        return
    editor=TeimEdit(parent_teimcfg, root_dir, path_match)
    editor.open_win0()
def prn_help():
    """Print the run-options help text to stdout."""
    print(HELP_RUN_OPS())
if __name__ == "__main__":
    # Command-line entry point: parse options, then launch the GUI.
    le=len(sys.argv)
    parser=argparse.ArgumentParser()
    parser.add_argument('-c',
                        dest="parent_cfg",
                        required=False,
                        default=None,
                        type=str,
                        metavar="teimcfg ancestor",
                        help="-c <dir_ancestor_of_teimcfg>")
    parser.add_argument('-t',
                        dest="path_match",
                        required=False,
                        default=None,
                        type=str,
                        metavar="files match",
                        help="-t <file>.txt / <files_ match>")
    parser.add_argument('-x',
                        action="store_true",
                        required=False,
                        help="-x => print Help")
    parser.add_argument('-e',
                        action="store_true",
                        required=False,
                        help="-e => delete history")
    args=parser.parse_args()
    # set_trace()
    # With no arguments at all: show help (and exit) unless a saved
    # history exists, in which case do_main() below restores the last
    # session from the rc file.
    if le == 1:
        if rc.is_empty():
            print(f"\nauthor: {__author__}")
            print(f"{__date__} { __version__}")
            parser.print_help()
            sys.exit()
        # do_main(None, PATH_ROOT_DIR, None)
    if args.x:
        prn_help()
        sys.exit()
    if args.e:
        # Delete the saved rc history.
        rc.remove()
    if args.path_match:
        # '$' is accepted on the command line as a shell-safe stand-in
        # for the '*' glob character.
        args.path_match=args.path_match.replace('$', '*')
    print(f"{__date__} { __version__}")
    do_main(args.parent_cfg, PATH_ROOT_DIR, args.path_match)
| 35.341943 | 89 | 0.54949 |
46fcfa76bc605cf66195434120e084f9bd607f0d | 5,857 | py | Python | tests/test_events/test_events_cloudformation.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | tests/test_events/test_events_cloudformation.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | tests/test_events/test_events_cloudformation.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | import pytest
import copy
from string import Template
import boto3
import json
from botocore.exceptions import ClientError
from moto import mock_cloudformation, mock_events
import sure # noqa # pylint: disable=unused-import
from moto.core import ACCOUNT_ID
archive_template = Template(
json.dumps(
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "EventBridge Archive Test",
"Resources": {
"Archive": {
"Type": "AWS::Events::Archive",
"Properties": {
"ArchiveName": "${archive_name}",
"SourceArn": {
"Fn::Sub": "arn:aws:events:$${AWS::Region}:$${AWS::AccountId}:event-bus/default"
},
},
}
},
"Outputs": {
"Arn": {
"Description": "Archive Arn",
"Value": {"Fn::GetAtt": ["Archive", "Arn"]},
}
},
}
)
)
rule_template = Template(
json.dumps(
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "EventBridge Rule Test",
"Resources": {
"Rule": {
"Type": "AWS::Events::Rule",
"Properties": {
"Name": "${rule_name}",
"EventPattern": {"detail-type": ["SomeDetailType"]},
},
}
},
"Outputs": {
"Arn": {
"Description": "Rule Arn",
"Value": {"Fn::GetAtt": ["Rule", "Arn"]},
}
},
}
)
)
empty = json.dumps(
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "EventBridge Rule Test",
"Resources": {},
}
)
@mock_events
@mock_cloudformation
def test_create_archive():
    """Creating a stack with an Archive resource creates the archive."""
    # given
    stack_name = "test-stack"
    archive_name = "test-archive"
    cfn = boto3.client("cloudformation", region_name="eu-central-1")
    body = archive_template.substitute({"archive_name": archive_name})
    # when
    cfn.create_stack(StackName=stack_name, TemplateBody=body)
    # then
    expected_arn = "arn:aws:events:eu-central-1:{0}:archive/{1}".format(
        ACCOUNT_ID, archive_name
    )
    stack = cfn.describe_stacks(StackName=stack_name)["Stacks"][0]
    stack["Outputs"][0]["OutputValue"].should.equal(expected_arn)
    events = boto3.client("events", region_name="eu-central-1")
    described = events.describe_archive(ArchiveName=archive_name)
    described["ArchiveArn"].should.equal(expected_arn)
@mock_events
@mock_cloudformation
def test_update_archive():
    """Updating the stack with a changed template updates the archive."""
    # given
    cfn_client = boto3.client("cloudformation", region_name="eu-central-1")
    name = "test-archive"
    stack_name = "test-stack"
    template = archive_template.substitute({"archive_name": name})
    cfn_client.create_stack(StackName=stack_name, TemplateBody=template)
    # Add a Description property to the archive resource.
    template_update = copy.deepcopy(json.loads(template))
    template_update["Resources"]["Archive"]["Properties"][
        "Description"
    ] = "test archive"
    # when
    cfn_client.update_stack(
        StackName=stack_name, TemplateBody=json.dumps(template_update)
    )
    # then
    events_client = boto3.client("events", region_name="eu-central-1")
    response = events_client.describe_archive(ArchiveName=name)
    response["ArchiveArn"].should.equal(
        "arn:aws:events:eu-central-1:{0}:archive/{1}".format(ACCOUNT_ID, name)
    )
    response["Description"].should.equal("test archive")
@mock_events
@mock_cloudformation
def test_delete_archive():
    """Deleting the stack removes the archive it created."""
    # given
    stack_name = "test-stack"
    archive_name = "test-archive"
    cfn = boto3.client("cloudformation", region_name="eu-central-1")
    body = archive_template.substitute({"archive_name": archive_name})
    cfn.create_stack(StackName=stack_name, TemplateBody=body)
    # when
    cfn.delete_stack(StackName=stack_name)
    # then
    events = boto3.client("events", region_name="eu-central-1")
    remaining = events.list_archives(NamePrefix="test")["Archives"]
    remaining.should.have.length_of(0)
@mock_events
@mock_cloudformation
def test_create_rule():
    """Creating a stack with a Rule resource creates the EventBridge rule."""
    # given
    cfn_client = boto3.client("cloudformation", region_name="eu-central-1")
    name = "test-rule"
    stack_name = "test-stack"
    template = rule_template.substitute({"rule_name": name})
    # when
    cfn_client.create_stack(StackName=stack_name, TemplateBody=template)
    # then
    rule_arn = "arn:aws:events:eu-central-1:{0}:rule/{1}".format(ACCOUNT_ID, name)
    stack = cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
    stack["Outputs"][0]["OutputValue"].should.equal(rule_arn)
    events_client = boto3.client("events", region_name="eu-central-1")
    response = events_client.describe_rule(Name=name)
    response["Arn"].should.equal(rule_arn)
    response["EventPattern"].should.equal('{"detail-type": ["SomeDetailType"]}')
@mock_events
@mock_cloudformation
def test_delete_rule():
    """Updating the stack to an empty template deletes the rule."""
    # given
    cfn_client = boto3.client("cloudformation", region_name="eu-central-1")
    name = "test-rule"
    stack_name = "test-stack"
    template = rule_template.substitute({"rule_name": name})
    cfn_client.create_stack(StackName=stack_name, TemplateBody=template)
    # when
    # `empty` declares no resources, so the rule is removed on update.
    cfn_client.update_stack(StackName=stack_name, TemplateBody=empty)
    # then
    events_client = boto3.client("events", region_name="eu-central-1")
    with pytest.raises(ClientError) as exc:
        events_client.describe_rule(Name=name)
    err = exc.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
| 30.664921 | 108 | 0.619942 |
fd54d87fc0f387ea49e4fb6a47d4b1dec8ae9230 | 5,148 | py | Python | A2C/task0.py | JohnJim0816/rl-tutorials | e99daea815da85f9f25dff2d01b030249a203d22 | [
"MIT"
] | 16 | 2021-04-07T03:03:59.000Z | 2021-10-05T17:49:45.000Z | A2C/task0.py | JohnJim0816/rl-tutorials | e99daea815da85f9f25dff2d01b030249a203d22 | [
"MIT"
] | 1 | 2021-07-20T02:46:23.000Z | 2021-10-04T14:57:18.000Z | A2C/task0.py | JohnJim0816/rl-tutorials | e99daea815da85f9f25dff2d01b030249a203d22 | [
"MIT"
] | 8 | 2021-04-09T03:24:12.000Z | 2021-10-10T06:41:16.000Z | import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__)) # 当前文件所在绝对路径
parent_path = os.path.dirname(curr_path) # 父路径
sys.path.append(parent_path) # 添加路径到系统路径
import gym
import numpy as np
import torch
import torch.optim as optim
import datetime
from common.multiprocessing_env import SubprocVecEnv
from A2C.agent import ActorCritic
from common.utils import save_results, make_dir
from common.utils import plot_rewards
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # 获取当前时间
algo_name = 'A2C' # 算法名称
env_name = 'CartPole-v0' # 环境名称
class A2CConfig:
    """Hyper-parameters for the A2C training run."""
    def __init__(self) -> None:
        self.algo_name = algo_name  # algorithm name
        self.env_name = env_name  # environment name
        self.n_envs = 8  # number of parallel (asynchronous) environments
        self.gamma = 0.99  # RL discount factor
        self.hidden_dim = 256
        self.lr = 1e-3  # learning rate
        self.max_frames = 30000
        self.n_steps = 5
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PlotConfig:
    """Output-path and plotting settings for a training run."""
    def __init__(self) -> None:
        self.algo_name = algo_name  # algorithm name
        self.env_name = env_name  # environment name
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU if available
        self.result_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/results/'  # directory for saved results
        self.model_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/models/'  # directory for saved models
        self.save = True  # whether to save figures
def make_envs(env_name):
    """Return a zero-argument factory that builds and seeds a fresh gym
    environment named *env_name* (as required by SubprocVecEnv)."""
    def _builder():
        environment = gym.make(env_name)
        environment.seed(2)
        return environment
    return _builder
def test_env(env,model,vis=False):
    """Play one greedy-sampled episode in *env* and return its total reward.

    NOTE(review): relies on the module-level `cfg` created in the
    ``__main__`` block (for the torch device); calling this without that
    global would raise NameError.
    """
    state = env.reset()
    if vis: env.render()
    done = False
    total_reward = 0
    while not done:
        state = torch.FloatTensor(state).unsqueeze(0).to(cfg.device)
        dist, _ = model(state)
        # Sample an action from the policy distribution.
        next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
        state = next_state
        if vis: env.render()
        total_reward += reward
    return total_reward
def compute_returns(next_value, rewards, masks, gamma=0.99):
    """Compute discounted n-step returns, bootstrapped from *next_value*.

    *masks* holds 1 for non-terminal transitions and 0 for terminal ones,
    so returns do not leak across episode boundaries. The result is in
    the same (chronological) order as *rewards*.
    """
    running = next_value
    returns = []
    for reward, mask in zip(reversed(rewards), reversed(masks)):
        running = reward + gamma * running * mask
        returns.append(running)
    returns.reverse()
    return returns
def train(cfg, envs):
    """Train an A2C agent on a set of vectorized environments.

    :param cfg: A2CConfig with hyper-parameters and the torch device.
    :param envs: SubprocVecEnv holding cfg.n_envs parallel environments.
    :return: (test_rewards, test_ma_rewards) — evaluation rewards and
        their moving average, collected every 100 frames.
    """
    print('开始训练!')
    # Bug fix: A2CConfig defines `algo_name`, not `algo`; the previous
    # f-string raised AttributeError here.
    print(f'环境:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    env = gym.make(cfg.env_name)  # a single env, used only for evaluation
    env.seed(10)
    state_dim = envs.observation_space.shape[0]
    action_dim = envs.action_space.n
    model = ActorCritic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
    # Consistency fix: honor cfg.lr instead of silently relying on Adam's
    # default (both are 1e-3 today, so trained behavior is unchanged).
    optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
    frame_idx = 0
    test_rewards = []
    test_ma_rewards = []
    state = envs.reset()
    while frame_idx < cfg.max_frames:
        log_probs = []
        values = []
        rewards = []
        masks = []
        entropy = 0
        # Roll out cfg.n_steps transitions in every environment.
        for _ in range(cfg.n_steps):
            state = torch.FloatTensor(state).to(cfg.device)
            dist, value = model(state)
            action = dist.sample()
            next_state, reward, done, _ = envs.step(action.cpu().numpy())
            log_prob = dist.log_prob(action)
            entropy += dist.entropy().mean()
            log_probs.append(log_prob)
            values.append(value)
            rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(cfg.device))
            # mask = 0 on terminal transitions so returns do not leak
            # across episode boundaries.
            masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(cfg.device))
            state = next_state
            frame_idx += 1
            if frame_idx % 100 == 0:
                # Periodic evaluation on the single (non-vectorized) env.
                test_reward = np.mean([test_env(env, model) for _ in range(10)])
                print(f"frame_idx:{frame_idx}, test_reward:{test_reward}")
                test_rewards.append(test_reward)
                if test_ma_rewards:
                    # Exponential moving average of the evaluation reward.
                    test_ma_rewards.append(0.9*test_ma_rewards[-1]+0.1*test_reward)
                else:
                    test_ma_rewards.append(test_reward)
        # Bootstrap the value of the last state and compute n-step returns.
        next_state = torch.FloatTensor(next_state).to(cfg.device)
        _, next_value = model(next_state)
        # Consistency fix: pass cfg.gamma explicitly (it equals the 0.99
        # default, so results are unchanged).
        returns = compute_returns(next_value, rewards, masks, cfg.gamma)
        log_probs = torch.cat(log_probs)
        returns = torch.cat(returns).detach()
        values = torch.cat(values)
        advantage = returns - values
        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()
        # Entropy bonus encourages exploration.
        loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('完成训练!')
    return test_rewards, test_ma_rewards
if __name__ == "__main__":
cfg = A2CConfig()
plot_cfg = PlotConfig()
envs = [make_envs(cfg.env_name) for i in range(cfg.n_envs)]
envs = SubprocVecEnv(envs)
# 训练
rewards,ma_rewards = train(cfg,envs)
make_dir(plot_cfg.result_path,plot_cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=plot_cfg.result_path) # 保存结果
plot_rewards(rewards, ma_rewards, plot_cfg, tag="train") # 画出结果
| 37.035971 | 91 | 0.620435 |
c2f80e4c42036a725fca434be7043b0c6a5a0d16 | 3,510 | py | Python | archives/scope/data_loader/data_sets/geneva_stroke_outcome_dataset.py | JulianKlug/scop | b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4 | [
"MIT"
] | null | null | null | archives/scope/data_loader/data_sets/geneva_stroke_outcome_dataset.py | JulianKlug/scop | b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4 | [
"MIT"
] | 2 | 2021-05-13T08:13:06.000Z | 2021-05-13T08:13:30.000Z | archives/scope/data_loader/data_sets/geneva_stroke_outcome_dataset.py | JulianKlug/scope | b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
class GenevaStrokeOutcomeDataset(Dataset):
    """Geneva Clinical Stroke Outcome Dataset."""
    def __init__(self, imaging_dataset_path, outcome_file_path, channels, outcome, transform=None, preload_data=True):
        """
        :param imaging_dataset_path: .npz archive with keys 'params',
            'ids', 'ct_inputs' and 'brain_masks'.
        :param outcome_file_path: Excel file with an 'anonymised_id'
            column and one column per outcome.
        :param channels: indices of the imaging channels to keep.
        :param outcome: name of the outcome column to use as label.
        :param transform: optional DKFZ-style augmentation callable.
        :param preload_data: load all images into RAM up front.
        """
        self.imaging_dataset_path = imaging_dataset_path
        self.params = np.load(imaging_dataset_path, allow_pickle=True)['params']
        self.channels = channels
        self.ids = np.load(imaging_dataset_path, allow_pickle=True)['ids']
        outcomes_df = pd.read_excel(outcome_file_path)
        # One label per subject id, looked up in the outcomes spreadsheet.
        raw_labels = [outcomes_df.loc[outcomes_df['anonymised_id'] == subj_id, outcome].iloc[0] for
                      subj_id in self.ids]
        self.raw_labels = torch.tensor(raw_labels).long()
        # NOTE(review): bare except — any failure while reading the channel
        # names falls back to printing the raw params object.
        try:
            print('Using channels:', [self.params.item()['ct_sequences'][channel] for channel in channels])
        except:
            print('Geneva Stroke Dataset (perfusion CT maps) parameters: ', self.params)
        # data augmentation
        self.transform = transform
        # data load into the ram memory
        self.preload_data = preload_data
        if self.preload_data:
            print('Preloading the dataset ...')
            # select only from data available for this split
            self.raw_images = np.load(imaging_dataset_path, allow_pickle=True)['ct_inputs'][..., self.channels]
            self.raw_masks = np.load(imaging_dataset_path, allow_pickle=True)['brain_masks']
            # Give masks an explicit channel dimension so they broadcast.
            self.raw_masks = np.expand_dims(self.raw_masks, axis=-1)
            if self.raw_images.ndim < 5:
                self.raw_images = np.expand_dims(self.raw_images, axis=-1)
            # Apply masks
            self.raw_images = self.raw_images * self.raw_masks
            assert len(self.raw_images) == len(self.raw_labels)
            print('Loading is done\n')
    def get_ids(self, indices):
        """Return the subject ids for the given sample *indices*."""
        return [self.ids[index] for index in indices]
    def __getitem__(self, index):
        '''
        Return sample at index
        :param index: int
        :return: (input tensor of shape (c, x, y, z), label, subject id)
        '''
        # load the images
        if not self.preload_data:
            # Lazy path: re-open the archive and slice out one sample.
            input = np.load(self.imaging_dataset_path, allow_pickle=True)['ct_inputs'][index, ..., self.channels]
            mask = np.load(self.imaging_dataset_path, allow_pickle=True)['brain_masks'][index]
            # Make sure there is a channel dimension
            mask = np.expand_dims(mask, axis=-1)
            if input.ndim < 5:
                input = np.expand_dims(input, axis=-1)
            # Apply masks
            input = input * mask
            # Remove first dimension
            input = np.squeeze(input, axis=0)
        else:
            # With preload, it is already only the images from a certain split that are loaded
            input = self.raw_images[index]
        target = self.raw_labels[index]
        # NOTE(review): `input` and `id` shadow builtins.
        id = self.ids[index]
        # input = torch.from_numpy(input).permute(3, 0, 1, 2).to(torch.float32)
        # Move the channel axis first: (x, y, z, c) -> (c, x, y, z).
        input = np.transpose(input, (3, 0, 1, 2))
        # apply transformations
        if self.transform:
            # transiently transform into dictionary to use DKFZ augmentation
            data_dict = {'data': input}
            input = self.transform(**data_dict)['data']
        input = torch.from_numpy(input).to(torch.float32)
        return input, target, id
def __len__(self):
return len(self.ids) | 35.454545 | 118 | 0.6151 |
1285ad17989531ffe21075fa0b0fd733f5f0fce2 | 602 | py | Python | scripts/price_feed_scripts/01_deploy_price_consumer_v3.py | boitenoire-dev/dev-bootcamp-brownie | c4feb2a2d4bfd9da2048fe458e5f735dab64d30d | [
"MIT"
] | null | null | null | scripts/price_feed_scripts/01_deploy_price_consumer_v3.py | boitenoire-dev/dev-bootcamp-brownie | c4feb2a2d4bfd9da2048fe458e5f735dab64d30d | [
"MIT"
] | null | null | null | scripts/price_feed_scripts/01_deploy_price_consumer_v3.py | boitenoire-dev/dev-bootcamp-brownie | c4feb2a2d4bfd9da2048fe458e5f735dab64d30d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from brownie import PriceFeedConsumer, accounts
from scripts.helpful_scripts import (
get_verify_status,
get_account,
get_contract,
)
def deploy_price_feed_consumer():
    """Deploy the PriceFeedConsumer contract wired to the ETH/USD feed.

    Uses the account and the price-feed contract resolved by the helper
    scripts, optionally verifying the source, then prints the latest
    price and returns the deployed contract handle.
    """
    account = get_account()
    eth_usd_price_feed_address = get_contract("eth_usd_price_feed").address
    price_feed = PriceFeedConsumer.deploy(
        eth_usd_price_feed_address,
        {"from": account},
        publish_source=get_verify_status(),
    )
    print(f"The current price of ETH is {price_feed.getLatestPrice()}")
    return price_feed
def main():
    # brownie script entry point.
    deploy_price_feed_consumer()
| 25.083333 | 75 | 0.729236 |
77dd047693e90b7e0ac103e5286bfc5d2f523b50 | 324 | py | Python | events/admin.py | reddevilcero/SchoolWeb | 7adb0d6a82a803aab3e06ebedfcb022ab758c56e | [
"MIT"
] | null | null | null | events/admin.py | reddevilcero/SchoolWeb | 7adb0d6a82a803aab3e06ebedfcb022ab758c56e | [
"MIT"
] | null | null | null | events/admin.py | reddevilcero/SchoolWeb | 7adb0d6a82a803aab3e06ebedfcb022ab758c56e | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Event, EventImage
# Register your models here.
class EventImageInline(admin.TabularInline):
model = EventImage
extra = 3
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
readonly_fields = ['created', 'updated']
inlines = [EventImageInline]
| 20.25 | 44 | 0.743827 |
7ad021760a7953cb5c1d123a3103d7aa5c7b50d9 | 3,646 | py | Python | project_generator/tools_supported.py | GeneKong/project_generator | 84681246d236a39f22410988afc2b4593b805829 | [
"Apache-2.0"
] | null | null | null | project_generator/tools_supported.py | GeneKong/project_generator | 84681246d236a39f22410988afc2b4593b805829 | [
"Apache-2.0"
] | null | null | null | project_generator/tools_supported.py | GeneKong/project_generator | 84681246d236a39f22410988afc2b4593b805829 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tools.iar import IAREmbeddedWorkbench
from .tools.uvision import Uvision, Uvision5
from .tools.coide import Coide
from .tools.eclipse import EclipseMakeGccARM
from .tools.gnu_mcu_eclipse import EclipseGnuMCU
from .tools.gccarm import MakefileGccArm
from .tools.makearmcc import MakefileArmcc
from .tools.sublimetext import SublimeTextMakeGccARM
from .tools.gdb import GDB, ARMNoneEABIGDB, JLinkGDB
from .tools.cmake import CMakeGccArm
from .tools.visual_studio import VisualStudioMakeGCCARM, VisualStudioGDB
class ToolsSupported:
    """ Represents all tools available """

    # Default tools - aliases
    TOOLS_ALIAS = {
        'uvision': 'uvision5',
        'iar': 'iar_arm',
        'make_armcc': 'armcc',
        'make_gcc_arm': 'gcc_arm',
        'eclipse': 'eclipse_make_gcc_arm',
        'gnu_mcu': 'gnu_mcu_eclipse',
        'sublime': 'sublime_make_gcc_arm',
        'sublime_text': 'sublime_make_gcc_arm',
        'visual_studio': 'visual_studio_make_gcc_arm',
        'cmake': 'cmake_gcc_arm',
    }

    # Tools dictionary.
    # Every tool class must support at least:
    # - get_toolchain (list of toolchains supported by the tool)
    # - get_toolname (returns name string)
    # - export_project (basic functionality to be covered by a tool)
    TOOLS_DICT = {
        'iar_arm': IAREmbeddedWorkbench,
        'uvision4': Uvision,
        'uvision5': Uvision5,
        'coide': Coide,
        'gcc_arm': MakefileGccArm,
        'armcc': MakefileArmcc,
        'eclipse_make_gcc_arm': EclipseMakeGccARM,
        'gnu_mcu_eclipse': EclipseGnuMCU,
        'sublime_make_gcc_arm': SublimeTextMakeGccARM,
        'gdb': GDB,
        'arm_none_eabi_gdb': ARMNoneEABIGDB,
        'jlink_gdb': JLinkGDB,
        'cmake_gcc_arm': CMakeGccArm,
        'visual_studio_gdb': VisualStudioGDB,
        'visual_studio_make_gcc_arm': VisualStudioMakeGCCARM,
    }

    # De-duplicated toolchains / tool classes derived from TOOLS_DICT.
    TOOLCHAINS = list(set(v.get_toolchain() for v in TOOLS_DICT.values() if v.get_toolchain() is not None))
    TOOLS = list(set(v for v in TOOLS_DICT.values() if v is not None))

    def _get_tool_name(self, tool):
        # Resolve an alias to its canonical tool name (pass-through for
        # names that are not aliases).
        return self.TOOLS_ALIAS.get(tool, tool)

    def get_tool(self, tool):
        """Return the tool class registered for *tool*, or None."""
        return self.TOOLS_DICT.get(self._get_tool_name(tool))

    def get_toolnames(self, tool):
        """Return the tool's name list, or None for an unknown tool."""
        try:
            return self.TOOLS_DICT[self._get_tool_name(tool)].get_toolnames()
        except KeyError:
            return None

    def get_toolchain(self, tool):
        """Return the tool's toolchain, or None for an unknown tool."""
        try:
            return self.TOOLS_DICT[self._get_tool_name(tool)].get_toolchain()
        except KeyError:
            return None

    def get_supported(self):
        """Return every recognized tool name, including aliases."""
        return list(self.TOOLS_DICT) + list(self.TOOLS_ALIAS)
| 36.828283 | 111 | 0.644816 |
edb291817a2b04b3b5331f412cb6294ef038b66f | 8,077 | py | Python | madgraph/various/rambo.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 5 | 2018-10-23T14:37:18.000Z | 2021-11-22T20:59:02.000Z | madgraph/various/rambo.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 26 | 2018-10-08T15:49:32.000Z | 2020-05-15T13:33:36.000Z | madgraph/various/rambo.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 4 | 2019-02-18T11:42:18.000Z | 2021-11-11T20:46:08.000Z | from __future__ import division
import math
import random
class FortranList(list):
def __init__(self, min, max=None):
if max is None:
self.min = 1
self.max = min + 1
else:
self.min = min
self.max = max + 1
list.__init__(self,[0]*(self.max-self.min))
def __getitem__(self, index):
assert self.min <= index < self.max, 'outside range %s <= %s < %s' % (self.min, index, self.max)
return list.__getitem__(self, index - self.min)
def __setitem__(self, index, value):
assert self.min <= index < self.max
return list.__setitem__(self, index - self.min , value)
class DoubleFortranList(FortranList):
def __init__(self, min, max=(None,None)):
min1 = min[0]
max1 = max[0]
FortranList.__init__(self, min1, max1)
min2 = min[1]
max2 = max[1]
for i in range(len(self)):
list.__setitem__(self,i,FortranList(min2,max2))
def __getitem__(self, index):
var1 = index[0]
var2 = index[1]
list1 = FortranList.__getitem__(self, var1)
return list1.__getitem__(var2)
def __setitem__(self, index, value):
var1 = index[0]
var2 = index[1]
list1 = FortranList.__getitem__(self, var1)
list1.__setitem__(var2, value)
class RAMBOError(Exception):
""" A Error class for RAMBO routine """
pass
def RAMBO(N,ET,XM):
"""***********************************************************************
* RAMBO *
* RA(NDOM) M(OMENTA) B(EAUTIFULLY) O(RGANIZED) *
* *
* A DEMOCRATIC MULTI-PARTICLE PHASE SPACE GENERATOR *
* AUTHORS: S.D. ELLIS, R. KLEISS, W.J. STIRLING *
* -- ADJUSTED BY HANS KUIJF, WEIGHTS ARE LOGARITHMIC (20-08-90) *
* THIS IS PY VERSION 1.0 - WRITTEN BY O. MATTELAER *
* *
* N = NUMBER OF PARTICLES *
* ET = TOTAL CENTRE-OF-MASS ENERGY *
* XM = PARTICLE MASSES ( DIM=NEXTERNAL-nincoming ) *
* RETURN *
* P = PARTICLE MOMENTA ( DIM=(4,NEXTERNAL-nincoming) ) *
* WT = WEIGHT OF THE EVENT *
***********************************************************************"""
# RUN PARAMETER
acc = 1e-14
itmax = 6
ibegin = 0
iwarn = FortranList(5)
Nincoming = 2
# Object Initialization
Z = FortranList(N)
Q = DoubleFortranList((4,N))
P = DoubleFortranList((4,N))
R = FortranList(4)
B = FortranList(3)
XM2 = FortranList(N)
P2 = FortranList(N)
E = FortranList(N)
V= FortranList(N)
IWARN = [0,0]
# Check input object
assert isinstance(XM, FortranList)
assert XM.min == 1
assert XM.max == N+1
# INITIALIZATION STEP: FACTORIALS FOR THE PHASE SPACE WEIGHT
if not ibegin:
ibegin = 1
twopi = 8 * math.atan(1)
po2log = math.log(twopi/4)
Z[2] = po2log
for k in range(3, N+1):
Z[k] = Z[k-1] + po2log - 2.*math.log(k-2) - math.log(k-1)
# CHECK ON THE NUMBER OF PARTICLES
assert 1 < N < 101
# CHECK WHETHER TOTAL ENERGY IS SUFFICIENT; COUNT NONZERO MASSES
xmt = 0
nm = 0
for i in range(1,N+1):
if XM[i] != 0:
nm +=1
xmt += abs(XM[i])
if xmt > ET:
raise RAMBOError, ' Not enough energy in this case'
#
# THE PARAMETER VALUES ARE NOW ACCEPTED
#
# GENERATE N MASSLESS MOMENTA IN INFINITE PHASE SPACE
for i in range(1,N+1):
r1=random_nb(1)
c = 2 * r1 -1
s = math.sqrt(1 - c**2)
f = twopi * random_nb(2)
r1 = random_nb(3)
r2 = random_nb(4)
Q[(4,i)]=-math.log(r1*r2)
Q[(3,i)]= Q[(4,i)]*c
Q[(2,i)]=Q[(4,i)]*s*math.cos(f)
Q[(1,i)]=Q[(4,i)]*s*math.sin(f)
# CALCULATE THE PARAMETERS OF THE CONFORMAL TRANSFORMATION
for i in range(1, N+1):
for k in range(1,5):
R[k] = R[k] + Q[(k,i)]
rmas = math.sqrt(R[4]**2-R[3]**2-R[2]**2-R[1]**2)
for k in range(1,4):
B[k] = - R[k]/rmas
g = R[4] / rmas
a = 1.0 / (1+g)
x = ET / rmas
# TRANSFORM THE Q'S CONFORMALLY INTO THE P'S
for i in range(1, N+1):
bq = B[1]*Q[(1,i)]+B[2]*Q[(2,i)]+B[3]*Q[(3,i)]
for k in range(1,4):
P[k,i] = x*(Q[(k,i)]+B[k]*(Q[(4,i)]+a*bq))
P[(4,i)] = x*(g*Q[(4,i)]+bq)
# CALCULATE WEIGHT AND POSSIBLE WARNINGS
wt = po2log
if N != 2:
wt = (2 * N-4) * math.log(ET) + Z[N]
if wt < -180 and iwarn[1] < 5:
print "RAMBO WARNS: WEIGHT = EXP(%f20.9) MAY UNDERFLOW" % wt
iwarn[1] += 1
if wt > 174 and iwarn[2] < 5:
print " RAMBO WARNS: WEIGHT = EXP(%f20.9) MAY OVERFLOW" % wt
iwarn[2] += 1
# RETURN FOR WEIGHTED MASSLESS MOMENTA
if nm == 0:
return P, wt
# MASSIVE PARTICLES: RESCALE THE MOMENTA BY A FACTOR X
xmax = math.sqrt(1-(xmt/ET)**2)
for i in range(1,N+1):
XM2[i] = XM[i] **2
P2[i] = P[(4,i)]**2
n_iter = 0
x= xmax
accu = ET * acc
while 1:
f0 = -ET
g0 = 0
x2 = x**2
for i in range(1, N+1):
E[i] = math.sqrt(XM2[i]+x2*P2[i])
f0 += E[i]
g0 += P2[i]/E[i]
if abs(f0) <= accu:
break
n_iter += 1
if n_iter > itmax:
print "RAMBO WARNS: %s ITERATIONS DID NOT GIVE THE DESIRED ACCURACY = %s" \
%(n_iter , f0)
break
x=x-f0/(x*g0)
for i in range(1, N+1):
V[i] = x * P[(4,i)]
for k in range(1,4):
P[(k,i)] = x * P[(k,i)]
P[(4,i)] = E[i]
# CALCULATE THE MASS-EFFECT WEIGHT FACTOR
wt2 = 1.
wt3 = 0.
for i in range(1, N+1):
wt2 *= V[i]/E[i]
wt3 += V[i]**2/E[i]
wtm = (2.*N-3.)*math.log(x)+math.log(wt2/wt3*ET)
# RETURN FOR WEIGHTED MASSIVE MOMENTA
wt += wtm
if(wt < -180 and iwarn[3] < 5):
print " RAMBO WARNS: WEIGHT = EXP(%s) MAY UNDERFLOW" % wt
iwarn[3] += 1
if(wt > 174 and iwarn[4] > 5):
print " RAMBO WARNS: WEIGHT = EXP(%s) MAY OVERFLOW" % wt
iwarn[4] += 1
# RETURN
return P, wt
def random_nb(value):
""" random number """
output = 0
while output < 1e-16:
output= random.uniform(0,1)
return output
| 33.238683 | 139 | 0.396434 |
56f4b3ceb927dd6bb69449cf182700edbe189e7e | 1,041 | py | Python | tests/tools/test_persistent_reference_dict_merge.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | 1 | 2020-03-18T09:54:52.000Z | 2020-03-18T09:54:52.000Z | tests/tools/test_persistent_reference_dict_merge.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | null | null | null | tests/tools/test_persistent_reference_dict_merge.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | null | null | null | import pytest
from sheraf.tools.dicttools import DictConflictException, merge
from ZODB.ConflictResolution import PersistentReference as PR
def test_edited_once():
    """A reference edited in only one branch resolves to the edited value."""
    expected = {"ref": PR("bar")}
    assert expected == merge(
        {"ref": PR("foo")}, {"ref": PR("foo")}, {"ref": PR("bar")}
    )
    assert expected == merge(
        {"ref": PR("foo")}, {"ref": PR("bar")}, {"ref": PR("foo")}
    )
def test_added_once():
    """A reference added in a single branch is kept in the merge result."""
    expected = {"ref": PR("foo")}
    assert expected == merge({}, {"ref": PR("foo")}, {})
    assert expected == merge({}, {}, {"ref": PR("foo")})
def test_deleted_twice():
    """Deleting the same reference in both branches is not a conflict."""
    merged = merge({"ref": PR("foo")}, {}, {})
    assert merged == {}
def test_added_twice():
    """Adding the same key with different references in both branches conflicts."""
    ours = {"foo": PR("DOH")}
    theirs = {"foo": PR("NEH")}
    with pytest.raises(DictConflictException):
        merge({}, ours, theirs)
def test_edited_twice():
    """Editing the same key to different references in both branches conflicts."""
    base = {"foo": PR("ZBRLA")}
    with pytest.raises(DictConflictException):
        merge(base, {"foo": PR("DOH")}, {"foo": PR("NEH")})
| 20.411765 | 75 | 0.48511 |
68074a9012f3a6b6eb7229cbf6790d4addaacc04 | 21,559 | py | Python | pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py | Elena-Qiu/analytics-zoo | deb1e42c20b1003345690152fd782f3177e8ee25 | [
"Apache-2.0"
] | null | null | null | pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py | Elena-Qiu/analytics-zoo | deb1e42c20b1003345690152fd782f3177e8ee25 | [
"Apache-2.0"
] | null | null | null | pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py | Elena-Qiu/analytics-zoo | deb1e42c20b1003345690152fd782f3177e8ee25 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import numpy as np
import pytest
import tensorflow as tf
from zoo import init_nncontext
from zoo.orca.data import XShards
import zoo.orca.data.pandas
from zoo.orca.learn.tf2 import Estimator
from zoo.ray import RayContext
import ray
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
import os
resource_path = os.path.join(
os.path.realpath(os.path.dirname(__file__)), "../../../../resources")
def linear_dataset(a=2, size=1000):
    """Build a toy linear-regression dataset where y = x / 2.

    NOTE(review): the ``a`` parameter is unused by this implementation;
    it is kept only for signature compatibility.

    Returns a tuple ``(x, y)`` of column vectors with shape ``(size, 1)``.
    """
    features = np.random.rand(size).reshape((-1, 1))
    targets = features / 2
    return features, targets
def create_train_datasets(config, batch_size):
    """Build the shuffled, batched training ``tf.data`` pipeline.

    ``config`` is unused; it is part of the estimator's data-creator
    interface.
    """
    import tensorflow as tf
    x_train, y_train = linear_dataset(size=NUM_TRAIN_SAMPLES)
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    # Shuffle over the full sample count so every epoch sees a fresh order.
    train_dataset = train_dataset.shuffle(NUM_TRAIN_SAMPLES).batch(
        batch_size)
    return train_dataset
def create_test_dataset(config, batch_size):
    """Build the batched (unshuffled) evaluation ``tf.data`` pipeline.

    ``config`` is unused; it is part of the estimator's data-creator
    interface.
    """
    import tensorflow as tf
    x_test, y_test = linear_dataset(size=NUM_TEST_SAMPLES)
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_dataset = test_dataset.batch(batch_size)
    return test_dataset
def simple_model(config):
    """Return an uncompiled two-layer dense regression model (1 input, 1 output).

    ``config`` is unused; it is part of the model-creator interface.
    """
    import tensorflow as tf
    model = tf.keras.models.Sequential([tf.keras.layers.Dense(10, input_shape=(1,)),
                                        tf.keras.layers.Dense(1)])
    return model
def compile_args(config):
    """Return keyword arguments for ``model.compile``.

    Uses ``config["lr"]`` as the SGD learning rate when present,
    otherwise a default of 1e-3.
    NOTE(review): assumes ``config`` is a mapping; a ``None`` config
    would raise here — presumably the estimator supplies a dict.
    """
    import tensorflow as tf
    lr = config["lr"] if "lr" in config else 1e-3
    return {
        "optimizer": tf.keras.optimizers.SGD(lr),
        "loss": "mean_squared_error",
        "metrics": ["mean_squared_error"],
    }
def model_creator(config):
    """Estimator model-creator: build and compile the simple regression model."""
    model = simple_model(config)
    model.compile(**compile_args(config))
    return model
def identity_model_creator(config):
    """Return a compiled model that passes its input through unchanged.

    Used by the predict tests to check that data round-trips through the
    estimator intact.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(1)),
        tf.keras.layers.Lambda(lambda x: tf.identity(x))
    ])
    model.compile()
    return model
def create_auto_shard_datasets(config, batch_size):
    """Build a dataset from the auto-shard CSV fixtures.

    Listing the files and interleaving their line datasets makes batch
    contents depend on whether sharding happens by file or by record,
    which the auto-shard tests below rely on.
    """
    import tensorflow as tf
    data_path = os.path.join(resource_path, "orca/learn/test_auto_shard/*.csv")
    dataset = tf.data.Dataset.list_files(data_path)
    dataset = dataset.interleave(lambda x: tf.data.TextLineDataset(x))
    dataset = dataset.map(lambda x: tf.strings.to_number(x))
    # Duplicate each value into an (x, y) pair so it can feed a keras model.
    dataset = dataset.map(lambda x: (x, x))
    dataset = dataset.batch(batch_size)
    return dataset
def create_auto_shard_model(config):
    """Return an uncompiled identity model for the auto-shard tests."""
    import tensorflow as tf
    model = tf.keras.models.Sequential([
        tf.keras.layers.Lambda(lambda x: tf.identity(x))
    ])
    return model
def create_auto_shard_compile_args(config):
    """Compile args whose loss is zero only for homogeneous batches.

    The loss compares the first two elements of each (y1, y2) batch, so it
    vanishes when every record in a batch is identical — i.e. when tf
    sharded the fixture files by file rather than by record.
    """
    import tensorflow as tf

    def loss_func(y1, y2):
        # Zero iff the first two entries of each batch are equal.
        return tf.abs(y1[0] - y1[1]) + tf.abs(y2[0] - y2[1])

    args = {
        # lr=0 keeps the weights fixed; only the reported loss matters.
        "optimizer": tf.keras.optimizers.SGD(lr=0.0),
        "loss": loss_func,
    }
    return args
def auto_shard_model_creator(config):
    """Model-creator pairing the identity model with the batch-equality loss."""
    model = create_auto_shard_model(config)
    model.compile(**create_auto_shard_compile_args(config))
    return model
class LRChecker(tf.keras.callbacks.Callback):
    """Callback asserting that the horovod LR schedule yields the expected
    learning rate at the end of every epoch."""

    def __init__(self, *args):
        super(LRChecker, self).__init__(*args)
        # Expected warm-up values for epochs 0-4 (linear ramp up to 0.4).
        self.warmup_lr = [0.16, 0.22, 0.28, 0.34, 0.4]

    def on_epoch_end(self, epoch, logs=None):
        current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        print("epoch {} current lr is {}".format(epoch, current_lr))
        # Piecewise schedule: warm-up for 5 epochs, then a plateau at 0.4,
        # then a tenfold drop every 5 epochs.
        if epoch < 5:
            assert abs(current_lr - self.warmup_lr[epoch]) < 1e-5
        elif 5 <= epoch < 10:
            assert abs(current_lr - 0.4) < 1e-5
        elif 10 <= epoch < 15:
            assert abs(current_lr - 0.04) < 1e-5
        elif 15 <= epoch < 20:
            assert abs(current_lr - 0.004) < 1e-5
        else:
            assert abs(current_lr - 0.0004) < 1e-5
class TestTFRayEstimator(TestCase):
def impl_test_fit_and_evaluate(self, backend):
import tensorflow as tf
ray_ctx = RayContext.get()
batch_size = 32
global_batch_size = batch_size * ray_ctx.num_ray_nodes
if backend == "horovod":
trainer = Estimator.from_keras(
model_creator=simple_model,
compile_args_creator=compile_args,
verbose=True,
config=None,
backend=backend)
else:
trainer = Estimator.from_keras(model_creator=model_creator,
verbose=True,
config=None,
backend=backend,
workers_per_node=2)
# model baseline performance
start_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
num_steps=NUM_TEST_SAMPLES // global_batch_size)
print(start_stats)
def scheduler(epoch):
if epoch < 2:
return 0.001
else:
return 0.001 * tf.math.exp(0.1 * (2 - epoch))
scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
# train for 2 epochs
trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
steps_per_epoch=10, callbacks=[scheduler])
trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
steps_per_epoch=10, callbacks=[scheduler])
# model performance after training (should improve)
end_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
num_steps=NUM_TEST_SAMPLES // global_batch_size)
print(end_stats)
# sanity check that training worked
dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
dmse = (end_stats["validation_mean_squared_error"] -
start_stats["validation_mean_squared_error"])
print(f"dLoss: {dloss}, dMSE: {dmse}")
assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
def test_fit_and_evaluate_tf(self):
self.impl_test_fit_and_evaluate(backend="tf2")
def test_fit_and_evaluate_horovod(self):
self.impl_test_fit_and_evaluate(backend="horovod")
def test_auto_shard_tf(self):
# file 1 contains all 0s, file 2 contains all 1s
# If shard by files, then each model will
# see the same records in the same batch.
# If shard by records, then each batch
# will have different records.
# The loss func is constructed such that
# the former case will return 0, and the latter
# case will return non-zero.
ray_ctx = RayContext.get()
trainer = Estimator.from_keras(
model_creator=auto_shard_model_creator,
verbose=True,
backend="tf2", workers_per_node=2)
stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
assert stats["train_loss"] == 0.0
def test_auto_shard_horovod(self):
# file 1 contains all 0s, file 2 contains all 1s
# If shard by files, then each model will
# see the same records in the same batch.
# If shard by records, then each batch
# will have different records.
# The loss func is constructed such that
# the former case will return 0, and the latter
# case will return non-zero.
ray_ctx = RayContext.get()
trainer = Estimator.from_keras(
model_creator=create_auto_shard_model,
compile_args_creator=create_auto_shard_compile_args,
verbose=True,
backend="horovod", workers_per_node=2)
stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
assert stats["train_loss"] == 0.0
# this needs horovod >= 0.19.2
def test_horovod_learning_rate_schedule(self):
import horovod
major, minor, patch = horovod.__version__.split(".")
larger_major = int(major) > 0
larger_minor = int(major) == 0 and int(minor) > 19
larger_patch = int(major) == 0 and int(minor) == 19 and int(patch) >= 2
if larger_major or larger_minor or larger_patch:
ray_ctx = RayContext.get()
batch_size = 32
workers_per_node = 4
global_batch_size = batch_size * workers_per_node
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=simple_model,
compile_args_creator=compile_args,
verbose=True,
config=config,
backend="horovod", workers_per_node=workers_per_node)
import horovod.tensorflow.keras as hvd
callbacks = [
hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, initial_lr=0.4,
verbose=True),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=10,
multiplier=1., initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=10, end_epoch=15,
multiplier=1e-1, initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=15, end_epoch=20,
multiplier=1e-2, initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=20, multiplier=1e-3,
initial_lr=0.4),
LRChecker()
]
for i in range(30):
trainer.fit(create_train_datasets, epochs=1, batch_size=global_batch_size,
callbacks=callbacks)
else:
# skip tests in horovod lower version
pass
def test_sparkxshards(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100))})
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_dataframe(self):
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_dataframe_with_empty_partition(self):
from zoo.orca import OrcaContext
sc = OrcaContext.get_spark_context()
rdd = sc.range(0, 10)
rdd_with_empty = rdd.repartition(4).\
mapPartitionsWithIndex(lambda idx, part: [] if idx == 0 else part)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd_with_empty.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=()))))\
.toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_pandas_dataframe(self):
def model_creator(config):
import tensorflow as tf
input1 = tf.keras.layers.Input(shape=(1,))
input2 = tf.keras.layers.Input(shape=(1,))
concatenation = tf.concat([input1, input2], axis=-1)
outputs = tf.keras.layers.Dense(units=1, activation='softmax')(concatenation)
model = tf.keras.Model(inputs=[input1, input2], outputs=outputs)
model.compile(**compile_args(config))
return model
file_path = os.path.join(resource_path, "orca/learn/ncf2.csv")
train_data_shard = zoo.orca.data.pandas.read_csv(file_path)
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=1)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["user", "item"],
label_cols=["label"])
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25,
feature_cols=["user", "item"], label_cols=["label"])
trainer.predict(train_data_shard, feature_cols=["user", "item"]).collect()
def test_dataframe_shard_size(self):
from zoo.orca import OrcaContext
OrcaContext._shard_size = 3
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_partition_num_less_than_workers(self):
sc = init_nncontext()
rdd = sc.range(200, numSlices=1)
assert rdd.getNumPartitions() == 1
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
assert df.rdd.getNumPartitions() < trainer.num_workers
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_dataframe_predict(self):
sc = init_nncontext()
rdd = sc.parallelize(range(20))
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
estimator = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config={},
workers_per_node=2)
result = estimator.predict(df, batch_size=4,
feature_cols=["feature"])
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result.selectExpr(expr).first()["error"] == 0
def test_sparkxshards_with_inbalanced_data(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100))})
def random_pad(data):
import numpy as np
import random
times = random.randint(1, 10)
data["x"] = np.concatenate([data["x"]] * times)
data["y"] = np.concatenate([data["y"]] * times)
return data
train_data_shard = train_data_shard.transform_shard(random_pad)
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_predict_xshards(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100,))})
expected = train_data_shard.collect()
expected = [shard["x"] for shard in expected]
for x in expected:
print(x.shape)
expected = np.concatenate(expected)
config = {
}
trainer = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config=config,
workers_per_node=2)
result_shards = trainer.predict(train_data_shard, batch_size=10).collect()
result = [shard["prediction"] for shard in result_shards]
expected_result = [shard["x"] for shard in result_shards]
result = np.concatenate(result)
assert np.allclose(expected, result)
def test_save_and_load(self):
def model_creator(config):
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')]
)
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def train_data_creator(config, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((np.random.randn(100, 28, 28, 3),
np.random.randint(0, 10, (100, 1))))
dataset = dataset.repeat()
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size)
return dataset
batch_size = 320
try:
est = Estimator.from_keras(model_creator=model_creator, workers_per_node=2)
history = est.fit(train_data_creator,
epochs=1,
batch_size=batch_size,
steps_per_epoch=5)
print("start saving")
est.save("/tmp/cifar10_keras.ckpt")
est.load("/tmp/cifar10_keras.ckpt")
print("save success")
finally:
os.remove("/tmp/cifar10_keras.ckpt")
if __name__ == "__main__":
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main([__file__])
| 36.916096 | 98 | 0.586437 |
f914819b81b1431a3eb1b61b209eda4c89ca8fa4 | 219 | py | Python | __Training__/Python - HackerRank/11. Built-Ins/Any or All.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Training__/Python - HackerRank/11. Built-Ins/Any or All.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Training__/Python - HackerRank/11. Built-Ins/Any or All.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/any-or-all/problem
# Read N (only consumed, not used) and the space-separated integers.
_, numbers = input(), input().split()
# Example input:
# 6
# 1 2 11 121 -9
# True iff every integer is positive AND at least one reads the same
# backwards (palindromic). Generator expressions let all()/any()
# short-circuit without building intermediate lists.
print(all(int(n) > 0 for n in numbers) and any(n == n[::-1] for n in numbers))
# False
0d347a8c5880dc7ffef3e5659f2cff1523ee7117 | 704 | py | Python | virtual/bin/django-admin.py | FGacheru/Awwards | 341124ad40f073a3ba3f4c712359a274f5cfa07f | [
"RSA-MD"
] | null | null | null | virtual/bin/django-admin.py | FGacheru/Awwards | 341124ad40f073a3ba3f4c712359a274f5cfa07f | [
"RSA-MD"
] | null | null | null | virtual/bin/django-admin.py | FGacheru/Awwards | 341124ad40f073a3ba3f4c712359a274f5cfa07f | [
"RSA-MD"
] | null | null | null | #!/home/frank/Desktop/core-projects/django/awwards/virtual/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 removed this warning class along with the
    # django-admin.py shim itself, so fail with an explicit message.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    # Emit the deprecation warning, then defer to the regular CLI entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| 32 | 80 | 0.730114 |
86ae6ab03045a286db847acc766741122b48ccf5 | 1,187 | py | Python | rtlive/sources/data_lu.py | michaelosthege/covid-model | 378dc8d0c89098b8b8a469efe1e8e0fcb9f7b5de | [
"Apache-2.0"
] | 1 | 2020-10-29T11:00:27.000Z | 2020-10-29T11:00:27.000Z | rtlive/sources/data_lu.py | michaelosthege/covid-model | 378dc8d0c89098b8b8a469efe1e8e0fcb9f7b5de | [
"Apache-2.0"
] | null | null | null | rtlive/sources/data_lu.py | michaelosthege/covid-model | 378dc8d0c89098b8b8a469efe1e8e0fcb9f7b5de | [
"Apache-2.0"
] | null | null | null | import logging
import pandas
from . import ourworldindata
from .. import preprocessing
_log = logging.getLogger(__file__)
def forecast_LU(df: pandas.DataFrame):
    """ Applies testcount interpolation/extrapolation.
    Currently this assumes the OWID data, which only has an "all" region.
    In the future, this should be replaced with more fine-grained data loading!
    """
    # forecast with existing data
    df['predicted_new_tests'], results = preprocessing.predict_testcounts_all_regions(df, 'LU')
    # interpolate the initial testing ramp-up to account for missing data
    df_region = df.xs('all')
    # Anchor the series at zero on 2020-01-01 so the linear interpolation
    # back-fills the period before testing data begins.
    df_region.loc['2020-01-01', 'predicted_new_tests'] = 0
    df_region.predicted_new_tests = df_region.predicted_new_tests.interpolate('linear')
    # Re-attach the region level that .xs() dropped and restore the
    # (region, date) MultiIndex expected by downstream code.
    df_region['region'] = 'all'
    df = df_region.reset_index().set_index(['region', 'date'])
    return df, results
from .. import data

# Register Luxembourg with the shared data-loading framework: a single
# "all" region backed by the OWID loader and the forecast above.
data.set_country_support(
    country_alpha2="LU",
    compute_zone=data.Zone.Europe,
    region_name={
        "all": "Luxemburg",
    },
    region_population={
        # NOTE(review): static population figure — confirm source/vintage.
        "all": 626_108,
    },
    fn_load=ourworldindata.create_loader_function("LU"),
    fn_process=forecast_LU,
)
| 29.675 | 95 | 0.708509 |
504d56d6abb67eac45971ae095341b4bfe29a421 | 1,126 | py | Python | accounts/migrations/0003_userprofile.py | shaymk1/Felicia-s-ecommerce-store | aaf9d5aed018e451602c6c39bf8e5e24f9cedc01 | [
"MIT"
] | null | null | null | accounts/migrations/0003_userprofile.py | shaymk1/Felicia-s-ecommerce-store | aaf9d5aed018e451602c6c39bf8e5e24f9cedc01 | [
"MIT"
] | null | null | null | accounts/migrations/0003_userprofile.py | shaymk1/Felicia-s-ecommerce-store | aaf9d5aed018e451602c6c39bf8e5e24f9cedc01 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-05 10:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the ``UserProfile`` model —
    a one-to-one extension of the auth user carrying address fields."""

    dependencies = [
        ('accounts', '0002_alter_account_id'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address_line_1', models.CharField(blank=True, max_length=100)),
                ('address_line_2', models.CharField(blank=True, max_length=100)),
                ('profile_picture', models.ImageField(blank=True, upload_to='userprofile')),
                ('city', models.CharField(blank=True, max_length=20)),
                ('province', models.CharField(blank=True, max_length=20)),
                ('country', models.CharField(blank=True, max_length=20)),
                # Deleting the user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
cfb33ca2e65ad6c490f3676eabfd8732b530e0b2 | 389 | py | Python | src/sentry_plugins/github/urls.py | MattPark/sentry-plugins | 3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993 | [
"Apache-2.0"
] | null | null | null | src/sentry_plugins/github/urls.py | MattPark/sentry-plugins | 3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993 | [
"Apache-2.0"
] | 2 | 2018-05-26T13:19:41.000Z | 2018-06-01T20:14:41.000Z | src/sentry_plugins/github/urls.py | MattPark/sentry-plugins | 3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993 | [
"Apache-2.0"
] | 1 | 2018-05-26T11:45:46.000Z | 2018-05-26T11:45:46.000Z | from __future__ import absolute_import
from django.conf.urls import patterns, url
from .endpoints.webhook import GithubIntegrationsWebhookEndpoint, GithubWebhookEndpoint
urlpatterns = patterns(
'',
url(r'^organizations/(?P<organization_id>[^\/]+)/webhook/$', GithubWebhookEndpoint.as_view()),
url(r'^installations/webhook/$', GithubIntegrationsWebhookEndpoint.as_view()),
)
| 32.416667 | 98 | 0.773779 |
45de74bed49969f9b0986bb6429350a6d7e9ce72 | 11,485 | py | Python | src/dbcontext.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null | src/dbcontext.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null | src/dbcontext.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null |
# Author : @jimmg35
import datetime
import schedule
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Storer():
    """
    Storing processed data from Parser.

    Holds parsed datasets keyed by name and forwards them to a
    ``Dbcontext`` for database import.
    """

    def __init__(self, dbcontext):
        self.dbcontext = dbcontext
        # Per-instance state. These were previously class attributes,
        # which made every Storer instance share one list/dict.
        self.data_list = []
        self.storage = {}

    def insert(self, data, name: str):
        """Register *data* under *name* for a later import."""
        self.storage[name] = data
        # Avoid duplicate bookkeeping entries on repeated inserts.
        if name not in self.data_list:
            self.data_list.append(name)

    def import2Database(self, item: str, y=None, sm=None, em=None):
        """Dispatch the named dataset to the matching Dbcontext importer.

        ``y``/``sm``/``em`` (year, start month, end month) are only used
        for the "FixedData" import.
        """
        if item == "ProjectData" and self.importGate(item):
            self.dbcontext.ImportProjectMeta(self.storage[item])
        if item == "DeviceMeta" and self.importGate(item):
            self.dbcontext.ImportDeviceMeta(self.storage[item])
        if item == "SensorMeta" and self.importGate(item):
            self.dbcontext.ImportSensorMeta(self.storage[item])
        if item == "FixedData" and self.importGate(item):
            self.dbcontext.ImportFixedSensorData(self.storage[item], y, sm, em)

    def importGate(self, item):
        """Return True when *item* has been inserted, else warn and return False.

        The original check ``self.data_list.index(item) != -1`` could never
        be False and raised ValueError for missing items; a plain membership
        test implements the intended gate.
        """
        if item in self.data_list:
            return True
        print("Data is not accessible!")
        return False
class Dbcontext():
"""
Importing data into database.
"""
def __init__(self, PGSQL_user_data, database):
# PostgreSQL server variable.
self.PGSQL_user_data = PGSQL_user_data
# Connect to local Postgresql server.
self.cursor = self.ConnectToDatabase(database)
def ConnectToDatabase(self, database):
"""
Connect to PostgreSQL database.
"""
conn = psycopg2.connect(database=database,
user=self.PGSQL_user_data["user"],
password=self.PGSQL_user_data["password"],
host=self.PGSQL_user_data["host"],
port=self.PGSQL_user_data["port"])
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected to local PostgreSQL server| User: @{self.PGSQL_user_data["user"]}')
print(f' Currently connected to database: @{database}')
cursor = conn.cursor()
return cursor
def ImportProjectMeta(self, projectMeta):
"""
Import porject metadata into database.
"""
for projID in list(projectMeta.keys()):
keys_arr: str = "'{"
for index, i in enumerate(projectMeta[projID]["keys"]):
if index == (len(projectMeta[projID]["keys"])-1):
keys_arr += '"' + str(i) + '"' + "}'"
break
keys_arr += '"' + str(i) + '"' + ','
query = '''INSERT INTO projectmeta (projectid, projectname, projectkeys)
VALUES({}, \'{}\', {});'''.format(str(projID),
projectMeta[projID]["name"],
keys_arr)
self.cursor.execute(query)
print("Project Metadata has been stored into database!")
def ImportDeviceMeta(self, deviceMeta):
"""
Import device meta into database.
"""
column_str = "("
query = "select column_name from information_schema.columns where table_name = 'devicemeta';"
self.cursor.execute(query)
column = [i[0] for i in self.cursor.fetchall()]
for index, i in enumerate(column):
if index == (len(column)-1):
column_str += i + ")"
break
column_str += i + ","
for index, i in enumerate(deviceMeta):
values = self.bulidDeviceMetaQuery(i, index)
query = "INSERT INTO devicemeta " + column_str + values
self.cursor.execute(query)
print("Device Metadata has been stored into database!")
def ImportSensorMeta(self, SensorMeta):
"""
Import metadata of sensor of each device into database.
"""
ids = 1
for device in SensorMeta:
sensor_id = "'{"
for index, i in enumerate(device[2]):
if index == (len(device[2])-1):
sensor_id += '"' + str(i) + '"' + "}'"
break
sensor_id += '"' + str(i) + '"' + ','
query = '''INSERT INTO sensormeta (id, deviceid, projectkey, sensor_id)
VALUES({}, \'{}\', \'{}\', {});'''.format(ids, device[0], device[1], sensor_id)
self.cursor.execute(query)
ids += 1
print("Sensor Metadata has been stored into database!")
def bulidDeviceMetaQuery(self, device, count):
"""
Helper function of ImportDeviceMeta(),
for handling exception.
"""
output = " VALUES(" + str(count) + "," + device["id"] + ","
for index, i in enumerate(list(device.keys())):
if index == (len(list(device.keys())) - 1):
output += "'" + str(device[i]) + "')"
break
if i == "id":
continue
if str(device[i]) == "旗山區公所前'":
output += "'" + "旗山區公所前" + "',"
continue
output += "'" + str(device[i]) + "',"
return output
def queryDeviceSensorMeta_fixed(self):
"""
query specific metadata from database.
"""
query = '''SELECT projectid, projectkey, deviceid, sensor_id FROM sensormeta INNER JOIN projectmeta ON sensormeta.projectkey = ANY(projectmeta.projectkeys) WHERE projectid IN ('528','671','672','673','674',
'675','677','678','680','709','756','1024','1025','1027','1029','1032','1034','1035','1036','1048',
'1058','1071','1072','1075','1079','1084','1085','1102','1120','1145','1147','1162','1167','1184','1189','1192','1207');'''
self.cursor.execute(query)
return self.cursor.fetchall()
def ImportFixedSensorData(self, FixedSensorData, year, start_m, end_m):
print("=================== Import into database ===================")
table_dict = {"1": "minute", "60": "hour"}
for interval in list(FixedSensorData.keys()):
for projectid in list(FixedSensorData[interval].keys()):
# get biggest id in that table
table_name = table_dict[interval] + "_" + projectid + "_" + str(year) + "_" + str(int(start_m)) + "to" + str(int(end_m)+1)
print(table_name)
if self.getBiggestId(table_name) == None:
id_for_proj = 1
else:
id_for_proj = self.getBiggestId(table_name) + 1
# insert data into table
for a_row in FixedSensorData[interval][projectid]:
try:
query = '''INSERT INTO {} (id, deviceid,
voc_avg, voc_max, voc_min, voc_median,
pm2_5_avg, pm2_5_max, pm2_5_min, pm2_5_median,
humidity_avg, humidity_max, humidity_min, humidity_median,
temperature_avg, temperature_max, temperature_min, temperature_median,
year, month, day, hour, minute, second, time)
VALUES({},\'{}\',{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},\'{}\');
'''.format(table_name, id_for_proj, a_row[0], a_row[1], a_row[2], a_row[3], a_row[4],a_row[5], a_row[6], a_row[7], a_row[8],
a_row[9], a_row[10], a_row[11], a_row[12],a_row[13], a_row[14], a_row[15], a_row[16], a_row[17], a_row[18], a_row[19],
a_row[20],a_row[21], a_row[22], a_row[23])
self.cursor.execute(query)
id_for_proj += 1
except:
print("insert exception at -> interval:{} projectid:{} ".format(interval, projectid))
print("insert complete -> {}".format(table_name))
def getBiggestId(self, table_name):
query = '''SELECT max(id) FROM {};'''.format(table_name)
self.cursor.execute(query)
return self.cursor.fetchall()[0][0]
def queryDeviceSensorMeta_spacial(self):
query = '''SELECT projectid, projectkey, deviceid FROM devicemeta WHERE projectid IN ('1156', '565', '624', '891');'''
self.cursor.execute(query)
return self.cursor.fetchall()
def queryMinuteMetadata(self, project):
query = '''SELECT deviceid, projectkey FROM sensormeta INNER JOIN
projectmeta ON sensormeta.projectkey =
ANY(projectmeta.projectkeys) WHERE projectid = '{}';'''.format(project)
self.cursor.execute(query)
data = self.cursor.fetchall()
return [[i[0], i[1]]for i in data]
def ImportMinuteData(self, deviceid, data, date, time, project, start_month):
""" 將時間區段內的一台感測器資料輸入至資料庫 """
table_name = "minute_{}_{}to{}".format(project, start_month, start_month+1)
if self.getBiggestId(table_name) == None:
ids = 1
else:
ids = self.getBiggestId(table_name) + 1
for i in range(0, len(deviceid)):
query = '''INSERT INTO {} (id, deviceid, voc, pm2_5, humidity, temperature, date, hour, minute, second)
VALUES({}, \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\');
'''.format(table_name, ids, deviceid[i], data[i]["voc"], data[i]["pm2_5"],
data[i]["humidity"], data[i]["temperature"],
date[i], time[i][0], time[i][1], time[i][2])
self.cursor.execute(query)
ids += 1
def launchPatch(self):
queries = ['''DELETE FROM devicemeta WHERE projectid
NOT IN ('528','671','672','673','674',
'675','677','678','680','709',
'756','1024','1025','1027','1029',
'1032','1034','1035','1036','1048',
'1058','1071','1072','1075','1079',
'1084','1085','1102','1120','1145',
'1147','1162','1167','1184','1189',
'1192','1207','1156','565','624','891');''']
for index, i in enumerate(queries):
print("Patch {} has been applied to database!".format(index))
self.cursor.execute(i)
def ImportHourData(self, total_chunk_np, meta):
    """Insert hourly aggregated rows into ``hour_<project>_<month>``.

    :param total_chunk_np: array-like of shape (rows, 7) with columns
        deviceid, voc, pm2_5, humidity, temperature, date, hour
    :param meta: dict with keys 'porjectId' and 'startMonth'.
        NOTE(review): 'porjectId' looks like a typo for 'projectId' but
        is kept as-is because callers supply that exact key.
    """
    table_name = "hour_{}_{}".format(meta["porjectId"], meta["startMonth"])
    # Fix: single max(id) lookup and an ``is None`` identity test instead
    # of the original double query with ``== None``.
    max_id = self.getBiggestId(table_name)
    ids = 1 if max_id is None else max_id + 1
    for row in range(total_chunk_np.shape[0]):
        query = (
            "INSERT INTO {} (id, deviceid, voc, pm2_5, humidity, temperature, date, hour) "
            "VALUES({}, '{}', '{}', '{}', '{}', '{}', '{}', '{}');"
        ).format(table_name, ids, total_chunk_np[row][0], total_chunk_np[row][1],
                 total_chunk_np[row][2], total_chunk_np[row][3],
                 total_chunk_np[row][4], total_chunk_np[row][5],
                 total_chunk_np[row][6])
        self.cursor.execute(query)
        ids += 1
9564b0c1a1f685ef8f8e5bc654c654e6d6364be2 | 17,266 | py | Python | syft/frameworks/torch/dp/pate.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 7 | 2020-04-20T22:22:08.000Z | 2020-07-25T17:32:08.000Z | syft/frameworks/torch/dp/pate.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 3 | 2020-04-24T21:20:57.000Z | 2020-05-28T09:17:02.000Z | syft/frameworks/torch/dp/pate.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 4 | 2020-04-24T22:32:37.000Z | 2020-05-25T19:29:20.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# ==============================================================================
# Modifications copyright (C) 2020 OpenMined
#
# Added type hints to functions
# Added moment values to print statements when calculating sensitivity
# ==============================================================================
"""
This script computes bounds on the privacy cost of training the
student model from noisy aggregation of labels predicted by teachers.
It should be used only after training the student (and therefore the
teachers as well). We however include the label files required to
reproduce key results from our paper (https://arxiv.org/abs/1610.05755):
the epsilon bounds for MNIST and SVHN students.
"""
import math
from typing import List, Tuple, Union
import numpy as np
import torch
def compute_q_noisy_max(counts: Union[np.ndarray, List[float]], noise_eps: float) -> float:
    """Upper-bound Pr[outcome != winner] for the noisy-max mechanism.

    Uses Pr[j beats i*] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*))), see
    http://mathoverflow.net/questions/66763/
    tight-bounds-on-probability-of-sum-of-laplace-random-variables

    Args:
        counts: a list of scores
        noise_eps: privacy parameter for noisy_max
    Returns:
        The probability that the outcome differs from the true winner.
    """
    best = np.argmax(counts)
    scaled = noise_eps * (counts - counts[best])
    runners_up = np.array([scaled[j] for j in range(len(counts)) if j != best])
    total = 0.0
    for val in runners_up:
        gap = -val
        total += (gap + 2.0) / (4.0 * math.exp(gap))
    # The bound can never exceed 1 - 1/num_outcomes.
    return min(total, 1.0 - (1.0 / len(counts)))
def compute_q_noisy_max_approx(counts: List[float], noise_eps: float) -> float:
    """Approximate Pr[outcome != winner] for the noisy-max mechanism.

    Faster variant of :func:`compute_q_noisy_max` that bounds every
    runner-up by the single smallest gap, which also makes a local
    sensitivity bound easier to derive.

    Args:
        counts: a list of scores
        noise_eps: privacy parameter for noisy_max
    Returns:
        The probability that the outcome differs from the true winner.
    """
    best = np.argmax(counts)
    scaled = noise_eps * (counts - counts[best])
    runners_up = np.array([scaled[j] for j in range(len(counts)) if j != best])
    gap = -max(runners_up)
    q_bound = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
    return min(q_bound, 1.0 - (1.0 / len(counts)))
def logmgf_exact(q: float, priv_eps: float, l: int) -> float:
    """Upper-bound the log moment generating function at moment ``l``.

    The bound is the minimum of three terms: the data-dependent bound
    from https://arxiv.org/pdf/1605.02065.pdf, a term based on the small
    multiplicative change of (1-q) for q near zero, and the direct
    privacy guarantee priv_eps * l.

    Args:
        q: probability of a non-optimal outcome
        priv_eps: eps parameter for DP
        l: moment to compute
    Returns:
        Upper bound on the log-mgf.
    """
    if q < 0.5:
        t = ((1 - q) * ((1 - q) / (1 - math.exp(priv_eps) * q)) ** l
             + q * math.exp(priv_eps * l))
        try:
            log_t = math.log(t)
        except ValueError:
            # math.log raises for t <= 0 (possible once exp(priv_eps)*q > 1);
            # fall back to the data-independent bound.
            print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
            log_t = priv_eps * l
    else:
        log_t = priv_eps * l
    return min(0.5 * priv_eps * priv_eps * l * (l + 1), log_t, priv_eps * l)
def logmgf_from_counts(counts: Union[np.ndarray, List[float]], noise_eps: float, l: int) -> float:
    """Log-mgf bound for ReportNoisyMax over *counts*.

    The mechanism with ``noise_eps`` noise is 2*noise_eps-DP in this
    setting, where one count can go up by one and another down by one.

    Args:
        counts: an array of scores
        noise_eps: noise epsilon used
        l: moment to compute
    Returns:
        Upper bound on the log-mgf.
    """
    q = compute_q_noisy_max(counts, noise_eps)
    return logmgf_exact(q, 2.0 * noise_eps, l)
def sens_at_k(counts: np.ndarray, noise_eps: float, l: int, k: float) -> float:
    """Sensitivity of the log-mgf bound at distance ``k``.

    Args:
        counts: an array of scores
        noise_eps: noise parameter used
        l: moment whose sensitivity is being computed
        k: distance
    Returns:
        Sensitivity at distance k (0 when the bound is vacuous).
    """
    ordered = sorted(counts, reverse=True)
    if 0.5 * noise_eps * l > 1:
        print(f"l of {l} too large to compute sensitivity with noise epsilon {noise_eps}")
        return 0
    # At distance k the gap stays positive, or logmgf_exact is determined
    # by its first term, independent of q.
    # NOTE(review): this gate reads the *unsorted* counts while the update
    # below mutates the sorted copy — mirrors upstream; confirm intent.
    if counts[0] < counts[1] + k:
        return 0
    ordered[0] -= k
    ordered[1] += k
    base = logmgf_from_counts(ordered, noise_eps, l)
    ordered[0] -= 1
    ordered[1] += 1
    shifted = logmgf_from_counts(ordered, noise_eps, l)
    return shifted - base
def smoothed_sens(counts: np.ndarray, noise_eps: float, l: int, beta: float) -> float:
    """Compute a beta-smooth upper bound on the sensitivity.

    Args:
        counts: array of scores
        noise_eps: noise parameter
        l: moment of interest
        beta: smoothness parameter
    Returns:
        A beta-smooth upper bound.
    """
    best = sens_at_k(counts, noise_eps, l, 0)
    k = 0
    while k < max(counts):
        k += 1
        s_k = sens_at_k(counts, noise_eps, l, k)
        best = max(best, math.exp(-beta * k) * s_k)
        # Sensitivity only shrinks past a zero; stop scanning.
        if s_k == 0.0:
            break
    return best
def perform_analysis(
    teacher_preds: np.ndarray,
    indices: np.ndarray,
    noise_eps: float,
    delta: float = 1e-5,
    moments: int = 8,
    beta: float = 0.09,
) -> Tuple[float, float]:
    """Run the PATE privacy analysis over teacher votes.

    Args:
        teacher_preds: array of dim (num_teachers x num_examples); each
            value is the label index a teacher gave for an example
        indices: array of dim (num_examples) of aggregated examples that
            were aggregated with the noisy-max mechanism
        noise_eps: the epsilon level used to create the indices
        delta: the desired level of delta
        moments: the number of moments to track (see the paper)
        beta: a smoothing parameter (see the paper)
    Returns:
        (data-dependent epsilon, data-independent epsilon)
    """
    num_teachers, num_examples = teacher_preds.shape
    _num_examples = indices.shape[0]
    labels = set(teacher_preds.flatten())
    num_labels = len(labels)
    assert num_examples == _num_examples

    # Per-example vote histogram over the label space.
    counts_mat = np.zeros((num_examples, num_labels))
    for ex in range(num_examples):
        for t in range(num_teachers):
            counts_mat[ex, int(teacher_preds[t, ex])] += 1

    l_list = 1.0 + np.array(range(moments))
    total_log_mgf_nm = np.zeros_like(l_list)
    total_ss_nm = np.zeros_like(l_list)
    for ex in indices:
        total_log_mgf_nm += np.array(
            [logmgf_from_counts(counts_mat[ex], noise_eps, l) for l in l_list]
        )
        total_ss_nm += np.array(
            [smoothed_sens(counts_mat[ex], noise_eps, l, beta) for l in l_list]
        )

    # delta = exp(alpha - eps*l)  =>  eps = (alpha - ln(delta)) / l
    eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list

    # If beta < eps / (2 ln(1/delta)), adding Lap(1) * 2 SS/eps noise is
    # (eps, delta)-DP; see Corollary 2.4 in
    # http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf
    # NOTE(review): computed but not used/printed, kept as upstream.
    ss_eps = 2.0 * beta * math.log(1 / delta)

    if min(eps_list_nm) == eps_list_nm[-1]:
        print(
            "Warning: May not have used enough values of l. Increase 'moments' variable and "
            "run again."
        )

    # Data-independent bound: the mechanism is 2*noise_eps-DP.
    data_ind_log_mgf = np.zeros_like(l_list)
    data_ind_log_mgf += num_examples * np.array(
        [logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list]
    )
    data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
    return min(eps_list_nm), min(data_ind_eps_list)
def tensors_to_literals(tensor_list: List[torch.Tensor]) -> List[Union[float, int]]:
    """Convert a list of scalar torch tensors to Python ints/floats.

    Workaround for the lack of a direct list-of-tensors -> tensor
    conversion in the surrounding code.

    Args:
        tensor_list: list of scalar (0-dim) torch tensors
    Returns:
        List of the corresponding Python scalars.
    """
    # Idiomatic comprehension instead of the manual append loop.
    return [tensor.item() for tensor in tensor_list]
def logmgf_exact_torch(q: float, priv_eps: float, l: int) -> float:
    """Upper-bound the log-mgf given q and privacy eps (torch pipeline).

    Same three-term bound as :func:`logmgf_exact`: the data-dependent
    term from https://arxiv.org/pdf/1605.02065.pdf, the small-q term,
    and the direct guarantee priv_eps * l.

    Args:
        q: probability of a non-optimal outcome
        priv_eps: eps parameter for DP
        l: moment to compute
    Returns:
        Upper bound on the log-mgf.
    """
    if q < 0.5:
        first = (1 - q) * math.pow((1 - q) / (1 - math.exp(priv_eps) * q), l)
        second = q * math.exp(priv_eps * l)
        t = first + second
        try:
            log_t = math.log(t)
        except ValueError:
            # t <= 0 can occur once exp(priv_eps)*q > 1.
            print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
            log_t = priv_eps * l
    else:
        log_t = priv_eps * l
    return min(0.5 * priv_eps * priv_eps * l * (l + 1), log_t, priv_eps * l)
def compute_q_noisy_max_torch(
    counts: Union[List[torch.Tensor], torch.Tensor], noise_eps: float
) -> float:
    """Upper-bound Pr[outcome != winner] for noisy-max (torch pipeline).

    Args:
        counts: a list of scalar tensors, or a 1-D tensor, of scores
        noise_eps: privacy parameter for noisy_max
    Returns:
        The probability that the outcome differs from the true winner.
    """
    # BUG FIX: the original tested ``type(counts) != torch.tensor``;
    # ``torch.tensor`` is the factory *function*, never a tensor's type,
    # so the branch always converted — even tensors were round-tripped
    # through .item(). Use isinstance against torch.Tensor instead.
    if not isinstance(counts, torch.Tensor):
        counts = torch.tensor([float(c) for c in counts], dtype=torch.float)
    _, winner = counts.max(0)
    normalized = (
        noise_eps * (counts.clone().detach().type(torch.float) - counts[winner])
    ).tolist()
    counts_rest = torch.tensor(
        [normalized[i] for i in range(len(counts)) if i != winner], dtype=torch.float
    )
    q = 0.0
    # (Dropped the unused ``index`` counter from the original loop.)
    for c in counts_rest:
        gap = -c
        q += (gap + 2.0) / (4.0 * math.exp(gap))
    return min(q, 1.0 - (1.0 / len(counts)))
def logmgf_from_counts_torch(
    counts: Union[List[torch.Tensor], torch.Tensor], noise_eps: float, l: int
) -> float:
    """Log-mgf bound for ReportNoisyMax over *counts* (torch pipeline).

    The mechanism with ``noise_eps`` noise is 2*noise_eps-DP in this
    setting, where one count can go up by one and another down by one.

    Args:
        counts: a list of scores
        noise_eps: noise parameter used
        l: moment whose sensitivity is being computed
    Returns:
        Upper bound on the log-mgf.
    """
    return logmgf_exact_torch(
        compute_q_noisy_max_torch(counts, noise_eps), 2.0 * noise_eps, l
    )
def sens_at_k_torch(counts: torch.Tensor, noise_eps: float, l: int, k: int) -> float:
    """Sensitivity of the log-mgf bound at distance ``k`` (torch pipeline).

    Args:
        counts: tensor of scores
        noise_eps: noise parameter used
        l: moment whose sensitivity is being computed
        k: distance
    Returns:
        Sensitivity at distance k (0 when the bound is vacuous).
    """
    ordered = sorted(counts, reverse=True)
    if 0.5 * noise_eps * l > 1:
        print(f"l of {l} is too large to compute sensitivity with noise epsilon {noise_eps}")
        return 0
    # NOTE(review): gate reads the unsorted counts while the update below
    # mutates the sorted copy — mirrors upstream; confirm intent.
    if counts[0] < counts[1] + k:
        return 0
    ordered[0] -= k
    ordered[1] += k
    base = logmgf_from_counts_torch(ordered, noise_eps, l)
    ordered[0] -= 1
    ordered[1] += 1
    shifted = logmgf_from_counts_torch(ordered, noise_eps, l)
    return shifted - base
def smooth_sens_torch(counts: torch.Tensor, noise_eps: float, l: int, beta: float) -> float:
    """Compute a beta-smooth sensitivity upper bound (torch pipeline).

    Args:
        counts: tensor of scores
        noise_eps: noise parameter
        l: moment of interest
        beta: smoothness parameter
    Returns:
        A beta-smooth upper bound.
    """
    best = sens_at_k_torch(counts, noise_eps, l, 0)
    k = 0
    while k < max(counts):
        k += 1
        s_k = sens_at_k_torch(counts, noise_eps, l, k)
        best = max(best, math.exp(-beta * k) * s_k)
        # Sensitivity only shrinks past a zero; stop scanning.
        if s_k == 0.0:
            break
    return best
def perform_analysis_torch(
    preds: torch.Tensor,
    indices: torch.Tensor,
    noise_eps: float = 0.1,
    delta: float = 1e-5,
    moments: int = 8,
    beta: float = 0.09,
) -> Tuple[float, float]:
    """Run the PATE privacy analysis over teacher votes (torch pipeline).

    Args:
        preds: tensor of dim (num_teachers x num_examples); each value is
            the label index a teacher gave for an example
        indices: tensor of dim (num_examples) of aggregated examples that
            were aggregated with the noisy-max mechanism
        noise_eps: the epsilon level used to create the indices
        delta: the desired level of delta
        moments: the number of moments to track (see the paper)
        beta: a smoothing parameter (see the paper)
    Returns:
        (data-dependent epsilon, data-independent epsilon)
    """
    num_teachers, num_examples = preds.shape
    _num_examples = indices.shape[0]
    # preds must be shaped (teachers x examples).
    assert num_examples == _num_examples
    labels = {t.item() for t in preds.flatten()}
    num_labels = len(labels)

    # Per-example vote histogram over the label space.
    counts_mat = torch.zeros(num_examples, num_labels, dtype=torch.float32)
    for ex in range(num_examples):
        for t in range(num_teachers):
            counts_mat[ex, int(preds[t, ex])] += 1

    l_list = 1 + torch.tensor(range(moments), dtype=torch.float)
    total_log_mgf_nm = torch.zeros_like(l_list)
    total_ss_nm = torch.zeros_like(l_list)
    for ex in indices:
        total_log_mgf_nm += torch.tensor(
            [logmgf_from_counts_torch(counts_mat[ex].clone(), noise_eps, l) for l in l_list]
        )
        total_ss_nm += torch.tensor(
            [smooth_sens_torch(counts_mat[ex].clone(), noise_eps, l, beta) for l in l_list],
            dtype=torch.float,
        )

    eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list
    # NOTE(review): computed but unused, kept to mirror upstream.
    ss_eps = 2.0 * beta * math.log(1 / delta)
    if min(eps_list_nm) == eps_list_nm[-1]:
        print(
            "Warning: May not have used enough values of l. Increase 'moments' variable "
            "and run again."
        )

    # Epsilon when the teacher quorum is not taken into account.
    data_ind_log_mgf = torch.zeros_like(l_list)
    data_ind_log_mgf += num_examples * torch.tensor(
        tensors_to_literals([logmgf_exact_torch(1.0, 2.0 * noise_eps, l) for l in l_list])
    )
    data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
    return min(eps_list_nm), min(data_ind_eps_list)
| 33.140115 | 99 | 0.641608 |
71f9543e653d7334ca5a4302db3c350b9ed9668b | 4,405 | py | Python | tests/records/test_mockrecords_api.py | mirekys/invenio-communities | f412c941627cfd1a515755fdf17cc0aa794d857e | [
"MIT"
] | 3 | 2017-10-25T00:58:24.000Z | 2021-05-02T23:42:02.000Z | tests/records/test_mockrecords_api.py | mirekys/invenio-communities | f412c941627cfd1a515755fdf17cc0aa794d857e | [
"MIT"
] | 277 | 2015-04-30T09:39:22.000Z | 2022-03-31T07:38:26.000Z | tests/records/test_mockrecords_api.py | mirekys/invenio-communities | f412c941627cfd1a515755fdf17cc0aa794d857e | [
"MIT"
] | 44 | 2015-04-22T11:34:49.000Z | 2022-03-22T09:56:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-Communities is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Data layer tests for record/community integration."""
import pytest
from jsonschema import ValidationError
from mock_module.api import MockRecord
from sqlalchemy.exc import IntegrityError
from invenio_communities.communities.records.api import Community
@pytest.fixture()
def c(app, db, location):
    """A community fixture, reloaded from the database after commit."""
    created = Community.create({})
    db.session.commit()
    return Community.get_record(created.id)
@pytest.fixture()
def c2(app, db, location):
    """A second, independent community fixture."""
    created = Community.create({})
    db.session.commit()
    return Community.get_record(created.id)
@pytest.fixture()
def record(app, db, c):
    """A mock record that belongs to community ``c`` as its default."""
    rec = MockRecord.create({})
    rec.communities.add(c, default=True)
    rec.commit()
    db.session.commit()
    return rec
def test_record_create_empty(app, db):
    """Smoke test: an empty record is created with a schema."""
    rec = MockRecord.create({})
    db.session.commit()
    assert rec.schema
    # Invalid metadata types must fail JSONSchema validation.
    with pytest.raises(ValidationError):
        MockRecord.create({'metadata': {'title': 1}})
def test_get(db, record, c):
    """Loading a record should load its communities and the default."""
    loaded = MockRecord.get_record(record.id)
    assert c in loaded.communities
    assert loaded.communities.default == c
def test_add(db, c):
    """Test adding a record to a community, with and without a default."""
    # Case 1: add with default=True.
    rec = MockRecord.create({})
    rec.communities.add(c, default=True)
    assert rec.communities.default == c
    rec.commit()
    assert rec['communities'] == {
        'default': str(c.id),
        'ids': [str(c.id)],
    }
    db.session.commit()
    # Case 2: add without a default.
    rec = MockRecord.create({})
    rec.communities.add(c)
    assert rec.communities.default is None
    rec.commit()
    assert rec['communities'] == {'ids': [str(c.id)]}
    db.session.commit()
def test_add_existing(db, c):
    """Adding the same community twice must fail at commit time."""
    rec = MockRecord.create({})
    rec.communities.add(c)
    rec.communities.add(c)
    # The duplicate membership violates a database constraint.
    with pytest.raises(IntegrityError):
        rec.commit()
    # Roll back so the pytest-invenio "location" fixture tears down cleanly.
    db.session.rollback()
def test_len_contains(record, c, c2):
    """Length and membership work for both records and string ids."""
    assert len(record.communities) == 1
    assert str(c.id) in record.communities
    assert c in record.communities
    assert str(c2.id) not in record.communities
    assert c2 not in record.communities
def test_remove(db, c, c2, record):
    """Test removal of a community from a record.

    BUG FIX: the original referenced ``c2`` without requesting it as a
    fixture, so the name resolved to the module-level fixture *function*
    object and the ValueError assertion passed by accident. ``c2`` is now
    a proper fixture parameter.
    """
    record.communities.remove(c)
    assert len(record.communities) == 0
    record.commit()
    assert record['communities'] == {}
    db.session.commit()
    # Removing a community the record is not part of raises ValueError.
    with pytest.raises(ValueError):
        record.communities.remove(c2)
def test_iter(db, record, c):
    """Iteration yields communities from cache and after a fresh load."""
    # Cache hit: the fixture's in-memory relation.
    assert list(record.communities) == [c]
    # Cache miss: reload from the database.
    reloaded = MockRecord.get_record(record.id)
    assert list(reloaded.communities) == [c]
def test_ids(db, record, c):
    """The ids property yields the string ids of all communities."""
    assert list(record.communities.ids) == [str(c.id)]
def test_change_default(db, record, c, c2):
    """Deleting and re-assigning the default community behaves correctly."""
    assert record.communities.default == c
    del record.communities.default
    assert record.communities.default is None
    # A community the record is not part of cannot become the default.
    with pytest.raises(AttributeError):
        record.communities.default = c2
    record.communities.add(c2)
    record.communities.default = c2
    assert record.communities.default == c2
def test_clear(db, record):
    """Clearing removes every community from the record."""
    assert len(record.communities) == 1
    record.communities.clear()
    assert len(record.communities) == 0
    record.commit()
    assert record['communities'] == {}
def test_refresh(db, record, c2):
    """refresh() rebuilds community state from the source of truth."""
    assert len(record.communities) == 1
    # Corrupt the cached/denormalised internal state on purpose.
    record.communities._communities_ids = set()
    record.communities._default_id = str(c2.id)
    record.commit()
    db.session.commit()
    # The corruption survives a reload...
    reloaded = MockRecord.get_record(record.id)
    assert len(reloaded.communities) == 0
    # ...until refresh() repairs it.
    reloaded.communities.refresh()
    assert len(reloaded.communities) == 1
| 26.859756 | 73 | 0.676277 |
dfb3f5aa6bb79a0fdcd45f50d8d8abe060d884f4 | 62 | py | Python | hwt/simulator/exceptions.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null | hwt/simulator/exceptions.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null | hwt/simulator/exceptions.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null |
class SimException(Exception):
    """Raised when an error occurs during simulation."""
| 15.5 | 30 | 0.693548 |
55bcbf5bdc649f3bd5143721829d54b52e530560 | 3,294 | py | Python | data-structures/python/int_graph.py | zubinshah/algorithms | 292cd1ac0ddd84b8c3933b0c2404d1c4b316be9d | [
"Apache-2.0"
] | null | null | null | data-structures/python/int_graph.py | zubinshah/algorithms | 292cd1ac0ddd84b8c3933b0c2404d1c4b316be9d | [
"Apache-2.0"
] | null | null | null | data-structures/python/int_graph.py | zubinshah/algorithms | 292cd1ac0ddd84b8c3933b0c2404d1c4b316be9d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
'''
int_graph module is a wrapper module to implement an integer graph data
structure in python.
TBD :
(1) derive child class to differentiate matrix vs adj-list impl.
(2) dynamically grow/shrink the graph data struct (matrix) and avoid
constructor to specific max nodes for matrix implementation
(3)
'''
print "Importing integer graph module."
from collections import deque
class IntGraph:
    """Adjacency-matrix graph over integer vertex ids 0..max_nodes-1.

    Fixes over the original:
    * ``is not 0`` / ``is 0`` identity comparisons replaced with ``!=`` /
      ``==`` (identity on small ints only works by CPython accident).
    * BFS marked nodes visited at *dequeue* time, so a node reachable from
      two parents could be enqueued — and printed — twice; nodes are now
      marked when enqueued.
    * ``print`` calls are parenthesized (valid in Python 2 and 3).
    * Traversals now return the visit order (previously None), which is
      backward compatible for callers that ignored the return value.
    """

    # NOTE(review): class-level default kept for compatibility; __init__
    # always rebinds an instance-level matrix.
    nodes = []

    def __init__(self, max_nodes):
        # max_nodes x max_nodes matrix: 0 = no edge, 1 = edge.
        self.nodes = [[0 for _ in range(max_nodes)] for _ in range(max_nodes)]

    def add_edge(self, from_v, to_v):
        """Add a directed edge from_v -> to_v."""
        self.nodes[from_v][to_v] = 1

    def debug_print_graph(self):
        """Dump the raw adjacency matrix for debugging."""
        print("DEBUG : printing self.nodes")
        print(self.nodes)

    def breadth_first_traversal(self, root, debug=0):
        """Breadth-first traversal from *root*; returns the visit order."""
        queue = deque([root])
        visited = [0] * len(self.nodes)
        # Mark at enqueue time so each node enters the queue exactly once.
        visited[root] = 1
        order = []
        while len(queue) != 0:
            current = queue.popleft()
            order.append(current)
            if debug != 0:
                print(current)
            for nxt, connected in enumerate(self.nodes[current]):
                if connected != 0 and visited[nxt] == 0:
                    visited[nxt] = 1
                    queue.append(nxt)
        return order

    def depth_first_traversal_preorder_iterative(self, root, debug=0):
        """Iterative preorder depth-first traversal; returns visit order."""
        stack = [root]
        visited = [0] * len(self.nodes)
        order = []
        while len(stack) != 0:
            current = stack.pop()
            # A node may sit on the stack multiple times; visit only once.
            if visited[current] == 1:
                continue
            visited[current] = 1
            order.append(current)
            if debug == 1:
                print(current)
            for nxt in range(len(self.nodes[current])):
                if self.nodes[current][nxt] != 0 and visited[nxt] == 0:
                    stack.append(nxt)
                    if debug == 1:
                        print("DEBUG: Append " + str(nxt) + ", for curr " + str(current))
        if debug == 1:
            print("Total nodes traversed " + str(len(order)) + ".")
        return order

    def depth_first_traversal_non_recursive(self, root, debug=0):
        """Depth-first traversal, non-recursive variant (not implemented)."""
        print("TODO")
#TODO(zubin) : implement an efficient queue operation in python
#TODO(zubin) : implement bft , traversal operation argument
# default is to print for now
| 31.371429 | 90 | 0.563752 |
5f37bf15d09e009f999032f2493ff64ce830f206 | 3,306 | py | Python | test/TEX/recursive_scanner_dependencies_input.py | edobez/scons | 722228995b223b4507ebb686b48f94b9f7a8380c | [
"MIT"
] | null | null | null | test/TEX/recursive_scanner_dependencies_input.py | edobez/scons | 722228995b223b4507ebb686b48f94b9f7a8380c | [
"MIT"
] | null | null | null | test/TEX/recursive_scanner_dependencies_input.py | edobez/scons | 722228995b223b4507ebb686b48f94b9f7a8380c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Verify that we re-run LaTeX after changing a nested \input. This
checks that recursive implicit dependencies are found correctly.
"""
import TestSCons

test = TestSCons.TestSCons()

pdflatex = test.where_is('pdflatex')
if not pdflatex:
    test.skip_test("Could not find pdflatex; skipping test(s).\n")

test.write(['SConstruct'], """\
env = Environment(tools=['pdftex', 'tex'])
env.PDF('master.tex')
""")

test.write(['master.tex'], r"""
\documentclass{article}
\begin{document}
\input{sub1}
\end{document}
""")

test.write(['sub1.tex'], r"""
\input{sub2}
""")

test.write(['sub2.tex'], r"""
Sub-document 2 content
""")

test.run()

pdf_output_1 = test.read('master.pdf')

# Change the nested sub2.tex and verify master.pdf is rebuilt — this is
# the recursive implicit dependency the scanner must discover.
test.write(['sub2.tex'], r"""
Sub-document 2 content -- updated
""")

test.run()

pdf_output_2 = test.read('master.pdf')

# If the PDF is unchanged, the change in sub2.tex was not picked up.
test.fail_test(pdf_output_1 == pdf_output_2)

# Double-check: clean everything and rebuild from scratch.
test.run(arguments='-c')
test.run()

pdf_output_3 = test.read('master.pdf')

# Modulo creation timestamps, IDs and similar PDF noise, runs 2 and 3
# must be identical; anything else means something odd happened.
pdf_output_2 = test.normalize_pdf(pdf_output_2)
pdf_output_3 = test.normalize_pdf(pdf_output_3)

if pdf_output_2 != pdf_output_3:
    import sys
    test.write('master.normalized.2.pdf', pdf_output_2)
    test.write('master.normalized.3.pdf', pdf_output_3)
    sys.stdout.write("***** 2 and 3 are different!\n")
    sys.stdout.write(test.diff_substr(pdf_output_2, pdf_output_3, 80, 80)
                     + '\n')
    sys.stdout.write("Output from run 2:\n")
    sys.stdout.write(test.stdout(-2) + '\n')
    sys.stdout.write("Output from run 3:\n")
    sys.stdout.write(test.stdout() + '\n')
    sys.stdout.flush()
    test.fail_test()

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29 | 73 | 0.724138 |
e8af0355f4d22c9ca0a8fb43252d8c9acac1da7d | 13,735 | py | Python | airflow/providers/microsoft/azure/hooks/azure_batch.py | yang040840219/airflow | c73d9e09cc650374e975ba287e9ed8ea59cde51a | [
"Apache-2.0"
] | 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z | airflow/providers/microsoft/azure/hooks/azure_batch.py | yang040840219/airflow | c73d9e09cc650374e975ba287e9ed8ea59cde51a | [
"Apache-2.0"
] | null | null | null | airflow/providers/microsoft/azure/hooks/azure_batch.py | yang040840219/airflow | c73d9e09cc650374e975ba287e9ed8ea59cde51a | [
"Apache-2.0"
] | 1 | 2020-11-06T01:26:29.000Z | 2020-11-06T01:26:29.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
from datetime import timedelta
from typing import Optional, Set
from azure.batch import BatchServiceClient, batch_auth, models as batch_models
from azure.batch.models import PoolAddParameter, JobAddParameter, TaskAddParameter
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.models import Connection
from airflow.utils import timezone
class AzureBatchHook(BaseHook):
"""
Hook for Azure Batch APIs
"""
def __init__(self, azure_batch_conn_id: str = 'azure_batch_default') -> None:
    """Initialise the hook and eagerly build the Batch client.

    NOTE(review): the client is constructed at init time, and ``extra``
    is read via a second ``_connection()`` lookup — confirm whether a
    lazy single lookup is preferable.
    """
    super().__init__()
    self.conn_id = azure_batch_conn_id
    self.connection = self.get_conn()
    self.extra = self._connection().extra_dejson
def _connection(self) -> Connection:
"""
Get connected to azure batch service
"""
conn = self.get_connection(self.conn_id)
return conn
def get_conn(self):
    """Build the Batch client from the connection's extra JSON.

    :return: Azure batch client
    :raises AirflowException: when a required extra option is missing.
    """
    extras = self._connection().extra_dejson

    def _required(name):
        """Extract required parameter from extra JSON, raise if not found."""
        value = extras.get(name)
        if not value:
            raise AirflowException(
                'Extra connection option is missing required parameter: `{}`'.format(name)
            )
        return value

    # Parameters are read in the same order as before so the first
    # missing one raises the same error message.
    credentials = batch_auth.SharedKeyCredentials(
        _required('account_name'), _required('account_key')
    )
    return BatchServiceClient(credentials, batch_url=_required('account_url'))
def configure_pool(
self,
pool_id: str,
vm_size: str,
vm_publisher: str,
vm_offer: str,
sku_starts_with: str,
display_name: Optional[str] = None,
target_dedicated_nodes: Optional[int] = None,
use_latest_image_and_sku: bool = False,
**kwargs,
) -> PoolAddParameter:
"""
Configures a pool
:param pool_id: A string that uniquely identifies the Pool within the Account
:type pool_id: str
:param vm_size: The size of virtual machines in the Pool.
:type vm_size: str
:param display_name: The display name for the Pool
:type display_name: str
:param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
:type target_dedicated_nodes: Optional[int]
:param use_latest_image_and_sku: Whether to use the latest verified vm image and sku
:type use_latest_image_and_sku: bool
:param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer.
:type vm_publisher: Optional[str]
:param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer.
:type vm_offer: Optional[str]
:param sku_starts_with: The start name of the sku to search
:type sku_starts_with: Optional[str]
"""
if use_latest_image_and_sku:
self.log.info('Using latest verified virtual machine image with node agent sku')
sku_to_use, image_ref_to_use = self._get_latest_verified_image_vm_and_sku(
publisher=vm_publisher, offer=vm_offer, sku_starts_with=sku_starts_with
)
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
elif self.extra.get('os_family'):
self.log.info(
'Using cloud service configuration to create pool, ' 'virtual machine configuration ignored'
)
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
cloud_service_configuration=batch_models.CloudServiceConfiguration(
os_family=self.extra.get('os_family'), os_version=self.extra.get('os_version')
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
else:
self.log.info('Using virtual machine configuration to create a pool')
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
image_reference=batch_models.ImageReference(
publisher=self.extra.get('vm_publisher'),
offer=self.extra.get('vm_offer'),
sku=self.extra.get('vm_sku'),
version=self.extra.get("vm_version"),
),
node_agent_sku_id=self.extra.get('node_agent_sku_id'),
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
return pool
def create_pool(self, pool: PoolAddParameter) -> None:
"""
Creates a pool if not already existing
:param pool: the pool object to create
:type pool: batch_models.PoolAddParameter
"""
try:
self.log.info("Attempting to create a pool: %s", pool.id)
self.connection.pool.add(pool)
self.log.info("Created pool: %s", pool.id)
except batch_models.BatchErrorException as e:
if e.error.code != "PoolExists":
raise
else:
self.log.info("Pool %s already exists", pool.id)
def _get_latest_verified_image_vm_and_sku(
self,
publisher: str,
offer: str,
sku_starts_with: str,
) -> tuple:
"""
Get latest verified image vm and sku
:param publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer.
:type publisher: str
:param offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer.
:type offer: str
:param sku_starts_with: The start name of the sku to search
:type sku_starts_with: str
"""
options = batch_models.AccountListSupportedImagesOptions(filter="verificationType eq 'verified'")
images = self.connection.account.list_supported_images(account_list_supported_images_options=options)
# pick the latest supported sku
skus_to_use = [
(image.node_agent_sku_id, image.image_reference)
for image in images
if image.image_reference.publisher.lower() == publisher.lower()
and image.image_reference.offer.lower() == offer.lower()
and image.image_reference.sku.startswith(sku_starts_with)
]
# pick first
agent_sku_id, image_ref_to_use = skus_to_use[0]
return agent_sku_id, image_ref_to_use
def wait_for_all_node_state(self, pool_id: str, node_state: Set) -> list:
"""
Wait for all nodes in a pool to reach given states
:param pool_id: A string that identifies the pool
:type pool_id: str
:param node_state: A set of batch_models.ComputeNodeState
:type node_state: set
"""
self.log.info('waiting for all nodes in pool %s to reach one of: %s', pool_id, node_state)
while True:
# refresh pool to ensure that there is no resize error
pool = self.connection.pool.get(pool_id)
if pool.resize_errors is not None:
resize_errors = "\n".join([repr(e) for e in pool.resize_errors])
raise RuntimeError('resize error encountered for pool {}:\n{}'.format(pool.id, resize_errors))
nodes = list(self.connection.compute_node.list(pool.id))
if len(nodes) >= pool.target_dedicated_nodes and all(node.state in node_state for node in nodes):
return nodes
# Allow the timeout to be controlled by the AzureBatchOperator
# specified timeout. This way we don't interrupt a startTask inside
# the pool
time.sleep(10)
def configure_job(
self,
job_id: str,
pool_id: str,
display_name: Optional[str] = None,
**kwargs,
) -> JobAddParameter:
"""
Configures a job for use in the pool
:param job_id: A string that uniquely identifies the job within the account
:type job_id: str
:param pool_id: A string that identifies the pool
:type pool_id: str
:param display_name: The display name for the job
:type display_name: str
"""
job = batch_models.JobAddParameter(
id=job_id,
pool_info=batch_models.PoolInformation(pool_id=pool_id),
display_name=display_name,
**kwargs,
)
return job
def create_job(self, job: JobAddParameter) -> None:
"""
Creates a job in the pool
:param job: The job object to create
:type job: batch_models.JobAddParameter
"""
try:
self.connection.job.add(job)
self.log.info("Job %s created", job.id)
except batch_models.BatchErrorException as err:
if err.error.code != "JobExists":
raise
else:
self.log.info("Job %s already exists", job.id)
def configure_task(
self,
task_id: str,
command_line: str,
display_name: Optional[str] = None,
container_settings=None,
**kwargs,
) -> TaskAddParameter:
"""
Creates a task
:param task_id: A string that identifies the task to create
:type task_id: str
:param command_line: The command line of the Task.
:type command_line: str
:param display_name: A display name for the Task
:type display_name: str
:param container_settings: The settings for the container under which the Task runs.
If the Pool that will run this Task has containerConfiguration set,
this must be set as well. If the Pool that will run this Task doesn't have
containerConfiguration set, this must not be set.
:type container_settings: batch_models.TaskContainerSettings
"""
task = batch_models.TaskAddParameter(
id=task_id,
command_line=command_line,
display_name=display_name,
container_settings=container_settings,
**kwargs,
)
self.log.info("Task created: %s", task_id)
return task
def add_single_task_to_job(self, job_id: str, task: TaskAddParameter) -> None:
"""
Add a single task to given job if it doesn't exist
:param job_id: A string that identifies the given job
:type job_id: str
:param task: The task to add
:type task: batch_models.TaskAddParameter
"""
try:
self.connection.task.add(job_id=job_id, task=task)
except batch_models.BatchErrorException as err:
if err.error.code != "TaskExists":
raise
else:
self.log.info("Task %s already exists", task.id)
def wait_for_job_tasks_to_complete(self, job_id: str, timeout: int) -> None:
"""
Wait for tasks in a particular job to complete
:param job_id: A string that identifies the job
:type job_id: str
:param timeout: The amount of time to wait before timing out in minutes
:type timeout: int
"""
timeout_time = timezone.utcnow() + timedelta(minutes=timeout)
while timezone.utcnow() < timeout_time:
tasks = self.connection.task.list(job_id)
incomplete_tasks = [task for task in tasks if task.state != batch_models.TaskState.completed]
if not incomplete_tasks:
return
for task in incomplete_tasks:
self.log.info("Waiting for %s to complete, currently on %s state", task.id, task.state)
time.sleep(15)
raise TimeoutError("Timed out waiting for tasks to complete")
| 38.909348 | 110 | 0.627084 |
6d18259601f97fa73f920f9db7283b9cfe938ecd | 602 | py | Python | scripts/jginames_to_tab.py | stajichlab/Chytrid_Virus | bab9353e12ba0afe3f1726fd50187640e74d17cc | [
"MIT"
] | null | null | null | scripts/jginames_to_tab.py | stajichlab/Chytrid_Virus | bab9353e12ba0afe3f1726fd50187640e74d17cc | [
"MIT"
] | null | null | null | scripts/jginames_to_tab.py | stajichlab/Chytrid_Virus | bab9353e12ba0afe3f1726fd50187640e74d17cc | [
"MIT"
] | 1 | 2020-04-21T17:08:19.000Z | 2020-04-21T17:08:19.000Z | #!/usr/bin/env python3
import csv,re,os
# Directory holding JGI "*_names.csv" inputs and the combined output file.
topdir="lib"
outfile="lib/jgi_names.tab"
with open(outfile,"w") as ofh:
    # Output is tab-separated with explicit '\n' line endings.
    writer = csv.writer(ofh,delimiter="\t",lineterminator='\n')
    for infile in os.listdir(topdir):
        if not infile.endswith("_names.csv"):
            continue
        with open(os.path.join(topdir,infile),"r") as ifh:
            reader = csv.reader(ifh,delimiter=",")
            for line in reader:
                # Sanitize column 1: spaces -> underscores, strip ';' and stray CRs.
                name=re.sub(" ","_",line[1])
                name=re.sub(";","",name)
                name=re.sub("\r","",name)
| 30.1 | 63 | 0.546512 |
fcd72762a607b8aa53702fcea7a546913a163ebb | 4,076 | py | Python | kindle/books.py | szarroug3/X-Ray-Creator-2 | 61b3ff772794c13c31f91cabf299edd4264f803d | [
"MIT"
] | 2 | 2016-06-06T23:52:21.000Z | 2020-02-03T20:32:04.000Z | kindle/books.py | szarroug3/X-Ray-Creator-2 | 61b3ff772794c13c31f91cabf299edd4264f803d | [
"MIT"
] | null | null | null | kindle/books.py | szarroug3/X-Ray-Creator-2 | 61b3ff772794c13c31f91cabf299edd4264f803d | [
"MIT"
] | null | null | null | # books.py
import ctypes
import os
from mobibook import MobiBook
from customexceptions import *
# Drive types
DRIVE_UNKNOWN = 0 # The drive type cannot be determined.
DRIVE_NO_ROOT_DIR = 1 # The root path is invalbookID; for example, there is no volume mounted at the specified path.
DRIVE_REMOVABLE = 2 # The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.
DRIVE_FIXED = 3 # The drive has fixed media; for example, a hard disk drive or flash drive.
DRIVE_REMOTE = 4 # The drive is a remote (network) drive.
DRIVE_CDROM = 5 # The drive is a CD-ROM drive.
DRIVE_RAMDISK = 6 # The drive is a RAM disk.
books_updated = []
books_skipped = []
class Books(object):
    """Collection of .mobi books found on an attached Kindle device.

    Locates the Kindle's removable drive on construction (Windows-only:
    uses ctypes/kernel32) and scans its ``documents`` folder for books.
    Note: this module is Python 2 (print statements).
    """
    def __init__(self):
        self.FindKindle()
        self.GetBooks()
    def __iter__(self):
        # Iterate over the discovered MobiBook objects.
        for book in self.books:
            yield book
    def __str__(self):
        # One book per line; trailing newline trimmed.
        string = ''
        for book in self.books:
            string += str(book) + '\n'
        return string[:-1]
    def __len__(self):
        return len(self.books)
    @property
    def kindleDrive(self):
        # Drive letter (e.g. 'E:') of the detected Kindle.
        return self._kindleDrive
    @kindleDrive.setter
    def kindleDrive(self, value):
        self._kindleDrive = value
    @property
    def books(self):
        # List of MobiBook objects found on the device.
        return self._books
    @books.setter
    def books(self, value):
        self._books = value
    # Return drive letter of kindle if found or None if not found
    def FindKindle(self):
        print 'Checking for kindle...'
        drive_info = self.GetDriveInfo()
        removable_drives = [drive_letter for drive_letter, drive_type in drive_info if drive_type == DRIVE_REMOVABLE]
        for drive in removable_drives:
            for dirName, subDirList, fileList in os.walk(drive):
                # NOTE(review): 'system\.mrch' is a plain string (backslash is
                # literal, '\.' is not an escape) — presumably the hidden
                # Kindle metadata folder <drive>system\.mrch; confirm on device.
                if dirName == drive + 'system\.mrch':
                    for fName in fileList:
                        # An amzn1_account* file marks a registered Kindle.
                        if 'amzn1_account' in fName:
                            print 'Kindle found!'
                            self.kindleDrive = drive
                            return
        raise KindleNotFound('Please make sure kindle is plugged in.')
    # Return list of tuples mapping drive letters to drive types
    def GetDriveInfo(self):
        result = []
        # GetLogicalDrives returns a bitmask: bit i set => drive chr(65+i) exists.
        bitmask = ctypes.windll.kernel32.GetLogicalDrives()
        for i in range(26):
            bit = 2 ** i
            if bit & bitmask:
                drive_letter = '%s:' % chr(65 + i)
                drive_type = ctypes.windll.kernel32.GetDriveTypeA('%s\\' % drive_letter)
                result.append((drive_letter, drive_type))
        return result
    # Get list of books
    def GetBooks(self):
        books_directory = os.path.join(self.kindleDrive, 'documents')
        self.books = []
        index = 0
        print 'Searching for books...'
        for dirName, subDirList, fileList in os.walk(books_directory):
            for fName in fileList:
                if '.mobi' in fName:
                    index += 1
                    self.books.append(MobiBook(os.path.join(dirName,fName)))
        print '%i books found' % index
        print 'Get metadata for books...'
        for book in self.books:
            book.GetBookConfig()
        # Sort alphabetically for stable display in PrintListOfBooks.
        self.books.sort(key=lambda x:x.bookNameAndAuthor)
        print 'Done getting metadata'
        print
    def PrintListOfBooks(self):
        # 1-based numbered listing for user selection.
        for bookNum, book in enumerate(self.books, 1):
            print '%i. %s' % (bookNum, book.bookNameAndAuthor)
        print
    def RemoveBooksWithXray(self):
        # NOTE(review): removing items from self.books while iterating the
        # same list skips the element after each removal — consecutive books
        # with existing x-rays are not all removed. Iterate a copy
        # (self.books[:]) or rebuild with a filter to fix; confirm intent.
        for book in self.books:
            if book.xrayExists:
                self.books.remove(book)
    def GetBooksToUpdate(self):
        # Books whose 'update' flag was set (e.g. by user selection).
        booksToUpdate = []
        for book in self.books:
            if book.update:
                booksToUpdate.append(book)
        return booksToUpdate
def GetBookByASIN(self, ASIN, onlyCheckUpdated=True):
for book in self.books:
if (onlyCheckUpdated and book.update) or not onlyCheckUpdated:
if book.ASIN == ASIN:
return book | 33.409836 | 119 | 0.593229 |
ba64d996f8983ea4afd752da2f561ebebaa06444 | 57 | py | Python | Examples/Misc/Resources/calc.py | OldSecureIQLab/MyWarez | c0c3bcbd1d206ff6fff7efb2c798538d79fbe87f | [
"MIT"
] | 1 | 2021-07-07T04:08:39.000Z | 2021-07-07T04:08:39.000Z | Examples/Misc/Resources/calc.py | OldSecureIQLab/MyWarez | c0c3bcbd1d206ff6fff7efb2c798538d79fbe87f | [
"MIT"
] | null | null | null | Examples/Misc/Resources/calc.py | OldSecureIQLab/MyWarez | c0c3bcbd1d206ff6fff7efb2c798538d79fbe87f | [
"MIT"
] | 1 | 2021-01-28T16:36:34.000Z | 2021-01-28T16:36:34.000Z | import os
os.system("c:\\windows\\system32\\calc.exe");
| 14.25 | 45 | 0.684211 |
9bfe391f05d96dd62c3b06723c0635db819f8562 | 15,091 | py | Python | help.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 1 | 2021-06-21T16:58:48.000Z | 2021-06-21T16:58:48.000Z | help.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-08-13T16:52:51.000Z | 2021-09-01T13:05:42.000Z | help.py | sunset-vacation/bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-06-21T22:16:12.000Z | 2021-08-11T21:01:19.000Z | import itertools
from collections import namedtuple
from contextlib import suppress
from typing import List, Union
from discord import Color, Embed
from discord.ext import commands
from fuzzywuzzy import fuzz, process
from fuzzywuzzy.utils import full_process
from config import CONFIG
from pagination import LinePaginator
COMMANDS_PER_PAGE = 8
PREFIX = CONFIG.bot.prefix
NOT_ALLOWED_TO_RUN_MESSAGE = '***You cannot run this command.***\n\n'
Category = namedtuple('Category', ['name', 'description', 'cogs'])
class HelpQueryNotFound(ValueError):
    """Raised when a help query matches neither a command nor a cog.

    Carries the custom attribute ``possible_matches``: a dictionary of
    near-miss command names mapped to their likeness match scores, or
    ``None`` when no candidates were collected.
    """

    def __init__(self, arg: str, possible_matches: dict = None):
        self.possible_matches = possible_matches
        super().__init__(arg)
class CustomHelpCommand(commands.HelpCommand):
    """
    An interactive instance for the bot help command.
    Cogs can be grouped into custom categories. All cogs with the same
    category will be displayed under a single category name in the help
    output. Custom categories are defined inside the cogs as a class
    attribute named `category`. A description can also be specified with
    the attribute `category_description`. If a description is not found
    in at least one cog, the default will be the regular description
    (class docstring) of the first cog found in the category.
    """
    def __init__(self):
        # 'help' key becomes the help text shown for the help command itself.
        super().__init__(command_attrs={'help': 'Shows help for bot commands'})
    async def command_callback(
        self, ctx: commands.Context, *, command: str = None
    ) -> None:
        """Attempts to match the provided query with a valid command or cog."""
        # the only reason we need to tamper with this is because d.py does not support "categories",
        # so we need to deal with them ourselves.
        bot = ctx.bot
        if command is None:
            # quick and easy, send bot help if command is none
            mapping = self.get_bot_mapping()
            await self.send_bot_help(mapping)
            return
        cog_matches = []
        description = None
        for cog in bot.cogs.values():
            if hasattr(cog, 'category') and cog.category == command:
                cog_matches.append(cog)
                if hasattr(cog, 'category_description'):
                    description = cog.category_description
        if cog_matches:
            category = Category(
                name=command, description=description, cogs=cog_matches
            )
            await self.send_category_help(category)
            return
        # it's either a cog, group, command or subcommand; let the parent class deal with it
        await super().command_callback(ctx, command=command)
    async def get_all_help_choices(self) -> set:
        """
        Get all the possible options for getting help in the bot.
        This will only display commands the author has permission to run.
        These include:
        - Category names
        - Cog names
        - Group command names (and aliases)
        - Command names (and aliases)
        - Subcommand names (with parent group and aliases for
          subcommand, but not including aliases for group)
        Options and choices are case sensitive.
        """
        # first get all commands including subcommands and full command name aliases
        choices = set()
        for command in await self.filter_commands(
            self.context.bot.walk_commands()
        ):
            # the command or group name
            choices.add(str(command))
            if isinstance(command, commands.Command):
                # all aliases if it's just a command
                choices.update(command.aliases)
            else:
                # otherwise we need to add the parent name in
                choices.update(
                    f'{command.full_parent_name} {alias}'
                    for alias in command.aliases
                )
        # all cog names
        choices.update(self.context.bot.cogs)
        # all category names
        choices.update(
            cog.category
            for cog in self.context.bot.cogs.values()
            if hasattr(cog, 'category')
        )
        return choices
    async def command_not_found(self, string: str) -> 'HelpQueryNotFound':
        """
        Handles when a query does not match a valid command, group, cog
        or category.
        Will return an instance of the `HelpQueryNotFound` exception
        with the error message and possible matches.
        """
        choices = await self.get_all_help_choices()
        # Run fuzzywuzzy's processor beforehand, and avoid matching if processed string is empty
        # This avoids fuzzywuzzy from raising a warning on inputs with only non-alphanumeric characters
        processed = full_process(string)
        if processed:
            # Keep only candidates scoring >= 60 likeness.
            result = process.extractBests(
                processed,
                choices,
                scorer=fuzz.ratio,
                score_cutoff=60,
                processor=None,
            )
        else:
            result = []
        return HelpQueryNotFound(f'Query "{string}" not found.', dict(result))
    async def subcommand_not_found(
        self, command: commands.Command, string: str
    ) -> 'HelpQueryNotFound':
        """
        Redirects the error to `command_not_found`.
        `command_not_found` deals with searching and getting best
        choices for both commands and subcommands.
        """
        return await self.command_not_found(
            f'{command.qualified_name} {string}'
        )
    async def send_error_message(self, error: HelpQueryNotFound) -> None:
        """Send the error message to the channel."""
        embed = Embed(colour=Color.red(), title=str(error))
        if getattr(error, 'possible_matches', None):
            matches = '\n'.join(
                f'`{match}`' for match in error.possible_matches
            )
            embed.description = f'**Did you mean:**\n{matches}'
        await self.context.send(embed=embed)
    async def command_formatting(self, command: commands.Command) -> Embed:
        """
        Takes a command and turns it into an embed.
        It will add an author, command signature + help, aliases and a
        note if the user can't run the command.
        """
        embed = Embed()
        embed.set_author(name='Command Help')
        parent = command.full_parent_name
        name = str(command) if not parent else f'{parent} {command.name}'
        command_details = f'**```{PREFIX}{name} {command.signature}```**\n'
        # show command aliases
        aliases = [
            f'`{alias}`' if not parent else f'`{parent} {alias}`'
            for alias in command.aliases
        ]
        aliases += [
            f'`{alias}`' for alias in getattr(command, 'root_aliases', ())
        ]
        aliases = ', '.join(sorted(aliases))
        if aliases:
            command_details += f'**Can also use:** {aliases}\n\n'
        # when command is disabled, show message about it,
        # when other CommandError or user is not allowed to run command,
        # add this to help message.
        try:
            if not await command.can_run(self.context):
                command_details += NOT_ALLOWED_TO_RUN_MESSAGE
        except commands.DisabledCommand:
            command_details += '***This command is disabled.***\n\n'
        except commands.CommandError:
            command_details += NOT_ALLOWED_TO_RUN_MESSAGE
        command_details += f"*{command.help or 'No details provided.'}*\n"
        embed.description = command_details
        return embed
    async def send_command_help(self, command: commands.Command) -> None:
        """Send help for a single command."""
        embed = await self.command_formatting(command)
        await self.context.send(embed=embed)
    @staticmethod
    def get_commands_brief_details(
        commands_: List[commands.Command], return_as_list: bool = False
    ) -> Union[List[str], str]:
        """
        Formats the prefix, command name and signature, and short doc
        for an iterable of commands.
        return_as_list is helpful for passing these command details into
        the paginator as a list of command details.
        """
        details = []
        for command in commands_:
            signature = f' {command.signature}' if command.signature else ''
            details.append(
                f"\n**`{PREFIX}{command.qualified_name}{signature}`**\n*{command.short_doc or 'No details provided'}*"
            )
        if return_as_list:
            return details
        else:
            return ''.join(details)
    async def send_group_help(self, group: commands.Group) -> None:
        """Sends help for a group command."""
        subcommands = group.commands
        if len(subcommands) == 0:
            # no subcommands, just treat it like a regular command
            await self.send_command_help(group)
            return
        # remove commands that the user can't run and are hidden, and sort by name
        commands_ = await self.filter_commands(subcommands, sort=True)
        embed = await self.command_formatting(group)
        command_details = self.get_commands_brief_details(commands_)
        if command_details:
            embed.description += f'\n**Subcommands:**\n{command_details}'
        await self.context.send(embed=embed)
    async def send_cog_help(self, cog: commands.Cog) -> None:
        """Send help for a cog."""
        # sort commands by name, and remove any the user can't run or are hidden.
        commands_ = await self.filter_commands(cog.get_commands(), sort=True)
        embed = Embed()
        embed.set_author(name='Command Help')
        embed.description = f'**{cog.qualified_name}**\n*{cog.description}*'
        command_details = self.get_commands_brief_details(commands_)
        if command_details:
            embed.description += f'\n\n**Commands:**\n{command_details}'
        await self.context.send(embed=embed)
    @staticmethod
    def _category_key(command: commands.Command) -> str:
        """
        Returns a cog name of a given command for use as a key for
        `sorted` and `groupby`.
        A zero width space is used as a prefix for results with no cogs
        to force them last in ordering.
        """
        if not command.cog:
            return '**\u200bNo Category:**'
        with suppress(AttributeError):
            if command.cog.category:
                return f'**{command.cog.category}**'
        return f'**{command.cog_name}**'
    async def send_category_help(self, category: Category) -> None:
        """
        Sends help for a bot category.
        This sends a brief help for all commands in all cogs registered
        to the category.
        """
        embed = Embed()
        embed.set_author(name='Command Help')
        all_commands = []
        for cog in category.cogs:
            all_commands.extend(cog.get_commands())
        filtered_commands = await self.filter_commands(all_commands, sort=True)
        command_detail_lines = self.get_commands_brief_details(
            filtered_commands, return_as_list=True
        )
        description = f'**{category.name}**\n*{category.description}*'
        if command_detail_lines:
            description += '\n\n**Commands:**'
        await LinePaginator.paginate(
            command_detail_lines,
            self.context,
            embed,
            prefix=description,
            max_lines=COMMANDS_PER_PAGE,
            max_size=2000,
        )
    async def send_bot_help(self, mapping: dict) -> None:
        """Sends help for all bot commands and cogs."""
        bot = self.context.bot
        embed = Embed()
        embed.set_author(name='Command Help')
        # NOTE(review): local name `filter_commands` shadows the inherited
        # method of the same name within this scope — harmless here, but
        # consider renaming for clarity.
        filter_commands = await self.filter_commands(
            bot.commands, sort=True, key=self._category_key
        )
        cog_or_category_pages = []
        # groupby requires the input sorted by the same key (done above).
        for cog_or_category, _commands in itertools.groupby(
            filter_commands, key=self._category_key
        ):
            sorted_commands = sorted(_commands, key=lambda c: c.name)
            if len(sorted_commands) == 0:
                continue
            command_detail_lines = self.get_commands_brief_details(
                sorted_commands, return_as_list=True
            )
            # Split cogs or categories which have too many commands to fit in one page.
            # The length of commands is included for later use when aggregating into pages for the paginator.
            for index in range(0, len(sorted_commands), COMMANDS_PER_PAGE):
                truncated_lines = command_detail_lines[
                    index : index + COMMANDS_PER_PAGE
                ]
                joined_lines = ''.join(truncated_lines)
                cog_or_category_pages.append(
                    (
                        f'**{cog_or_category}**{joined_lines}',
                        len(truncated_lines),
                    )
                )
        pages = []
        counter = 0
        page = ''
        for page_details, length in cog_or_category_pages:
            counter += length
            if counter > COMMANDS_PER_PAGE:
                # force a new page on paginator even if it falls short of the max pages
                # since we still want to group categories/cogs.
                counter = length
                pages.append(page)
                page = f'{page_details}\n\n'
            else:
                page += f'{page_details}\n\n'
        if page:
            # add any remaining command help that didn't get added in the last iteration above.
            pages.append(page)
        await LinePaginator.paginate(
            pages, self.context, embed=embed, max_lines=1, max_size=2000
        )
class HelpCog(commands.Cog, name='Help'):
    """Custom Embed Pagination Help feature."""
    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        # Remember the previous help command so cog_unload can restore it.
        self.old_help_command = bot.help_command
        bot.help_command = CustomHelpCommand()
        # Attribute the help command to this cog so it appears under 'Help'.
        bot.help_command.cog = self
    def cog_unload(self) -> None:
        """Reset the help command when the cog is unloaded."""
        self.bot.help_command = self.old_help_command
def setup(bot):
    """discord.py extension entry point: register the HelpCog on the bot."""
    bot.add_cog(HelpCog(bot))
| 35.177156 | 119 | 0.59393 |
4e6c26cd5916f0cce02f51cd3baf3ba5302515ab | 34,404 | py | Python | Lib/test/test_importlib/test_abc.py | zsilver1/RustPython | 820337e6c31d2ffcdd341dd238c381ed6f3c23e8 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | Lib/test/test_importlib/test_abc.py | zsilver1/RustPython | 820337e6c31d2ffcdd341dd238c381ed6f3c23e8 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | Lib/test/test_importlib/test_abc.py | zsilver1/RustPython | 820337e6c31d2ffcdd341dd238c381ed6f3c23e8 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | import io
import marshal
import os
import sys
from test import support
import types
import unittest
from unittest import mock
import warnings
from . import util as test_util
init = test_util.import_importlib('importlib')
abc = test_util.import_importlib('importlib.abc')
machinery = test_util.import_importlib('importlib.machinery')
util = test_util.import_importlib('importlib.util')
##### Inheritance ##############################################################
class InheritanceTests:
    """Test that the specified class is a subclass/superclass of the expected
    classes."""
    # Defaults; setUp fills these from subclass_names/superclass_names
    # attributes defined on concrete subclasses.
    subclasses = []
    superclasses = []
    def setUp(self):
        # self.abc and self._NAME are injected by the test_util harness
        # (see test_util.test_both) — presumably per frozen/source variant.
        self.superclasses = [getattr(self.abc, class_name)
                             for class_name in self.superclass_names]
        if hasattr(self, 'subclass_names'):
            # Because test.support.import_fresh_module() creates a new
            # importlib._bootstrap per module, inheritance checks fail when
            # checking across module boundaries (i.e. the _bootstrap in abc is
            # not the same as the one in machinery). That means stealing one of
            # the modules from the other to make sure the same instance is used.
            machinery = self.abc.machinery
            self.subclasses = [getattr(machinery, class_name)
                               for class_name in self.subclass_names]
        assert self.subclasses or self.superclasses, self.__class__
        # Name-mangled to _InheritanceTests__test; the ABC under test.
        self.__test = getattr(self.abc, self._NAME)
    def test_subclasses(self):
        # Test that the expected subclasses inherit.
        for subclass in self.subclasses:
            self.assertTrue(issubclass(subclass, self.__test),
                "{0} is not a subclass of {1}".format(subclass, self.__test))
    def test_superclasses(self):
        # Test that the class inherits from the expected superclasses.
        for superclass in self.superclasses:
            self.assertTrue(issubclass(self.__test, superclass),
                "{0} is not a superclass of {1}".format(superclass, self.__test))
# Each class below declares the expected super/subclass relationships for one
# importlib ABC; test_util.test_both() generates the Frozen_/Source_ TestCase
# variants from it.
class MetaPathFinder(InheritanceTests):
    superclass_names = ['Finder']
    subclass_names = ['BuiltinImporter', 'FrozenImporter', 'PathFinder',
                      'WindowsRegistryFinder']
(Frozen_MetaPathFinderInheritanceTests,
 Source_MetaPathFinderInheritanceTests
 ) = test_util.test_both(MetaPathFinder, abc=abc)
class PathEntryFinder(InheritanceTests):
    superclass_names = ['Finder']
    subclass_names = ['FileFinder']
(Frozen_PathEntryFinderInheritanceTests,
 Source_PathEntryFinderInheritanceTests
 ) = test_util.test_both(PathEntryFinder, abc=abc)
class ResourceLoader(InheritanceTests):
    superclass_names = ['Loader']
(Frozen_ResourceLoaderInheritanceTests,
 Source_ResourceLoaderInheritanceTests
 ) = test_util.test_both(ResourceLoader, abc=abc)
class InspectLoader(InheritanceTests):
    superclass_names = ['Loader']
    subclass_names = ['BuiltinImporter', 'FrozenImporter', 'ExtensionFileLoader']
(Frozen_InspectLoaderInheritanceTests,
 Source_InspectLoaderInheritanceTests
 ) = test_util.test_both(InspectLoader, abc=abc)
class ExecutionLoader(InheritanceTests):
    superclass_names = ['InspectLoader']
    subclass_names = ['ExtensionFileLoader']
(Frozen_ExecutionLoaderInheritanceTests,
 Source_ExecutionLoaderInheritanceTests
 ) = test_util.test_both(ExecutionLoader, abc=abc)
class FileLoader(InheritanceTests):
    superclass_names = ['ResourceLoader', 'ExecutionLoader']
    subclass_names = ['SourceFileLoader', 'SourcelessFileLoader']
(Frozen_FileLoaderInheritanceTests,
 Source_FileLoaderInheritanceTests
 ) = test_util.test_both(FileLoader, abc=abc)
class SourceLoader(InheritanceTests):
    superclass_names = ['ResourceLoader', 'ExecutionLoader']
    subclass_names = ['SourceFileLoader']
(Frozen_SourceLoaderInheritanceTests,
 Source_SourceLoaderInheritanceTests
 ) = test_util.test_both(SourceLoader, abc=abc)
##### Default return values ####################################################
def make_abc_subclasses(base_class, name=None, inst=False, **kwargs):
    # Build {kind: subclass-or-instance} for both frozen and source variants
    # of the named ABC; `inst=True` pre-instantiates the subclasses.
    if name is None:
        name = base_class.__name__
    base = {kind: getattr(splitabc, name)
            for kind, splitabc in abc.items()}
    return {cls._KIND: cls() if inst else cls
            for cls in test_util.split_frozen(base_class, base, **kwargs)}
class ABCTestHarness:
    @property
    def ins(self):
        # Lazily set ins on the class.
        cls = self.SPLIT[self._KIND]
        ins = cls()
        self.__class__.ins = ins
        return ins
class MetaPathFinder:
def find_module(self, fullname, path):
return super().find_module(fullname, path)
class MetaPathFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(MetaPathFinder)
def test_find_module(self):
# Default should return None.
with self.assertWarns(DeprecationWarning):
found = self.ins.find_module('something', None)
self.assertIsNone(found)
def test_invalidate_caches(self):
# Calling the method is a no-op.
self.ins.invalidate_caches()
(Frozen_MPFDefaultTests,
Source_MPFDefaultTests
) = test_util.test_both(MetaPathFinderDefaultsTests)
class PathEntryFinder:
def find_loader(self, fullname):
return super().find_loader(fullname)
class PathEntryFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(PathEntryFinder)
def test_find_loader(self):
with self.assertWarns(DeprecationWarning):
found = self.ins.find_loader('something')
self.assertEqual(found, (None, []))
def find_module(self):
self.assertEqual(None, self.ins.find_module('something'))
def test_invalidate_caches(self):
# Should be a no-op.
self.ins.invalidate_caches()
(Frozen_PEFDefaultTests,
Source_PEFDefaultTests
) = test_util.test_both(PathEntryFinderDefaultsTests)
class Loader:
def load_module(self, fullname):
return super().load_module(fullname)
class LoaderDefaultsTests(ABCTestHarness):
    """Default behaviour of importlib.abc.Loader's optional methods."""
    SPLIT = make_abc_subclasses(Loader)
    def test_create_module(self):
        # Default create_module() returns None (use default module creation).
        spec = 'a spec'
        self.assertIsNone(self.ins.create_module(spec))
    def test_load_module(self):
        with self.assertRaises(ImportError):
            self.ins.load_module('something')
    def test_module_repr(self):
        mod = types.ModuleType('blah')
        with self.assertRaises(NotImplementedError):
            self.ins.module_repr(mod)
        original_repr = repr(mod)
        mod.__loader__ = self.ins
        # Should still return a proper repr.
        self.assertTrue(repr(mod))
# NOTE(review): `SourceLDefaultTests` is missing the underscore used by every
# sibling (`Source_...`); name kept as-is to match upstream.
(Frozen_LDefaultTests,
SourceLDefaultTests
) = test_util.test_both(LoaderDefaultsTests)
class ResourceLoader(Loader):
    # Minimal concrete subclass: delegates to the ABC's default get_data().
    def get_data(self, path):
        return super().get_data(path)
class ResourceLoaderDefaultsTests(ABCTestHarness):
    """Default behaviour of importlib.abc.ResourceLoader.get_data()."""
    SPLIT = make_abc_subclasses(ResourceLoader)
    def test_get_data(self):
        # The ABC default raises IOError for any path.
        with self.assertRaises(IOError):
            self.ins.get_data('/some/path')
(Frozen_RLDefaultTests,
Source_RLDefaultTests
) = test_util.test_both(ResourceLoaderDefaultsTests)
class InspectLoader(Loader):
    # Minimal concrete subclass deferring to the ABC defaults.
    def is_package(self, fullname):
        return super().is_package(fullname)
    def get_source(self, fullname):
        return super().get_source(fullname)
# Shared split (frozen/source) subclasses, reused by several test classes.
SPLIT_IL = make_abc_subclasses(InspectLoader)
class InspectLoaderDefaultsTests(ABCTestHarness):
    """Default behaviour of importlib.abc.InspectLoader's abstract-ish methods."""
    SPLIT = SPLIT_IL
    def test_is_package(self):
        with self.assertRaises(ImportError):
            self.ins.is_package('blah')
    def test_get_source(self):
        with self.assertRaises(ImportError):
            self.ins.get_source('blah')
(Frozen_ILDefaultTests,
Source_ILDefaultTests
) = test_util.test_both(InspectLoaderDefaultsTests)
class ExecutionLoader(InspectLoader):
    # Minimal concrete subclass: delegates to the ABC's default get_filename().
    def get_filename(self, fullname):
        return super().get_filename(fullname)
# Shared split (frozen/source) subclasses for ExecutionLoader tests.
SPLIT_EL = make_abc_subclasses(ExecutionLoader)
class ExecutionLoaderDefaultsTests(ABCTestHarness):
    """Default behaviour of importlib.abc.ExecutionLoader.get_filename()."""
    SPLIT = SPLIT_EL
    def test_get_filename(self):
        # The ABC default raises ImportError for any module name.
        with self.assertRaises(ImportError):
            self.ins.get_filename('blah')
# Bug fix: the original passed InspectLoaderDefaultsTests here, so the
# Frozen/Source_ELDefault test classes re-ran the InspectLoader defaults and
# test_get_filename above was never executed.
(Frozen_ELDefaultTests,
Source_ELDefaultsTests
) = test_util.test_both(ExecutionLoaderDefaultsTests)
class ResourceReader:
    # Minimal concrete subclass deferring every method to the ABC defaults.
    def open_resource(self, *args, **kwargs):
        return super().open_resource(*args, **kwargs)
    def resource_path(self, *args, **kwargs):
        return super().resource_path(*args, **kwargs)
    def is_resource(self, *args, **kwargs):
        return super().is_resource(*args, **kwargs)
    def contents(self, *args, **kwargs):
        return super().contents(*args, **kwargs)
class ResourceReaderDefaultsTests(ABCTestHarness):
    """Default behaviour of importlib.abc.ResourceReader's methods."""
    SPLIT = make_abc_subclasses(ResourceReader)
    def test_open_resource(self):
        with self.assertRaises(FileNotFoundError):
            self.ins.open_resource('dummy_file')
    def test_resource_path(self):
        with self.assertRaises(FileNotFoundError):
            self.ins.resource_path('dummy_file')
    def test_is_resource(self):
        with self.assertRaises(FileNotFoundError):
            self.ins.is_resource('dummy_file')
    def test_contents(self):
        # Default contents() yields nothing.
        self.assertEqual([], list(self.ins.contents()))
(Frozen_RRDefaultTests,
Source_RRDefaultsTests
) = test_util.test_both(ResourceReaderDefaultsTests)
##### MetaPathFinder concrete methods ##########################################
class MetaPathFinderFindModuleTests:
    """MetaPathFinder.find_module() delegating to find_spec()."""
    @classmethod
    def finder(cls, spec):
        # Build a finder whose find_spec() records its arguments and returns
        # the canned *spec*; cls.abc is injected by test_util.test_both below.
        class MetaPathSpecFinder(cls.abc.MetaPathFinder):
            def find_spec(self, fullname, path, target=None):
                self.called_for = fullname, path
                return spec
        return MetaPathSpecFinder()
    def test_no_spec(self):
        # No spec -> find_module() returns None but still consulted find_spec().
        finder = self.finder(None)
        path = ['a', 'b', 'c']
        name = 'blah'
        with self.assertWarns(DeprecationWarning):
            found = finder.find_module(name, path)
        self.assertIsNone(found)
        self.assertEqual(name, finder.called_for[0])
        self.assertEqual(path, finder.called_for[1])
    def test_spec(self):
        # A spec with a loader -> find_module() returns that loader.
        loader = object()
        spec = self.util.spec_from_loader('blah', loader)
        finder = self.finder(spec)
        with self.assertWarns(DeprecationWarning):
            found = finder.find_module('blah', None)
        self.assertIs(found, spec.loader)
(Frozen_MPFFindModuleTests,
Source_MPFFindModuleTests
) = test_util.test_both(MetaPathFinderFindModuleTests, abc=abc, util=util)
##### PathEntryFinder concrete methods #########################################
class PathEntryFinderFindLoaderTests:
    """PathEntryFinder.find_loader() delegating to find_spec()."""
    @classmethod
    def finder(cls, spec):
        # Build a finder whose find_spec() records its argument and returns
        # the canned *spec*; cls.abc is injected by test_util.test_both below.
        class PathEntrySpecFinder(cls.abc.PathEntryFinder):
            def find_spec(self, fullname, target=None):
                self.called_for = fullname
                return spec
        return PathEntrySpecFinder()
    def test_no_spec(self):
        # No spec -> (None, []) is returned.
        finder = self.finder(None)
        name = 'blah'
        with self.assertWarns(DeprecationWarning):
            found = finder.find_loader(name)
        self.assertIsNone(found[0])
        self.assertEqual([], found[1])
        self.assertEqual(name, finder.called_for)
    def test_spec_with_loader(self):
        # Spec with a loader -> that loader is the first element.
        loader = object()
        spec = self.util.spec_from_loader('blah', loader)
        finder = self.finder(spec)
        with self.assertWarns(DeprecationWarning):
            found = finder.find_loader('blah')
        self.assertIs(found[0], spec.loader)
    def test_spec_with_portions(self):
        # Namespace-package spec (no loader) -> its portions come back.
        spec = self.machinery.ModuleSpec('blah', None)
        paths = ['a', 'b', 'c']
        spec.submodule_search_locations = paths
        finder = self.finder(spec)
        with self.assertWarns(DeprecationWarning):
            found = finder.find_loader('blah')
        self.assertIsNone(found[0])
        self.assertEqual(paths, found[1])
(Frozen_PEFFindLoaderTests,
Source_PEFFindLoaderTests
) = test_util.test_both(PathEntryFinderFindLoaderTests, abc=abc, util=util,
                        machinery=machinery)
##### Loader concrete methods ##################################################
class LoaderLoadModuleTests:
    """Loader.load_module() built on top of exec_module()."""
    def loader(self):
        # self.abc is injected by test_util.test_both below.
        class SpecLoader(self.abc.Loader):
            found = None
            def exec_module(self, module):
                self.found = module
            def is_package(self, fullname):
                """Force some non-default module state to be set."""
                return True
        return SpecLoader()
    def test_fresh(self):
        loader = self.loader()
        name = 'blah'
        with test_util.uncache(name):
            loader.load_module(name)
            module = loader.found
            self.assertIs(sys.modules[name], module)
        self.assertEqual(loader, module.__loader__)
        self.assertEqual(loader, module.__spec__.loader)
        self.assertEqual(name, module.__name__)
        self.assertEqual(name, module.__spec__.name)
        self.assertIsNotNone(module.__path__)
        # NOTE(review): the second argument below is assertIsNotNone's failure
        # *message*, not a second value to compare — this line probably meant
        # assertEqual; kept as-is to match upstream.
        self.assertIsNotNone(module.__path__,
                             module.__spec__.submodule_search_locations)
    def test_reload(self):
        # Reloading an already-cached module keeps the same module object.
        name = 'blah'
        loader = self.loader()
        module = types.ModuleType(name)
        module.__spec__ = self.util.spec_from_loader(name, loader)
        module.__loader__ = loader
        with test_util.uncache(name):
            sys.modules[name] = module
            loader.load_module(name)
            found = loader.found
            self.assertIs(found, sys.modules[name])
            self.assertIs(module, sys.modules[name])
(Frozen_LoaderLoadModuleTests,
Source_LoaderLoadModuleTests
) = test_util.test_both(LoaderLoadModuleTests, abc=abc, util=util)
##### InspectLoader concrete methods ###########################################
class InspectLoaderSourceToCodeTests:
    """InspectLoader.source_to_code() compiling str/bytes with optional path."""
    def source_to_module(self, data, path=None):
        """Help with source_to_code() tests."""
        module = types.ModuleType('blah')
        loader = self.InspectLoaderSubclass()
        if path is None:
            code = loader.source_to_code(data)
        else:
            code = loader.source_to_code(data, path)
        exec(code, module.__dict__)
        return module
    def test_source_to_code_source(self):
        # Since compile() can handle strings, so should source_to_code().
        source = 'attr = 42'
        module = self.source_to_module(source)
        self.assertTrue(hasattr(module, 'attr'))
        self.assertEqual(module.attr, 42)
    def test_source_to_code_bytes(self):
        # Since compile() can handle bytes, so should source_to_code().
        source = b'attr = 42'
        module = self.source_to_module(source)
        self.assertTrue(hasattr(module, 'attr'))
        self.assertEqual(module.attr, 42)
    def test_source_to_code_path(self):
        # Specifying a path should set it for the code object.
        path = 'path/to/somewhere'
        loader = self.InspectLoaderSubclass()
        code = loader.source_to_code('', path)
        self.assertEqual(code.co_filename, path)
    def test_source_to_code_no_path(self):
        # Not setting a path should still work and be set to <string> since that
        # is a pre-existing practice as a default to compile().
        loader = self.InspectLoaderSubclass()
        code = loader.source_to_code('')
        self.assertEqual(code.co_filename, '<string>')
(Frozen_ILSourceToCodeTests,
Source_ILSourceToCodeTests
) = test_util.test_both(InspectLoaderSourceToCodeTests,
                        InspectLoaderSubclass=SPLIT_IL)
class InspectLoaderGetCodeTests:
    """InspectLoader.get_code() built on top of get_source()."""
    def test_get_code(self):
        # Test success.
        module = types.ModuleType('blah')
        with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
            mocked.return_value = 'attr = 42'
            loader = self.InspectLoaderSubclass()
            code = loader.get_code('blah')
        exec(code, module.__dict__)
        self.assertEqual(module.attr, 42)
    def test_get_code_source_is_None(self):
        # If get_source() is None then this should be None.
        with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
            mocked.return_value = None
            loader = self.InspectLoaderSubclass()
            code = loader.get_code('blah')
        self.assertIsNone(code)
    def test_get_code_source_not_found(self):
        # If there is no source then there is no code object.
        loader = self.InspectLoaderSubclass()
        with self.assertRaises(ImportError):
            loader.get_code('blah')
(Frozen_ILGetCodeTests,
Source_ILGetCodeTests
) = test_util.test_both(InspectLoaderGetCodeTests,
                        InspectLoaderSubclass=SPLIT_IL)
class InspectLoaderLoadModuleTests:
    """Test InspectLoader.load_module()."""
    module_name = 'blah'
    def setUp(self):
        # Make sure the module under test is not cached before/after each test.
        support.unload(self.module_name)
        self.addCleanup(support.unload, self.module_name)
    def load(self, loader):
        # Drive the real import machinery with the given loader.
        spec = self.util.spec_from_loader(self.module_name, loader)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            return self.init._bootstrap._load_unlocked(spec)
    def mock_get_code(self):
        return mock.patch.object(self.InspectLoaderSubclass, 'get_code')
    def test_get_code_ImportError(self):
        # If get_code() raises ImportError, it should propagate.
        with self.mock_get_code() as mocked_get_code:
            mocked_get_code.side_effect = ImportError
            with self.assertRaises(ImportError):
                loader = self.InspectLoaderSubclass()
                self.load(loader)
    def test_get_code_None(self):
        # If get_code() returns None, raise ImportError.
        with self.mock_get_code() as mocked_get_code:
            mocked_get_code.return_value = None
            with self.assertRaises(ImportError):
                loader = self.InspectLoaderSubclass()
                self.load(loader)
    def test_module_returned(self):
        # The loaded module should be returned.
        code = compile('attr = 42', '<string>', 'exec')
        with self.mock_get_code() as mocked_get_code:
            mocked_get_code.return_value = code
            loader = self.InspectLoaderSubclass()
            module = self.load(loader)
            self.assertEqual(module, sys.modules[self.module_name])
(Frozen_ILLoadModuleTests,
Source_ILLoadModuleTests
) = test_util.test_both(InspectLoaderLoadModuleTests,
                        InspectLoaderSubclass=SPLIT_IL,
                        init=init,
                        util=util)
##### ExecutionLoader concrete methods #########################################
class ExecutionLoaderGetCodeTests:
    """ExecutionLoader.get_code() combining get_source() and get_filename()."""
    def mock_methods(self, *, get_source=False, get_filename=False):
        # Return (possibly None) patch context managers for the two methods.
        source_mock_context, filename_mock_context = None, None
        if get_source:
            source_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
                                                    'get_source')
        if get_filename:
            filename_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
                                                      'get_filename')
        return source_mock_context, filename_mock_context
    def test_get_code(self):
        # Filename from get_filename() ends up on the code object.
        path = 'blah.py'
        source_mock_context, filename_mock_context = self.mock_methods(
            get_source=True, get_filename=True)
        with source_mock_context as source_mock, filename_mock_context as name_mock:
            source_mock.return_value = 'attr = 42'
            name_mock.return_value = path
            loader = self.ExecutionLoaderSubclass()
            code = loader.get_code('blah')
        self.assertEqual(code.co_filename, path)
        module = types.ModuleType('blah')
        exec(code, module.__dict__)
        self.assertEqual(module.attr, 42)
    def test_get_code_source_is_None(self):
        # If get_source() is None then this should be None.
        source_mock_context, _ = self.mock_methods(get_source=True)
        with source_mock_context as mocked:
            mocked.return_value = None
            loader = self.ExecutionLoaderSubclass()
            code = loader.get_code('blah')
        self.assertIsNone(code)
    def test_get_code_source_not_found(self):
        # If there is no source then there is no code object.
        loader = self.ExecutionLoaderSubclass()
        with self.assertRaises(ImportError):
            loader.get_code('blah')
    def test_get_code_no_path(self):
        # If get_filename() raises ImportError then simply skip setting the path
        # on the code object.
        source_mock_context, filename_mock_context = self.mock_methods(
            get_source=True, get_filename=True)
        with source_mock_context as source_mock, filename_mock_context as name_mock:
            source_mock.return_value = 'attr = 42'
            name_mock.side_effect = ImportError
            loader = self.ExecutionLoaderSubclass()
            code = loader.get_code('blah')
        self.assertEqual(code.co_filename, '<string>')
        module = types.ModuleType('blah')
        exec(code, module.__dict__)
        self.assertEqual(module.attr, 42)
(Frozen_ELGetCodeTests,
Source_ELGetCodeTests
) = test_util.test_both(ExecutionLoaderGetCodeTests,
                        ExecutionLoaderSubclass=SPLIT_EL)
##### SourceLoader concrete methods ############################################
class SourceOnlyLoader:
    """Mock SourceLoader that serves source only (no bytecode support)."""
    # Globals that should be defined for all modules.
    source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
              b"repr(__loader__)])")
    def __init__(self, path):
        self.path = path
    def get_data(self, path):
        # Only the configured path has data.
        if path != self.path:
            raise IOError
        return self.source
    def get_filename(self, fullname):
        return self.path
    def module_repr(self, module):
        return '<module>'
# Concrete frozen/source subclasses of importlib.abc.SourceLoader.
SPLIT_SOL = make_abc_subclasses(SourceOnlyLoader, 'SourceLoader')
class SourceLoader(SourceOnlyLoader):
    """Mock SourceLoader that also serves and records bytecode."""
    source_mtime = 1
    def __init__(self, path, magic=None):
        super().__init__(path)
        # self.util / self.init are injected via make_abc_subclasses kwargs.
        self.bytecode_path = self.util.cache_from_source(self.path)
        self.source_size = len(self.source)
        if magic is None:
            magic = self.util.MAGIC_NUMBER
        # Assemble a valid .pyc image: magic + flags + mtime + size + code.
        data = bytearray(magic)
        data.extend(self.init._pack_uint32(0))
        data.extend(self.init._pack_uint32(self.source_mtime))
        data.extend(self.init._pack_uint32(self.source_size))
        code_object = compile(self.source, self.path, 'exec',
                              dont_inherit=True)
        data.extend(marshal.dumps(code_object))
        self.bytecode = bytes(data)
        self.written = {}
    def get_data(self, path):
        if path == self.path:
            return super().get_data(path)
        elif path == self.bytecode_path:
            return self.bytecode
        else:
            raise OSError
    def path_stats(self, path):
        if path != self.path:
            raise IOError
        return {'mtime': self.source_mtime, 'size': self.source_size}
    def set_data(self, path, data):
        # Record writes so tests can assert on emitted bytecode.
        self.written[path] = bytes(data)
        return path == self.bytecode_path
SPLIT_SL = make_abc_subclasses(SourceLoader, util=util, init=init)
class SourceLoaderTestHarness:
    """Shared setup/verification helpers for the SourceLoader test classes."""
    def setUp(self, *, is_package=True, **kwargs):
        self.package = 'pkg'
        if is_package:
            self.path = os.path.join(self.package, '__init__.py')
            self.name = self.package
        else:
            module_name = 'mod'
            self.path = os.path.join(self.package, '.'.join(['mod', 'py']))
            self.name = '.'.join([self.package, module_name])
        self.cached = self.util.cache_from_source(self.path)
        self.loader = self.loader_mock(self.path, **kwargs)
    def verify_module(self, module):
        # Check import-set attributes plus the '_' sentinel the mock source sets.
        self.assertEqual(module.__name__, self.name)
        self.assertEqual(module.__file__, self.path)
        self.assertEqual(module.__cached__, self.cached)
        self.assertEqual(module.__package__, self.package)
        self.assertEqual(module.__loader__, self.loader)
        values = module._.split('::')
        self.assertEqual(values[0], self.name)
        self.assertEqual(values[1], self.path)
        self.assertEqual(values[2], self.cached)
        self.assertEqual(values[3], self.package)
        self.assertEqual(values[4], repr(self.loader))
    def verify_code(self, code_object):
        # Exec the code object in a pre-populated module and verify it.
        module = types.ModuleType(self.name)
        module.__file__ = self.path
        module.__cached__ = self.cached
        module.__package__ = self.package
        module.__loader__ = self.loader
        module.__path__ = []
        exec(code_object, module.__dict__)
        self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
    """Test importlib.abc.SourceLoader for source-only loading.
    Reload testing is subsumed by the tests for
    importlib.util.module_for_loader.
    """
    @unittest.skip("TODO: RUSTPYTHON, AttributeError: module 'tokenize' has no attribute 'detect_encoding'")
    def test_get_source(self):
        # Verify the source code is returned as a string.
        # If an OSError is raised by get_data then raise ImportError.
        expected_source = self.loader.source.decode('utf-8')
        self.assertEqual(self.loader.get_source(self.name), expected_source)
        def raise_OSError(path):
            raise OSError
        self.loader.get_data = raise_OSError
        with self.assertRaises(ImportError) as cm:
            self.loader.get_source(self.name)
        self.assertEqual(cm.exception.name, self.name)
    def test_is_package(self):
        # Properly detect when loading a package.
        self.setUp(is_package=False)
        self.assertFalse(self.loader.is_package(self.name))
        self.setUp(is_package=True)
        self.assertTrue(self.loader.is_package(self.name))
        self.assertFalse(self.loader.is_package(self.name + '.__init__'))
    def test_get_code(self):
        # Verify the code object is created.
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)
    def test_source_to_code(self):
        # Verify the compiled code object.
        code = self.loader.source_to_code(self.loader.source, self.path)
        self.verify_code(code)
    def test_load_module(self):
        # Loading a module should set __name__, __loader__, __package__,
        # __path__ (for packages), __file__, and __cached__.
        # The module should also be put into sys.modules.
        with test_util.uncache(self.name):
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                module = self.loader.load_module(self.name)
            self.verify_module(module)
            self.assertEqual(module.__path__, [os.path.dirname(self.path)])
            self.assertIn(self.name, sys.modules)
    def test_package_settings(self):
        # __package__ needs to be set, while __path__ is set on if the module
        # is a package.
        # Testing the values for a package are covered by test_load_module.
        self.setUp(is_package=False)
        with test_util.uncache(self.name):
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                module = self.loader.load_module(self.name)
            self.verify_module(module)
            self.assertFalse(hasattr(module, '__path__'))
    @unittest.skip("TODO: RUSTPYTHON, AttributeError: module 'tokenize' has no attribute 'detect_encoding'")
    def test_get_source_encoding(self):
        # Source is considered encoded in UTF-8 by default unless otherwise
        # specified by an encoding line.
        source = "_ = 'ü'"
        self.loader.source = source.encode('utf-8')
        returned_source = self.loader.get_source(self.name)
        self.assertEqual(returned_source, source)
        source = "# coding: latin-1\n_ = ü"
        self.loader.source = source.encode('latin-1')
        returned_source = self.loader.get_source(self.name)
        self.assertEqual(returned_source, source)
(Frozen_SourceOnlyLoaderTests,
Source_SourceOnlyLoaderTests
) = test_util.test_both(SourceOnlyLoaderTests, util=util,
                        loader_mock=SPLIT_SOL)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
    """Test importlib.abc.SourceLoader's use of bytecode.
    Source-only testing handled by SourceOnlyLoaderTests.
    """
    def verify_code(self, code_object, *, bytecode_written=False):
        super().verify_code(code_object)
        if bytecode_written:
            # The loader must have written a well-formed .pyc image.
            self.assertIn(self.cached, self.loader.written)
            data = bytearray(self.util.MAGIC_NUMBER)
            data.extend(self.init._pack_uint32(0))
            data.extend(self.init._pack_uint32(self.loader.source_mtime))
            data.extend(self.init._pack_uint32(self.loader.source_size))
            data.extend(marshal.dumps(code_object))
            self.assertEqual(self.loader.written[self.cached], bytes(data))
    def test_code_with_everything(self):
        # When everything should work.
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)
    def test_no_bytecode(self):
        # If no bytecode exists then move on to the source.
        self.loader.bytecode_path = "<does not exist>"
        # Sanity check
        with self.assertRaises(OSError):
            bytecode_path = self.util.cache_from_source(self.path)
            self.loader.get_data(bytecode_path)
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object, bytecode_written=True)
    def test_code_bad_timestamp(self):
        # Bytecode is only used when the timestamp matches the source EXACTLY.
        for source_mtime in (0, 2):
            assert source_mtime != self.loader.source_mtime
            original = self.loader.source_mtime
            self.loader.source_mtime = source_mtime
            # If bytecode is used then EOFError would be raised by marshal.
            self.loader.bytecode = self.loader.bytecode[8:]
            code_object = self.loader.get_code(self.name)
            self.verify_code(code_object, bytecode_written=True)
            self.loader.source_mtime = original
    def test_code_bad_magic(self):
        # Skip over bytecode with a bad magic number.
        self.setUp(magic=b'0000')
        # If bytecode is used then EOFError would be raised by marshal.
        self.loader.bytecode = self.loader.bytecode[8:]
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object, bytecode_written=True)
    def test_dont_write_bytecode(self):
        # Bytecode is not written if sys.dont_write_bytecode is true.
        # Can assume it is false already thanks to the skipIf class decorator.
        try:
            sys.dont_write_bytecode = True
            self.loader.bytecode_path = "<does not exist>"
            code_object = self.loader.get_code(self.name)
            self.assertNotIn(self.cached, self.loader.written)
        finally:
            sys.dont_write_bytecode = False
    def test_no_set_data(self):
        # If set_data is not defined, one can still read bytecode.
        self.setUp(magic=b'0000')
        original_set_data = self.loader.__class__.mro()[1].set_data
        try:
            del self.loader.__class__.mro()[1].set_data
            code_object = self.loader.get_code(self.name)
            self.verify_code(code_object)
        finally:
            self.loader.__class__.mro()[1].set_data = original_set_data
    def test_set_data_raises_exceptions(self):
        # Raising NotImplementedError or OSError is okay for set_data.
        def raise_exception(exc):
            def closure(*args, **kwargs):
                raise exc
            return closure
        self.setUp(magic=b'0000')
        self.loader.set_data = raise_exception(NotImplementedError)
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)
(Frozen_SLBytecodeTests,
SourceSLBytecodeTests
) = test_util.test_both(SourceLoaderBytecodeTests, init=init, util=util,
                        loader_mock=SPLIT_SL)
class SourceLoaderGetSourceTests:
    """Tests for importlib.abc.SourceLoader.get_source()."""
    @unittest.skip("TODO: RUSTPYTHON, AttributeError: module 'tokenize' has no attribute 'detect_encoding'")
    def test_default_encoding(self):
        # Should have no problems with UTF-8 text.
        name = 'mod'
        mock = self.SourceOnlyLoaderMock('mod.file')
        source = 'x = "ü"'
        mock.source = source.encode('utf-8')
        returned_source = mock.get_source(name)
        self.assertEqual(returned_source, source)
    @unittest.skip("TODO: RUSTPYTHON, AttributeError: module 'tokenize' has no attribute 'detect_encoding'")
    def test_decoded_source(self):
        # Decoding should work.
        name = 'mod'
        mock = self.SourceOnlyLoaderMock("mod.file")
        source = "# coding: Latin-1\nx='ü'"
        assert source.encode('latin-1') != source.encode('utf-8')
        mock.source = source.encode('latin-1')
        returned_source = mock.get_source(name)
        self.assertEqual(returned_source, source)
    @unittest.skip("TODO: RUSTPYTHON, AttributeError: module 'io' has no attribute 'IncrementalNewlineDecoder'")
    def test_universal_newlines(self):
        # PEP 302 says universal newlines should be used.
        name = 'mod'
        mock = self.SourceOnlyLoaderMock('mod.file')
        source = "x = 42\r\ny = -13\r\n"
        mock.source = source.encode('utf-8')
        expect = io.IncrementalNewlineDecoder(None, True).decode(source)
        self.assertEqual(mock.get_source(name), expect)
(Frozen_SourceOnlyLoaderGetSourceTests,
Source_SourceOnlyLoaderGetSourceTests
) = test_util.test_both(SourceLoaderGetSourceTests,
                        SourceOnlyLoaderMock=SPLIT_SOL)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 34.164846 | 112 | 0.660795 |
bf9d77104da80500e820fa91ab7050d725159775 | 5,006 | py | Python | homeassistant/components/sensehat/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/sensehat/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/sensehat/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 3 | 2021-11-14T13:29:33.000Z | 2021-12-27T17:05:22.000Z | """Support for Sense HAT sensors."""
from __future__ import annotations
from datetime import timedelta
import logging
from pathlib import Path
from sense_hat import SenseHat
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_DISPLAY_OPTIONS,
CONF_NAME,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "sensehat"
CONF_IS_HAT_ATTACHED = "is_hat_attached"
# Hardware readings are throttled to once per minute.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# One description per supported reading; `key` mirrors the attribute name on
# SenseHatData that holds the corresponding value.
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
    SensorEntityDescription(
        key="temperature",
        name="temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        device_class=SensorDeviceClass.TEMPERATURE,
    ),
    SensorEntityDescription(
        key="humidity",
        name="humidity",
        native_unit_of_measurement=PERCENTAGE,
    ),
    SensorEntityDescription(
        key="pressure",
        name="pressure",
        native_unit_of_measurement="mb",
    ),
)
SENSOR_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_DISPLAY_OPTIONS, default=SENSOR_KEYS): [vol.In(SENSOR_KEYS)],
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_IS_HAT_ATTACHED, default=True): cv.boolean,
    }
)
def get_cpu_temp():
    """Get CPU temperature.

    Reads the Raspberry Pi thermal zone from sysfs (millidegrees Celsius)
    and returns the value in degrees Celsius as a float.
    """
    t_cpu = (
        Path("/sys/class/thermal/thermal_zone0/temp")
        .read_text(encoding="utf-8")
        .strip()
    )
    return float(t_cpu) * 0.001
def get_average(temp_base):
    """Smooth temperature readings with a three-sample moving average.

    The last three samples are kept as state on the function object itself
    (``get_average.temp``); the first call seeds the whole window with the
    initial reading so early averages equal it.
    """
    if not hasattr(get_average, "temp"):
        get_average.temp = [temp_base, temp_base, temp_base]
    history = get_average.temp
    # Shift the window in place: newest sample first, oldest dropped.
    history[0], history[1], history[2] = temp_base, history[0], history[1]
    return sum(history) / 3
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Sense HAT sensor platform.

    Creates one SenseHatSensor per entry in the `display_options` config that
    matches a supported sensor key; all entities share one SenseHatData reader.
    """
    # Deprecation notice required by ADR-0019 (GPIO integrations removed from core).
    _LOGGER.warning(
        "The Sense HAT integration is deprecated and will be removed "
        "in Home Assistant Core 2022.4; this integration is removed under "
        "Architectural Decision Record 0019, more information can be found here: "
        "https://github.com/home-assistant/architecture/blob/master/adr/0019-GPIO.md"
    )
    data = SenseHatData(config.get(CONF_IS_HAT_ATTACHED))
    display_options = config[CONF_DISPLAY_OPTIONS]
    entities = [
        SenseHatSensor(data, description)
        for description in SENSOR_TYPES
        if description.key in display_options
    ]
    # True -> request an immediate first update before adding.
    add_entities(entities, True)
class SenseHatSensor(SensorEntity):
    """Representation of a Sense HAT sensor."""
    def __init__(self, data, description: SensorEntityDescription):
        """Store the shared data reader and this entity's static metadata."""
        self.entity_description = description
        self.data = data
    def update(self):
        """Refresh the shared readings and publish this entity's value."""
        self.data.update()
        # A missing humidity reading means the whole update failed.
        if not self.data.humidity:
            _LOGGER.error("Don't receive data")
            return
        key = self.entity_description.key
        # The description keys mirror the SenseHatData attribute names.
        if key in ("temperature", "humidity", "pressure"):
            self._attr_native_value = getattr(self.data, key)
class SenseHatData:
    """Get the latest data and update."""
    def __init__(self, is_hat_attached):
        """Initialize the data object."""
        # Latest readings; None until the first successful update.
        self.temperature = None
        self.humidity = None
        self.pressure = None
        # When True, compensate for heat bleeding from the Pi's CPU into the HAT.
        self.is_hat_attached = is_hat_attached
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from Sense HAT."""
        sense = SenseHat()
        # Average the two on-board temperature sources.
        temp_from_h = sense.get_temperature_from_humidity()
        temp_from_p = sense.get_temperature_from_pressure()
        t_total = (temp_from_h + temp_from_p) / 2
        if self.is_hat_attached:
            # Subtract CPU heat influence, then smooth with a moving average.
            t_cpu = get_cpu_temp()
            t_correct = t_total - ((t_cpu - t_total) / 1.5)
            t_correct = get_average(t_correct)
        else:
            t_correct = get_average(t_total)
        self.temperature = t_correct
        self.humidity = sense.get_humidity()
        self.pressure = sense.get_pressure()
| 30.52439 | 87 | 0.68298 |
4231c167995cee3f162e0218cfc7fbb9099ee53c | 445 | py | Python | lesson3/task1.py | kati-Ist/python_geekbrains | fdafd6134ab4287d80fbf5b6c1750142d2013238 | [
"MIT"
] | null | null | null | lesson3/task1.py | kati-Ist/python_geekbrains | fdafd6134ab4287d80fbf5b6c1750142d2013238 | [
"MIT"
] | null | null | null | lesson3/task1.py | kati-Ist/python_geekbrains | fdafd6134ab4287d80fbf5b6c1750142d2013238 | [
"MIT"
] | null | null | null | # 1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
# Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
def my_f(k_1, k_2):
try:
number = k_1 / k_2
return round(number, 2)
except ZeroDivisionError:
print("Второе число не должно быть нулем!")
print(my_f((float(input("Введите делимое: "))), (float(input("Введите делитель: ")))))
| 31.785714 | 97 | 0.698876 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.