content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hackcrisis.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| manage.py | 630 | Django's command-line utility for administrative tasks.
!/usr/bin/env python | 77 | en | 0.656913 |
# This script fetches Atelier 801 translation file and adds the required IDs into our own translation files
import sys
from urllib.request import urlopen
import zlib
from string import Template
import json
# Expect the target language code (e.g. "en") as the first CLI argument.
if len(sys.argv) < 2:
    # FIX: corrected "arguement" typo and exit non-zero on misuse
    # (bare exit() reported success to the shell).
    print("Please pass in lang code for first argument")
    sys.exit(1)
lang = sys.argv[1]
# URL of the zlib-compressed translation bundle for the requested language.
url = 'https://www.transformice.com/langues/tfm-'+lang+'.gz'

# Fetch file: download the compressed blob and decode it to text.
response = urlopen(url)
filedata = response.read()
filedata = zlib.decompress(filedata)
filedata = bytes.decode(filedata)

# Parse file: entries are separated by "\n-\n"; each looks like "key=value".
# Only the first '=' splits, since values may themselves contain '='.
filedata = filedata.split("\n-\n")
i18n = {}
for data in filedata:
    if(not data): continue  # skip empty trailing segment
    key,val = data.split("=", 1)
    i18n[key] = val
# Use data to do the actual thing this tool is for
def desc(key, arg1=None):
    """Return the translation for *key*; if *arg1* is given, substitute it
    for the "%1" placeholder in the translated string.

    NOTE(review): this helper is not called anywhere in this script —
    possibly kept for interactive use; confirm before removing.
    """
    # FIX: compare against None by identity (`is not None`), not `!=`.
    if arg1 is not None:
        return i18n[key].replace("%1", arg1)
    return i18n[key]
# Translation IDs we extract: the five class names plus every skill id
# ("C_<n>") and its tooltip/title variant ("C_<n>_T").
transKeys = [
    "C_GuideSprirituel",
    "C_MaitresseDuVent",
    "C_Mecanicienne",
    "C_Sauvageonne",
    "C_Physicienne",
    "C_14", "C_14_T",
    "C_11", "C_11_T",
    "C_12", "C_12_T",
    "C_13", "C_13_T",
    "C_8", "C_8_T",
    "C_9", "C_9_T",
    "C_10", "C_10_T",
    "C_5", "C_5_T",
    "C_6", "C_6_T",
    "C_7", "C_7_T",
    "C_2", "C_2_T",
    "C_3", "C_3_T",
    "C_4", "C_4_T",
    "C_0", "C_0_T",
    "C_1", "C_1_T",
    "C_34", "C_34_T",
    "C_31", "C_31_T",
    "C_32", "C_32_T",
    "C_33", "C_33_T",
    "C_28", "C_28_T",
    "C_29", "C_29_T",
    "C_30", "C_30_T",
    "C_25", "C_25_T",
    "C_26", "C_26_T",
    "C_27", "C_27_T",
    "C_22", "C_22_T",
    "C_23", "C_23_T",
    "C_24", "C_24_T",
    "C_20", "C_20_T",
    "C_21", "C_21_T",
    "C_54", "C_54_T",
    "C_51", "C_51_T",
    "C_52", "C_52_T",
    "C_53", "C_53_T",
    "C_48", "C_48_T",
    "C_49", "C_49_T",
    "C_50", "C_50_T",
    "C_45", "C_45_T",
    "C_46", "C_46_T",
    "C_47", "C_47_T",
    "C_42", "C_42_T",
    "C_43", "C_43_T",
    "C_44", "C_44_T",
    "C_40", "C_40_T",
    "C_41", "C_41_T",
    "C_94", "C_94_T",
    "C_80", "C_80_T",
    "C_93", "C_93_T",
    "C_70", "C_70_T",
    "C_72", "C_72_T",
    "C_81", "C_81_T",
    "C_92", "C_92_T",
    "C_66", "C_66_T",
    "C_71", "C_71_T",
    "C_73", "C_73_T",
    "C_68", "C_68_T",
    "C_88", "C_88_T",
    "C_84", "C_84_T",
    "C_86", "C_86_T",
    "C_89", "C_89_T",
    "C_91", "C_91_T",
    "C_83", "C_83_T",
    "C_85", "C_85_T",
    "C_90", "C_90_T",
    "C_63", "C_63_T",
    "C_74", "C_74_T",
    "C_87", "C_87_T",
    "C_82", "C_82_T",
    "C_60", "C_60_T",
    "C_64", "C_64_T",
    "C_65", "C_65_T",
    "C_69", "C_69_T",
    "C_67", "C_67_T",
    "C_61", "C_61_T",
    "C_62", "C_62_T",
    ]
# Build the subset of the full translation table that will be written out.
# NOTE: raises KeyError if Atelier 801 drops one of the expected IDs.
i18nToWrite = {}
for key in transKeys:
    i18nToWrite[key] = i18n[key]
with open(lang+'.json', 'w') as outfile:
# outfile.write(i18nToWrite)
json.dump(i18nToWrite, outfile, indent=4) | i18n/_tfm_trans_to_skilldata.py | 2,522 | This script fetches Atelier 801 translation file and adds the required IDs into our own translation files Fetch file Parse file Use data to do the actual thing this tool is for outfile.write(i18nToWrite) | 203 | en | 0.747974 |
#!/usr/bin/env python
__author__ = ('Duy Tin Truong (duytin.truong@unitn.it), '
'Aitor Blanco Miguez (aitor.blancomiguez@unitn.it)')
__version__ = '3.0'
__date__ = '21 Feb 2020'
import argparse as ap
import dendropy
from io import StringIO
import re
from collections import defaultdict
import matplotlib.colors as colors
import subprocess
def read_params():
    """Build and parse the command-line arguments for the tree plotter.

    FIX: the help strings for --leaf_marker_size and --leaf_marker_edge_width
    were copy-pasted from the legend options and said "legend ..."; they now
    correctly describe the leaf marker options. Behavior is unchanged.
    """
    p = ap.ArgumentParser()
    p.add_argument('-t', '--ifn_tree',
                   required=True,
                   default=None,
                   type=str,
                   help='The input tree in newick format.')
    p.add_argument('-m', '--colorized_metadata',
                   required=False,
                   default='unset',
                   type=str,
                   help='The metadata field to colorize. Default "unset".')
    p.add_argument('--fig_size',
                   required=False,
                   default=8,
                   type=float,
                   help='The figure size. Default "8".')
    p.add_argument('--legend_marker_size',
                   required=False,
                   default=20,
                   type=int,
                   help='The legend marker size. Default "20".'
                   )
    p.add_argument('--legend_font_size',
                   required=False,
                   default=10,
                   type=int,
                   help='The legend font size. Default "10".'
                   )
    p.add_argument('--legend_marker_edge_width',
                   required=False,
                   default=0.2,
                   type=float,
                   help='The legend marker edge width. Default "0.2".'
                   )
    p.add_argument('--leaf_marker_size',
                   required=False,
                   default=20,
                   type=int,
                   help='The leaf marker size. Default "20".'
                   )
    p.add_argument('--leaf_marker_edge_width',
                   required=False,
                   default=0.2,
                   type=float,
                   help='The leaf marker edge width. Default "0.2".'
                   )
    p.add_argument('--dpi',
                   required=False,
                   default=300,
                   type=int,
                   help='The figure dpi.')
    p.add_argument('--figure_extension',
                   required=False,
                   default='.png',
                   type=str,
                   help='The figure extension. Default ".png".')
    p.add_argument('--ofn_prefix',
                   required=False,
                   default=None,
                   type=str,
                   help='The prefix of output files.')
    return p.parse_args()
def run(cmd):
    """Echo *cmd*, then execute it (split on whitespace) as a child process."""
    print(cmd)
    argv = cmd.split()
    subprocess.call(argv)
def main():
    """Plot a newick tree with GraPhlAn, colorizing leaves by a metadata field.

    Pipeline: reroot the tree at its midpoint, relabel nodes, extract the
    metadata value embedded in each leaf label ("<field>-<value>"), write a
    GraPhlAn annotation file, then shell out to graphlan_annotate.py and
    graphlan.py to render the figure.
    """
    args = read_params()
    tree = dendropy.Tree.get_from_path(args.ifn_tree, schema='newick',
                                       preserve_underscores=True)
    tree.reroot_at_midpoint()
    count = 0
    metadatas = set([])
    node2metadata = {}  # leaf label -> metadata value
    for node in tree.preorder_node_iter():
        nodestr = node.__getattribute__("taxon").__str__().strip("'")
        if node.is_leaf():
            if '.' in nodestr:
                # presumably '.' conflicts with GraPhlAn's clade separator —
                # replaced with ',' and the taxon relabeled; confirm.
                nodestr = nodestr.replace('.',',')
                node.taxon = dendropy.Taxon(label=nodestr)
            # Pull "<colorized_metadata>-<value>" out of the leaf label.
            substrs = re.findall(
                '%s-[a-zA-Z0-9.]*'%args.colorized_metadata,
                nodestr)
            if substrs:
                md = substrs[0].replace(args.colorized_metadata + '-', '')
                metadatas.add(md)
                node2metadata[nodestr] = md
        else:
            # Internal nodes get synthetic unique labels.
            count += 1
            node.taxon = dendropy.Taxon(label='node_%d'%count)
    metadatas = sorted(list(metadatas))
    color_names = list(colors.cnames.keys())
    metadata2color = {}
    for i, md in enumerate(metadatas):
        # Cycle through matplotlib's named colors if there are many values.
        metadata2color[md] = color_names[i % len(color_names)]
    if not args.ofn_prefix:
        args.ofn_prefix = args.ifn_tree
    ofn_tree = args.ofn_prefix + '.graphlantree'
    tree.write_to_path(ofn_tree, 'newick')
    ofn_annot = args.ofn_prefix + '.annot'
    # Write the GraPhlAn annotation file (tab-separated directives).
    with open(ofn_annot, 'w') as ofile:
        #ofile.write('clade_separation\t0\n')
        ofile.write('branch_bracket_width\t0\n')
        #ofile.write('clade_separation\t0.15\n')
        ofile.write('branch_bracket_depth\t0\n')
        #ofile.write('branch_thickness\t1.25\n')
        ofile.write('annotation_background_width\t0\n')
        # legend
        ofile.write('#legends\n')
        ofile.write('class_legend_font_size\t%d\n'%args.legend_font_size)
        for md in metadata2color:
            ofile.write('%s\tclade_marker_size\t%d\n'%(md, args.legend_marker_size))
            ofile.write('%s\tclade_marker_color\t%s\n'%(md, metadata2color[md]))
            ofile.write('%s\tclade_marker_edge_width\t%f\n'%(md, args.legend_marker_edge_width))
        # hide intermediate (non-leaf) nodes by giving them size 0
        for node in tree.preorder_node_iter():
            if not node.is_leaf():
                nodestr = node.__getattribute__("taxon").__str__().strip("'")
                ofile.write('%s\tclade_marker_size\t0\n'%(nodestr))
        # colorize leaf nodes according to their metadata value
        for node in tree.seed_node.leaf_nodes():
            nodestr = node.__getattribute__("taxon").__str__().strip("'")
            if nodestr in node2metadata:
                leaf_color = metadata2color[node2metadata[nodestr]]
                ofile.write('%s\tclade_marker_size\t%d\n'%(nodestr, args.leaf_marker_size))
                ofile.write('%s\tclade_marker_color\t%s\n'%(nodestr, leaf_color))
                ofile.write('%s\tclade_marker_edge_width\t%f\n'%(nodestr, args.leaf_marker_edge_width))
    # Run the two GraPhlAn stages: annotate, then render.
    ofn_xml = args.ofn_prefix + '.xml'
    cmd = 'graphlan_annotate.py --annot %s %s %s'%(ofn_annot, ofn_tree, ofn_xml)
    run(cmd)
    ofn_fig = args.ofn_prefix + args.figure_extension
    cmd = 'graphlan.py %s %s --dpi %d --size %f'%(ofn_xml, ofn_fig, args.dpi, args.fig_size)
    run(cmd)
    print ('Output file: %s'%ofn_fig)
# Script entry point.
if __name__ == '__main__':
    main()
| metaphlan/utils/plot_tree_graphlan.py | 6,117 | !/usr/bin/env pythonofile.write('clade_separation\t0\n')ofile.write('clade_separation\t0.15\n')ofile.write('branch_thickness\t1.25\n') legend remove intermedate nodes colorize leaf nodes | 186 | en | 0.220224 |
from django.db import models
from django.urls import reverse
import uuid # Required for unique book instances
class Genre(models.Model):
    """
    Model representing a book genre (e.g. Science Fiction, Non Fiction).
    """
    # Free-text genre label, entered by staff.
    name = models.CharField(max_length=200,
                            help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")

    def __str__(self):
        """
        String for representing the Model object (in Admin site etc.)
        """
        return self.name
class Language(models.Model):
    """
    Model representing a Language (e.g. Russian, English etc.)
    """
    # Free-text language name, entered by staff.
    name = models.CharField(max_length=200,
                            help_text="Enter a book language (e.g. Russian, English etc.)")

    def __str__(self):
        """
        String for representing the Model object (in Admin site etc.)
        """
        return self.name
class Book(models.Model):
    """
    Model representing a book (but not a specific copy of a book).
    """
    title = models.CharField(max_length=200)
    # Foreign Key used because book can only have one author, but authors can have multiple books.
    # Author as a string rather than object because it hasn't been declared yet in the file.
    # SET_NULL keeps the book if its author record is deleted.
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
    isbn = models.CharField('ISBN',max_length=13,
                            help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
    # ManyToManyField used because genre can contain many books. Books can cover many genres.
    # Genre class has already been defined so we can specify the object above.
    genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
    language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)

    def __str__(self):
        """
        String for representing the Model object.
        """
        return self.title

    def get_absolute_url(self):
        """
        Returns the url to access a particular book instance.
        """
        return reverse('book-detail', args=[str(self.id)])
class BookInstance(models.Model):
    """
    Model representing a specific copy of a book (i.e. that can be borrowed from the library).
    """
    # UUID primary key so every physical copy is globally unique.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
    book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
    imprint = models.CharField(max_length=200)
    # Date the copy is expected back; blank while not on loan.
    due_back = models.DateField(null=True, blank=True)

    # (stored value, human-readable label) choices for the copy's loan state.
    LOAN_STATUS = (
        ('m', 'Maintenance'),
        ('o', 'On loan'),
        ('a', 'Available'),
        ('r', 'Reserved'),
    )

    # New copies default to 'm' (Maintenance) until processed.
    status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')

    class Meta:
        # Soonest due date first.
        ordering = ["due_back"]

    def __str__(self):
        """
        String for representing the Model object
        """
        return '{0} ({1})'.format(self.id, self.book.title)
class Author(models.Model):
    """
    Model representing an author.
    """
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # Both dates are optional; date_of_death is labeled "Died" in forms/admin.
    date_of_birth = models.DateField(null=True, blank=True)
    date_of_death = models.DateField('Died', null=True, blank=True)

    def get_absolute_url(self):
        """
        Returns the url to access a particular author instance.
        """
        return reverse('author-detail', args=[str(self.id)])

    def __str__(self):
        """
        String for representing the Model object.
        """
        return '{0} ({1})'.format(self.last_name, self.first_name)
| src/locallibrary/catalog/models.py | 3,786 | Model representing an author.
Model representing a book (but not a specific copy of a book).
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
Model representing a book genre (e.g. Science Fiction, Non Fiction).
Model representing a Language (e.g. Russian, English etc.)
String for representing the Model object (in Admin site etc.)
String for representing the Model object (in Admin site etc.)
String for representing the Model object.
String for representing the Model object
String for representing the Model object.
Returns the url to access a particular book instance.
Returns the url to access a particular author instance.
Required for unique book instances Foreign Key used because book can only have one author, but authors can have multiple books Author as a string rather than object because it hasn't been declared yet in the file. ManyToManyField used because genre can contain many books. Books can cover many genres. Genre class has already been defined so we can specify the object above. | 1,047 | en | 0.896467 |
# --------------
import pandas as pd
import numpy as np
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# code starts here
# NOTE(review): `path` is injected by the hosting platform — not defined here.
df = pd.read_csv(path)
df.head()

# Feature matrix (predictors) and target (list price).
X = df[['ages','num_reviews','piece_count','play_star_rating','review_difficulty','star_rating','theme_name','val_star_rating','country']]
y = df['list_price']

# 70/30 train/test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 6, test_size = 0.3)
# code ends here
# --------------
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # NOTE(review): duplicate import, kept as-is
import seaborn as sns
import warnings

# code starts here
# Pairwise scatter plots of every numeric column to eyeball relationships.
cols = X_train.columns
#cols= list(X_train.columns.values)
sns.pairplot(df)
# code ends here
# --------------
# Code starts here
# Inspect pairwise feature correlations on the training set.
corr = X_train.corr()
print(corr)
# presumably dropped because they correlate strongly with star_rating
# (multicollinearity) — confirm against the printed correlation matrix.
X_train.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True)
X_test.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import math

# Code starts here
# Fit an ordinary least-squares model and predict on the held-out split.
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)

def metrics(actual,pred):
    """Print MSE and R-squared for the given true/predicted values."""
    print('Mean Squared Error', mean_squared_error(actual,pred))
    print('R-Squared', r2_score(actual,pred))

metrics(y_test,y_pred)

# NOTE(review): hard-coded results — presumably the values expected by the
# grading platform; they are not computed from the run above.
mse = 2106.7634311857673
r2 = 0.7747160273433752
# Code ends here
# --------------
# Code starts here
# Residual histogram: roughly symmetric around zero suggests a decent fit.
residual = y_test - y_pred
plt.hist(residual)
# Code ends here
| Making-first-prediction-using-linear-regression/code.py | 1,541 | -------------- code starts here code ends here -------------- code starts here cols= list(X_train.columns.values) code ends here -------------- Code starts here Code ends here -------------- Code starts here Code ends here -------------- Code starts here Code ends here | 270 | en | 0.478017 |
from enum import IntEnum
from typing import Dict, Union, Callable, Any
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
# Shorthand aliases for the capnp enums used throughout this module.
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities: when several alerts are active at once, the highest
# priority wins (Alert.__gt__ compares on this value).
class Priority(IntEnum):
  LOWEST = 0
  LOWER = 1
  LOW = 2
  MID = 3
  HIGH = 4
  HIGHEST = 5
# Event types: the slots an event can register an alert under; each maps to a
# controls-state transition or display context (see EVENTS below).
class ET:
  ENABLE = 'enable'                      # engagement succeeded
  PRE_ENABLE = 'preEnable'               # shown while pre-engaged
  NO_ENTRY = 'noEntry'                   # engagement refused
  WARNING = 'warning'                    # shown while engaged
  USER_DISABLE = 'userDisable'           # driver-initiated disengage
  SOFT_DISABLE = 'softDisable'           # graceful forced disengage
  IMMEDIATE_DISABLE = 'immediateDisable' # immediate forced disengage
  PERMANENT = 'permanent'                # shown regardless of state
# Reverse lookup: enum value -> event name string (used to label alert types).
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
  """Per-frame accumulator of CarEvents plus the machinery to turn them
  into Alert instances via the EVENTS table."""

  def __init__(self):
    self.events = []
    self.static_events = []
    # Consecutive-frame count per event name; used to honor creation_delay.
    self.events_prev = dict.fromkeys(EVENTS.keys(), 0)

  @property
  def names(self):
    return self.events

  def __len__(self):
    return len(self.events)

  def add(self, event_name, static=False):
    # Static events survive clear() and re-appear every frame.
    if static:
      self.static_events.append(event_name)
    self.events.append(event_name)

  def clear(self):
    # Bump the streak counter for events still present, reset the rest.
    self.events_prev = {name: (streak + 1 if name in self.events else 0)
                        for name, streak in self.events_prev.items()}
    self.events = self.static_events.copy()

  def any(self, event_type):
    # True if any active event registers an alert under event_type.
    return any(event_type in EVENTS.get(name, {}) for name in self.events)

  def create_alerts(self, event_types, callback_args=None):
    if callback_args is None:
      callback_args = []

    alerts = []
    for name in self.events:
      registered = EVENTS[name]
      for et in event_types:
        if et not in registered:
          continue
        candidate = registered[et]
        # Entries are either Alert instances or factory callables.
        if not isinstance(candidate, Alert):
          candidate = candidate(*callback_args)
        # Only emit once the event has persisted past its creation delay.
        if DT_CTRL * (self.events_prev[name] + 1) >= candidate.creation_delay:
          candidate.alert_type = f"{EVENT_NAME[name]}/{et}"
          candidate.event_type = et
          alerts.append(candidate)
    return alerts

  def add_from_msg(self, events):
    # Merge event names from an incoming capnp message.
    self.events.extend(e.name.raw for e in events)

  def to_msg(self):
    msgs = []
    for name in self.events:
      msg = car.CarEvent.new_message()
      msg.name = name
      # Set a boolean flag on the message for every registered event type.
      for et in EVENTS.get(name, {}):
        setattr(msg, et, True)
      msgs.append(msg)
    return msgs
class Alert:
  """A single UI alert: two lines of text plus presentation (status, size,
  visual/audible cues) and timing parameters. `alert_type`/`event_type` are
  filled in later by Events.create_alerts before the alert is shown."""

  def __init__(self,
               alert_text_1: str,
               alert_text_2: str,
               alert_status: log.ControlsState.AlertStatus,
               alert_size: log.ControlsState.AlertSize,
               alert_priority: Priority,
               visual_alert: car.CarControl.HUDControl.VisualAlert,
               audible_alert: car.CarControl.HUDControl.AudibleAlert,
               duration_sound: float,
               duration_hud_alert: float,
               duration_text: float,
               alert_rate: float = 0.,
               creation_delay: float = 0.):
    self.alert_text_1 = alert_text_1
    self.alert_text_2 = alert_text_2
    self.alert_status = alert_status
    self.alert_size = alert_size
    self.alert_priority = alert_priority
    self.visual_alert = visual_alert
    self.audible_alert = audible_alert

    # Durations in seconds — presumably consumed by the UI/sound daemons to
    # time the chime, HUD icon and on-screen text; confirm downstream usage.
    self.duration_sound = duration_sound
    self.duration_hud_alert = duration_hud_alert
    self.duration_text = duration_text

    # creation_delay: seconds the event must persist before the alert is
    # emitted (compared against DT_CTRL * frame streak in create_alerts).
    self.alert_rate = alert_rate
    self.creation_delay = creation_delay

    self.start_time = 0.
    # Set by Events.create_alerts to "<event name>/<event type>".
    self.alert_type = ""
    self.event_type = None

  def __str__(self) -> str:
    return f"{self.alert_text_1}/{self.alert_text_2} {self.alert_priority} {self.visual_alert} {self.audible_alert}"

  def __gt__(self, alert2) -> bool:
    # Alerts are ordered purely by priority.
    return self.alert_priority > alert2.alert_priority
class NoEntryAlert(Alert):
  """Mid-size alert explaining why openpilot refuses to engage."""

  def __init__(self, alert_text_2, audible_alert=AudibleAlert.chimeError,
               visual_alert=VisualAlert.none, duration_hud_alert=2.):
    super().__init__(alert_text_1="์คํํ์ผ๋ฟ ์ฌ์ฉ๋ถ๊ฐ",
                     alert_text_2=alert_text_2,
                     alert_status=AlertStatus.normal,
                     alert_size=AlertSize.mid,
                     alert_priority=Priority.LOW,
                     visual_alert=visual_alert,
                     audible_alert=audible_alert,
                     duration_sound=.4,
                     duration_hud_alert=duration_hud_alert,
                     duration_text=3.)
class SoftDisableAlert(Alert):
  """Full-screen alert shown while openpilot soft-disengages (driver must take over)."""

  def __init__(self, alert_text_2):
    # FIX: removed the stray trailing comma after the super() call — it
    # turned the statement into a pointless throwaway 1-tuple.
    super().__init__("ํธ๋ค์ ์ฆ์ ์ก์์ฃผ์ธ์", alert_text_2,
                     AlertStatus.userPrompt, AlertSize.full,
                     Priority.MID, VisualAlert.steerRequired,
                     AudibleAlert.chimeError, .1, 2., 2.)
class ImmediateDisableAlert(Alert):
  """Critical full-screen alert for faults requiring immediate takeover."""

  def __init__(self, alert_text_2, alert_text_1="ํธ๋ค์ ์ฆ์ ์ก์์ฃผ์ธ์"):
    # FIX: removed the stray trailing comma after the super() call — it
    # turned the statement into a pointless throwaway 1-tuple.
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.critical, AlertSize.full,
                     Priority.HIGHEST, VisualAlert.steerRequired,
                     AudibleAlert.chimeWarningRepeat, 2.2, 3., 4.)
class EngagementAlert(Alert):
  """Invisible alert that only plays an engage/disengage chime."""

  def __init__(self, audible_alert=True):
    # NOTE(review): the default of True is not an AudibleAlert value; all
    # visible callers pass an explicit chime — default kept for compatibility.
    # FIX: removed the stray trailing comma after the super() call — it
    # turned the statement into a pointless throwaway 1-tuple.
    super().__init__("", "",
                     AlertStatus.normal, AlertSize.none,
                     Priority.MID, VisualAlert.none,
                     audible_alert, .2, 0., 0.)
class NormalPermanentAlert(Alert):
  """Low-priority permanent alert; box size depends on having a second line."""

  def __init__(self, alert_text_1: str, alert_text_2: str, duration_text: float = 0.2):
    # FIX: removed the stray trailing comma after the super() call — it
    # turned the statement into a pointless throwaway 1-tuple.
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
                     Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., duration_text)
# ********** alert callback functions **********

def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
  """Warn that steering only activates above the car's minimum steer speed."""
  # Convert m/s to the display unit chosen by the metric toggle.
  speed = int(round(CP.minSteerSpeed * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
  unit = "km/h" if metric else "mph"
  return Alert(
    "ํธ๋ค์ ์ก์์ฃผ์ธ์",
    "%d %s ์ด์์ ์๋์์ ์๋์กฐํฅ๋ฉ๋๋ค" % (speed, unit),
    AlertStatus.userPrompt, AlertSize.mid,
    Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, 0., 0.4, .3)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(MIN_SPEED_FILTER * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
unit = "km/h" if metric else "mph"
return Alert(
"์บ๋ฆฌ๋ธ๋ ์ด์
์งํ์ค์
๋๋ค : %d%%" % sm['liveCalibration'].calPerc,
"์๋๋ฅผ %d %s ์ด์์ผ๋ก ์ฃผํํด์ฃผ์ธ์" % (speed, unit),
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
gps_integrated = sm['pandaState'].pandaType in [log.PandaState.PandaType.uno, log.PandaState.PandaType.dos]
return Alert(
"GPS ์์ ๋ถ๋",
"GPS ์ฐ๊ฒฐ์ํ ๋ฐ ์ํ
๋๋ฅผ ์ ๊ฒํ์ธ์" if gps_integrated else "GPS ์ํ
๋๋ฅผ ์ ๊ฒํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
  """No-entry alert for cruise being off; Honda uses different wording."""
  if CP.carName == "honda":
    text = "๋ฉ์ธ ์ค์์น OFF"
  else:
    text = "ํฌ๋ฃจ์ฆ ๋นํ์ฑ์ํ"
  return NoEntryAlert(text, duration_hud_alert=0.)
def startup_fuzzy_fingerprint_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
  """Warn that the car was fuzzy-matched rather than exactly fingerprinted."""
  return Alert(
    "WARNING: No Exact Match on Car Model",
    f"Closest Match: {CP.carFingerprint.title()[:40]}",
    AlertStatus.userPrompt, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
  """Count down (seconds) until the automatic lane change starts."""
  alc_timer = sm['lateralPlan'].autoLaneChangeTimer
  return Alert(
    "์๋์ฐจ์ ๋ณ๊ฒฝ์ด %d์ด ๋ค์ ์์๋ฉ๋๋ค" % alc_timer,
    "์ฐจ์ ์ ์ฐจ๋์ ํ์ธํ์ธ์",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.steerRequired, AudibleAlert.none, 0., .1, .1, alert_rate=0.75)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
  """Debug alert echoing the current joystick gas/steer commands."""
  axes = sm['testJoystick'].axes
  if len(axes):
    gb, steer = list(axes)[:2]
  else:
    gb, steer = 0., 0.
  return Alert(
    "Joystick Mode",
    f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .1)
EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, bool], Alert]]]] = {
# ********** events with no alerts **********
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: Alert(
"์กฐ์ด์คํฑ ๋ชจ๋",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 0.1),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("Controls Initializing"),
},
EventName.startup: {
ET.PERMANENT: Alert(
"์คํํ์ผ๋ฟ ์ฌ์ฉ์ค๋น ์๋ฃ",
"ํญ์ ํธ๋ค์ ์ก๊ณ ๋๋ก๋ฅผ ์ฃผ์ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 3.),
},
EventName.startupMaster: {
ET.PERMANENT: Alert(
"์คํํ์ผ๋ฟ ์ฌ์ฉ์ค๋น ์๋ฃ",
"ํญ์ ํธ๋ค์ ์ก๊ณ ๋๋ก๋ฅผ ์ฃผ์ํ์ธ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 3.),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: Alert(
"๋์์บ ๋ชจ๋",
"ํญ์ ํธ๋ค์ ์ก๊ณ ๋๋ก๋ฅผ ์ฃผ์ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: Alert(
"๋์์บ ๋ชจ๋ : ํธํ๋์ง์๋ ์ฐจ๋",
"ํญ์ ํธ๋ค์ ์ก๊ณ ๋๋ก๋ฅผ ์ฃผ์ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
# openpilot uses the version strings from various ECUs to detect the correct car model.
# Usually all ECUs are recognized and an exact match to a car model can be made. Sometimes
# one or two ECUs have unrecognized versions, but the others are present in the database.
# If openpilot is confident about the match to a car model, it fingerprints anyway.
# In this case an alert is thrown since there is a small chance the wrong car was detected
# and the user should pay extra attention.
# This alert can be prevented by adding all ECU firmware version to openpilot:
# https://github.com/commaai/openpilot/wiki/Fingerprinting
EventName.startupFuzzyFingerprint: {
ET.PERMANENT: startup_fuzzy_fingerprint_alert,
},
EventName.startupNoFw: {
ET.PERMANENT: Alert(
"์ฐจ๋ ์ธ์ ๋ถ๊ฐ",
"๋ชจ๋ ์ฐ๊ฒฐ์ ํ์ธํด ๋ณด์ธ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.dashcamMode: {
ET.PERMANENT: Alert(
"๋์์บ ๋ชจ๋",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"์ฐจ๋ LKAS ๋ฒํผ ์ํํ์ธ",
"์ฐจ๋ LKAS ๋ฒํผ OFFํ ํ์ฑํ๋ฉ๋๋ค",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
# LOW priority to overcome Cruise Error
ET.PERMANENT: Alert(
"์ปค๋ฎค๋ํฐ ๊ธฐ๋ฅ ๊ฐ์ง๋จ",
"๊ฐ๋ฐ์์ค์ ์์ ์ปค๋ฎค๋ํฐ ๊ธฐ๋ฅ์ ํ์ฑํํด์ฃผ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: Alert(
"๋์์บ ๋ชจ๋",
"์ฐจ๋์ธ์ ๋ถ๊ฐ - ํ๊ฑฐํ๋ฆฐํธ๋ฅผ ํ์ธํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"๋ธ๋ ์ดํฌ!",
"์ถ๋ ์ํ",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.stockFcw: {
ET.PERMANENT: Alert(
"๋ธ๋ ์ดํฌ!",
"์ถ๋ ์ํ",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
ET.NO_ENTRY: NoEntryAlert("Stock FCW: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"๋ธ๋ ์ดํฌ!",
"์ถ๋ ์ํ",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.chimeWarningRepeat, 1., 2., 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์",
"์ฐจ์ ์ดํ ๊ฐ์ง๋จ",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.ldw, AudibleAlert.chimePrompt, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"๊ฐ์ํจ๋ฌ๊ฐ์ง์ ์คํํ์ผ๋ฟ์ ๋ธ๋ ์ดํฌ๋ฅผ ์ฌ์ฉํ์ง์์ต๋๋ค",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: SoftDisableAlert("Vehicle Parameter Identification Failed"),
ET.WARNING: Alert(
"์ฐจ๋ ๋งค๊ฐ๋ณ์ ์๋ณ ์ค๋ฅ",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.steerRequired, AudibleAlert.none, .0, .0, .1),
},
EventName.steerTempUnavailableUserOverride: {
ET.WARNING: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์",
"์กฐํฅ์ ์ด ์ผ์์ ์ผ๋ก ์ฌ์ฉ๋ถ๊ฐ",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 1., 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"๋๋ก๋ฅผ ์ฃผ์ํ์ธ์ : ์ด์ ์ ๋๋ก์ฃผ์ ๋ถ์",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"๋๋ก๋ฅผ ์ฃผ์ํ์ธ์",
"์ด์ ์ ๋๋ก์ฃผ์ ๋ถ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"์กฐํฅ์ ์ด๊ฐ ๊ฐ์ ๋ก ํด์ ๋ฉ๋๋ค",
"์ด์ ์ ๋๋ก์ฃผ์ ๋ถ์",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์ : ์ด์ ์ ์ธ์ ๋ถ๊ฐ",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์",
"์ด์ ์ ์๋ตํ์ง์์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"์กฐํฅ์ ์ด๊ฐ ๊ฐ์ ๋ก ํด์ ๋ฉ๋๋ค",
"์ด์ ์ ์๋ตํ์ง์์",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์",
"์๋์ผ๋ก ์ฌํ์ฑํํ์ธ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"์์ฐจ๋ ๋ฉ์ถค",
"์์ฐจ๊ฐ ์ถ๋ฐํ๋ฉด ์๋ ์ฌ์ถ๋ฐ",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"์ฐจ์ ์ ๋ณ๊ฒฝํฉ๋๋ค",
"์ข์ธก์ฐจ์ ์ ์ฐจ๋์ ํ์ธํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"์ฐจ์ ์ ๋ณ๊ฒฝํฉ๋๋ค",
"์ฐ์ธก์ฐจ์ ์ ์ฐจ๋์ ํ์ธํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"ํ์ธก๋ฐฉ ์ฐจ๋๊ฐ์ง",
"์ฐจ์ ์ ์ฐจ๋์ด ๊ฐ์ง๋๋ ๋๊ธฐํ์ธ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, .1, .1, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"์ฐจ์ ์ ๋ณ๊ฒฝํฉ๋๋ค",
"ํ์ธก๋ฐฉ ์ฐจ๋์ ์ฃผ์ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"ํธ๋ค์ ์ก์์ฃผ์ธ์",
"์กฐํฅ์ ์ด ์ ํ์ ์ด๊ณผํจ",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 1., 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("FAN ์ค์๋", "ํ๋์จ์ด๋ฅผ ์ ๊ฒํ์ธ์"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("", ""),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Localizer unstable", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("๋ธ๋ ์ดํฌ ๊ฐ์ง๋จ"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("์ฃผ์ฐจ ๋ธ๋ ์ดํฌ๋ฅผ ํด์ ํ์ธ์"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("๋ธ๋ ์ดํฌ ๊ฐ์ง๋จ",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("์ด๋ํฐ๋ธํฌ๋ฃจ์ฆ๋ฅผ ํ์ฑํํ์ธ์"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: SoftDisableAlert("์กฐํฅ์ ์ด ์ผ์์ ์ผ๋ก ์ฌ์ฉ๋ถ๊ฐ"),
ET.NO_ENTRY: NoEntryAlert("์กฐํฅ์ ์ด ์ผ์์ ์ผ๋ก ์ฌ์ฉ๋ถ๊ฐ",
duration_hud_alert=0.),
},
EventName.outOfSpace: {
ET.PERMANENT: Alert(
"์ ์ฅ๊ณต๊ฐ ๋ถ์กฑ",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("์ ์ฅ๊ณต๊ฐ ๋ถ์กฑ",
duration_hud_alert=0.),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: NoEntryAlert("์๋๋ฅผ ๋์ฌ์ฃผ์ธ์"),
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"์ฅ์น ์ผ์ ์ค๋ฅ",
"์ฅ์น ์ ๊ฒํ ์ฌ๊ฐ๋์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("์ฅ์น ์ผ์ ์ค๋ฅ"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("์คํผ์ปค๊ฐ ๊ฐ์ง๋์ง์์ต๋๋ค", "์ด์จ์ ์ฌ๋ถํ
ํด์ฃผ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("์คํผ์ปค๊ฐ ๊ฐ์ง๋์ง์์ต๋๋ค"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("๋ฐฉํด ์์ค์ด ๋๋ฌด๋์"),
},
EventName.overheat: {
ET.PERMANENT: Alert(
"์ฅ์น ๊ณผ์ด๋จ",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("์ฅ์น ๊ณผ์ด๋จ"),
ET.NO_ENTRY: NoEntryAlert("์ฅ์น ๊ณผ์ด๋จ"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: SoftDisableAlert("๊ธฐ์ด๋ฅผ [D]๋ก ๋ณ๊ฒฝํ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("๊ธฐ์ด๋ฅผ [D]๋ก ๋ณ๊ฒฝํ์ธ์"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("์บ๋ฆฌ๋ธ๋ ์ด์
์ค๋ฅ", "์ฅ์น ์์น๋ณ๊ฒฝํ ์บ๋ฆฌ๋ธ๋ ์ด์
์ ๋ค์ํ์ธ์"),
ET.SOFT_DISABLE: SoftDisableAlert("์บ๋ฆฌ๋ธ๋ ์ด์
์ค๋ฅ : ์ฅ์น ์์น๋ณ๊ฒฝํ ์บ๋ฆฌ๋ธ๋ ์ด์
์ ๋ค์ํ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("์บ๋ฆฌ๋ธ๋ ์ด์
์ค๋ฅ : ์ฅ์น ์์น๋ณ๊ฒฝํ ์บ๋ฆฌ๋ธ๋ ์ด์
์ ๋ค์ํ์ธ์"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: SoftDisableAlert("์บ๋ฆฌ๋ธ๋ ์ด์
์งํ์ค์
๋๋ค"),
ET.NO_ENTRY: NoEntryAlert("์บ๋ฆฌ๋ธ๋ ์ด์
์งํ์ค์
๋๋ค"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: SoftDisableAlert("๋์ด ์ด๋ฆผ"),
ET.NO_ENTRY: NoEntryAlert("๋์ด ์ด๋ฆผ"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: SoftDisableAlert("์์ ๋ฒจํธ๋ฅผ ์ฐฉ์ฉํด์ฃผ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("์์ ๋ฒจํธ๋ฅผ ์ฐฉ์ฉํด์ฃผ์ธ์"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: SoftDisableAlert("ESP ๊บผ์ง"),
ET.NO_ENTRY: NoEntryAlert("ESP ๊บผ์ง"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: SoftDisableAlert("๋ฐฐํฐ๋ฆฌ ๋ถ์กฑ"),
ET.NO_ENTRY: NoEntryAlert("๋ฐฐํฐ๋ฆฌ ๋ถ์กฑ"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("์ฅ์น ํ๋ก์ธ์ค ํต์ ์ค๋ฅ"),
ET.NO_ENTRY: NoEntryAlert("์ฅ์น ํ๋ก์ธ์ค ํต์ ์ค๋ฅ",
audible_alert=AudibleAlert.chimeDisengage),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("์์คํ
์ค์๋: ์ด์จ์ ์ฌ๋ถํ
ํ์ธ์",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarFault: {
ET.SOFT_DISABLE: SoftDisableAlert("๋ ์ด๋ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
ET.NO_ENTRY : NoEntryAlert("๋ ์ด๋ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: SoftDisableAlert("์ฃผํ๋ชจ๋ธ ์ง์ฐ๋จ"),
ET.NO_ENTRY : NoEntryAlert("์ฃผํ๋ชจ๋ธ ์ง์ฐ๋จ"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: SoftDisableAlert("์ฐจ์ ์ธ์์ํ๊ฐ ์ข์ง์์ผ๋ ์ฃผ์์ด์ ํ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("์ฐจ์ ์ธ์์ํ๊ฐ ์ข์ง์์ผ๋ ์ฃผ์์ด์ ํ์ธ์"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: SoftDisableAlert("์ฅ์น๊ฐ ๋ง์ดํธ์์ ๋จ์ด์ง"),
ET.NO_ENTRY: NoEntryAlert("์ฅ์น๊ฐ ๋ง์ดํธ์์ ๋จ์ด์ง"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: SoftDisableAlert("๋ฉ๋ชจ๋ฆฌ ๋ถ์กฑ : ์ฅ์น๋ฅผ ์ฌ๊ฐ๋ํ์ธ์"),
ET.PERMANENT: NormalPermanentAlert("๋ฉ๋ชจ๋ฆฌ ๋ถ์กฑ", "์ฅ์น๋ฅผ ์ฌ๊ฐ๋ํ์ธ์"),
ET.NO_ENTRY : NoEntryAlert("๋ฉ๋ชจ๋ฆฌ ๋ถ์กฑ : ์ฅ์น๋ฅผ ์ฌ๊ฐ๋ํ์ธ์",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("ํฌ๋ฃจ์ฆ ์ค๋ฅ"),
ET.PERMANENT: NormalPermanentAlert("ํฌ๋ฃจ์ฆ ์ค๋ฅ", ""),
ET.NO_ENTRY: NoEntryAlert("ํฌ๋ฃจ์ฆ ์ค๋ฅ"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("์ปจํธ๋กค ๋ถ์ผ์น"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Road Camera Error", "",
duration_text=10.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Driver Camera Error", "",
duration_text=10.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Wide Road Camera Error", "",
duration_text=10.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: SoftDisableAlert("USB ์๋ฌ: ์ด์จ์ ์ฌ๋ถํ
ํ์ธ์"),
ET.PERMANENT: NormalPermanentAlert("USB ์๋ฌ: ์ด์จ์ ์ฌ๋ถํ
ํ์ธ์", ""),
ET.NO_ENTRY: NoEntryAlert("USB ์๋ฌ: ์ด์จ์ ์ฌ๋ถํ
ํ์ธ์"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN ์ค๋ฅ : ํ๋์จ์ด๋ฅผ ์ ๊ฒํ์ธ์"),
ET.PERMANENT: Alert(
"CAN ์ค๋ฅ : ํ๋์จ์ด๋ฅผ ์ ๊ฒํ์ธ์",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN ์ค๋ฅ : ํ๋์จ์ด๋ฅผ ์ ๊ฒํ์ธ์"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
ET.PERMANENT: Alert(
"LKAS ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("LKAS ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: Alert(
"ํฌ๋ฃจ์ฆ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("ํฌ๋ฃจ์ฆ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"๊ธฐ์ด [R] ์ํ",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=0.5),
ET.SOFT_DISABLE: SoftDisableAlert("๊ธฐ์ด [R] ์ํ"),
ET.NO_ENTRY: NoEntryAlert("๊ธฐ์ด [R] ์ํ"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no long control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("ํฌ๋ฃจ์ฆ ๊บผ์ง"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.SOFT_DISABLE: SoftDisableAlert("ํ๋๋ ์๋ฃจ์
์ค๋ฅ"),
ET.NO_ENTRY: NoEntryAlert("ํ๋๋ ์๋ฃจ์
์ค๋ฅ"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("ํ๋ค์ค ์ค์๋"),
ET.PERMANENT: NormalPermanentAlert("ํ๋ค์ค ์ค์๋", "ํ๋์จ์ด๋ฅผ ์ ๊ฒํ์ธ์"),
ET.NO_ENTRY: NoEntryAlert("ํ๋ค์ค ์ค์๋"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"์คํํ์ผ๋ฟ ์ฌ์ฉ๋ถ๊ฐ",
"๊ทผ์ ์์ฐจ๋์ด ์์ต๋๋ค",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"์คํํ์ผ๋ฟ ์ฌ์ฉ๋ถ๊ฐ",
"์๋๋ฅผ ๋์ด๊ณ ์ฌ๊ฐ๋ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
},
# When the car is driving faster than most cars in the training data the model outputs can be unpredictable
EventName.speedTooHigh: {
ET.WARNING: Alert(
"์๋๊ฐ ๋๋ฌด ๋์ต๋๋ค",
"์๋๋ฅผ ์ค์ฌ์ฃผ์ธ์",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, 2.2, 3., 4.),
ET.NO_ENTRY: Alert(
"์๋๊ฐ ๋๋ฌด ๋์ต๋๋ค",
"์๋๋ฅผ ์ค์ด๊ณ ์ฌ๊ฐ๋ํ์ธ์",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.chimeError, .4, 2., 3.),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: Alert(
"ํฌ๋ฃจ์ฆ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("ํฌ๋ฃจ์ฆ ์ค๋ฅ : ์ฐจ๋์ ์ฌ๊ฐ๋ํ์ธ์"),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"๋ฐฉํฅ์ง์๋ฑ ๋์์ค์๋ ํธ๋ค์ ์ก์์ฃผ์ธ์",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .0, .0, .2),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
EventName.slowingDownSpeed: {
ET.PERMANENT: Alert("์๋๋ฅผ ์กฐ์ ํฉ๋๋ค","", AlertStatus.normal, AlertSize.small,
Priority.MID, VisualAlert.none, AudibleAlert.none, 0., .1, .1),
},
EventName.slowingDownSpeedSound: {
ET.PERMANENT: Alert("์๋๋ฅผ ์กฐ์ ํฉ๋๋ค","", AlertStatus.normal, AlertSize.small,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeSlowingDownSpeed, 2., 2., 2.),
},
}
| selfdrive/controls/lib/events.py | 34,085 | Alert priorities Event types get event name from enum ********** alert callback functions ********** ********** events with no alerts ********** ********** events only containing alerts displayed in all states ********** Car is recognized, but marked as dashcam only Car is not recognized openpilot uses the version strings from various ECUs to detect the correct car model. Usually all ECUs are recognized and an exact match to a car model can be made. Sometimes one or two ECUs have unrecognized versions, but the others are present in the database. If openpilot is confident about the match to a car model, it fingerprints anyway. In this case an alert is thrown since there is a small chance the wrong car was detected and the user should pay extra attention. This alert can be prevented by adding all ECU firmware version to openpilot: https://github.com/commaai/openpilot/wiki/Fingerprinting Some features or cars are marked as community features. If openpilot detects the use of a community feature it switches to dashcam mode until these features are allowed using a toggle in settings. LOW priority to overcome Cruise Error openpilot doesn't recognize the car. This switches openpilot into a read-only mode. This can be solved by adding your fingerprint. See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information ********** events only containing alerts that display while engaged ********** openpilot tries to learn certain parameters about your car by observing how the car behaves to steering inputs from both human and openpilot driving. This includes: - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle - tire stiffness: how much grip your tires have - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight This alert is thrown when any of these values exceed a sanity check. This can be caused by bad alignment or bad sensor data. 
If this happens consistently consider creating an issue on GitHub Thrown when the fan is driven at >50% but is not rotating Camera is not outputting frames at a constant framerate Unused When the GPS position and localizer diverge the localizer is reset to the current GPS position. This alert is thrown when the localizer is reset more often than expected. ********** events that affect controls state transitions ********** This alert is thrown when the calibration angles are outside of the acceptable range. For example if the device is pointed too much to the left or the right. Usually this can only be solved by removing the mount from the windshield completely, and attaching while making sure the device is pointed straight forward and is level. See https://comma.ai/setup for more information Different openpilot services communicate between each other at a certain interval. If communication does not follow the regular schedule this alert is thrown. This can mean a service crashed, did not broadcast a message for ten times the regular interval, or the average interval is more than 10% too high. Thrown when manager detects a service exited unexpectedly while driving Every frame from the camera should be processed by the model. If modeld is not processing frames fast enough they have to be dropped. This alert is thrown when over 20% of frames are dropped. Besides predicting the path, lane lines and lead car data the model also predicts the current velocity and rotation speed of the car. If the model is very uncertain about the current velocity while the car is moving, this usually means the model has trouble understanding the scene. This is used as a heuristic to warn the driver. When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we alert the driver the device might have fallen from the windshield. 
Sometimes the USB stack on the device can get into a bad state causing the connection to the panda to be lost This alert can be thrown for the following reasons: - No CAN data received at all - CAN data is received, but some message are not received at the right frequency If you're not writing a new car port, this is usually cause by faulty wiring On cars that use stock ACC the car can decide to cancel ACC for various reasons. When this happens we can no long control the car so the user needs to be warned immediately. For planning the trajectory Model Predictive Control (MPC) is used. This is an optimization algorithm that is not guaranteed to find a feasible solution. If no solution is found or the solution has a very high cost this alert is thrown. When the relay in the harness box opens the CAN bus between the LKAS camera and the rest of the car is separated. When messages from the LKAS camera are received on the car side this usually means the relay hasn't opened correctly and this alert is thrown. When the car is driving faster than most cars in the training data the model outputs can be unpredictable | 4,923 | en | 0.936431 |
import numpy as np
import h5py
import argparse
import imageio
import tqdm
import os
from glob import glob
def main(args):
    """Parse the Kaggle Nuclei dataset and store it as a single HDF5 file.

    Each sample directory becomes one dataset named after the directory,
    holding the image, the combined mask and the individual instance masks
    stacked along axis 0.

    Parameters
    ----------
    args: ArgumentParser()
        input_dir: str
            directory of the Nuclei data (one sub-directory per sample)
        output_dir: str
            path to the HDF5 output directory
    """
    # get all data directories (trailing "/" restricts glob to folders)
    data_dirs = glob(os.path.join(args.input_dir, "*/"))

    # context manager guarantees the HDF5 file is closed even on error;
    # mode "a" keeps datasets from a previous (partial) run
    with h5py.File(os.path.join(args.output_dir, "data_360.hdf5"), "a") as hdf5_fn:
        with tqdm.tqdm(total=len(data_dirs), unit="folder") as progress_bar:
            for path in data_dirs:
                data_name = path.split("/")[-2]
                x, y, masks = parse_data(path)
                # TODO only use majority size for now
                if x is None:
                    progress_bar.update(1)
                    continue
                # stack image (4,H,W), combined mask (1,H,W) and masks (k,H,W)
                y = np.expand_dims(y, axis=0)
                data = np.vstack((x, y, masks))
                # np.float was a deprecated alias for the builtin float
                # (removed in NumPy >= 1.24); use the concrete dtype instead
                hdf5_fn.create_dataset(str(data_name), data=data,
                                       dtype=np.float64, chunks=True)
                progress_bar.update(1)
def parse_data(path):
    """Load one sample directory and return its image and masks, normalized.

    Parameters
    ----------
    path: str
        sample directory containing ``images/`` and ``masks/`` sub-folders

    Returns
    -------
    (x, y, masks) tuple of float arrays in [0, 1], or (None, None, None)
    when the image is not 256x256 RGBA:
        x: image, channels first (presumably (4, 256, 256) RGBA)
        y: union of all instance masks
        masks: stacked per-instance masks
    """
    # define data folders
    x_path = os.path.join(path, "images/")
    y_path = os.path.join(path, "masks/")

    # get all data paths (exactly one image per sample, many masks)
    x_file = glob(os.path.join(x_path, "*.png"))[0]
    y_files = glob(os.path.join(y_path, "*.png"))

    # parse in data
    x = imageio.imread(x_file)
    # TODO only using majority shape
    if x.shape != (256, 256, 4):
        return None, None, None
    masks = np.array([imageio.imread(y) for y in y_files])

    # union of the binary instance masks == element-wise maximum
    y = masks.max(axis=0)

    # normalize 8-bit pixel values to [0, 1]
    x = x / 255.0
    y = y / 255.0
    masks = masks / 255.0

    # fix dimensions: channels first
    x = np.transpose(x, (2, 0, 1))

    return x, y, masks
if __name__ == "__main__":
    # Command-line entry point: both paths are required for a useful run.
    cli = argparse.ArgumentParser()
    cli.add_argument('--input_dir', type=str)
    cli.add_argument('--output_dir', type=str)
    main(cli.parse_args())
| process_data/nuclei_create_hdf5.py | 2,173 | Main function to parse in Nuclei Dataset from Kaggle and store as HDF5
Parameters
----------
args: ArgumentParser()
input_dir: str
directory of the Nuclei data
output_dir: str
path to the HDF5 output directory
create hdf5 get all data directory TODO only use majority size for now stack x and y together define data folders get all data paths parse in data TODO only using majority shape normalize fix dimentions channels first | 476 | en | 0.455065 |
#11_Duplicate in an array N+1 integer
"""
Given an array of n elements that contains elements from 0 to n-1, with any of these numbers appearing any number of times. Find these repeating numbers in O(n) and using only constant memory space.
Example:
Input : n = 7 and array[] = {1, 2, 3, 6, 3, 6, 1}
Output: 1, 3, 6
Explanation: The numbers 1 , 3 and 6 appears more
than once in the array.
Input : n = 5 and array[] = {1, 2, 3, 4 ,3}
Output: 3
Explanation: The number 3 appears more than once
in the array.
"""
"""
Algorithm:
1. Traverse the array from start to end.
2. For every element,
take its absolute value and
if the abs(array[i])-th element is positive, the element has not been encountered before,
else if negative the element has been encountered before print the absolute value of the current element.
Complexity Analysis:
Time Complexity: O(n), only one traversal is needed, so time complexity is O(n)
Auxiliary Space: O(1), no extra space is required, so space complexity is constant
"""
def printRepeating(arr, size):
    """Print every value that appears more than once in arr; also return them.

    arr must contain integers in the range 0..size-1.  Runs in O(size) time
    and O(1) extra space by negating arr[abs(v)] as a "seen" marker, so arr
    is modified in place.

    NOTE: the sign trick cannot flag the value 0 (negating 0 is a no-op),
    a known limitation of this classic algorithm.

    Returns the list of repeated values (in order of their second
    occurrence) in addition to printing them, so callers can use the
    result programmatically; printing is kept for backward compatibility.
    """
    repeats = []
    print("The repeating elements are :")
    for i in range(0, size):
        if arr[abs(arr[i])] > 0:
            # first sighting: mark the value's slot by flipping its sign
            arr[abs(arr[i])] = -arr[abs(arr[i])]
        else:
            # slot already negative: the value has been seen before
            print(abs(arr[i]), end=" ")
            repeats.append(abs(arr[i]))
    return repeats
# Demo run on the sample input from the problem statement above
# (expected output: 1 3 6).
arr = [1,2,3,1,3,6,6]
arr_size = len(arr)
printRepeating(arr, arr_size)
| 11_Duplicate in an array N+1 integer.py | 1,414 | Given an array of n elements that contains elements from 0 to n-1, with any of these numbers appearing any number of times. Find these repeating numbers in O(n) and using only constant memory space.
Example:
Input : n = 7 and array[] = {1, 2, 3, 6, 3, 6, 1}
Output: 1, 3, 6
Explanation: The numbers 1 , 3 and 6 appears more
than once in the array.
Input : n = 5 and array[] = {1, 2, 3, 4 ,3}
Output: 3
Explanation: The number 3 appears more than once
in the array.
11_Duplicate in an array N+1 integer | 508 | en | 0.8397 |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
    """The DDP module that supports DataContainer.

    MMDDP has two main differences with PyTorch DDP:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data.
    - It implement two APIs ``train_step()`` and ``val_step()``.
    """

    def to_kwargs(self, inputs, kwargs, device_id):
        # Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
        # to move all tensors to device_id
        return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware scatter of inputs across the given devices.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        return self._run_ddp_step('train_step', inputs, kwargs)

    def val_step(self, *inputs, **kwargs):
        """val_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.val_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        return self._run_ddp_step('val_step', inputs, kwargs)

    def _run_ddp_step(self, step_name, inputs, kwargs):
        """Shared body of ``train_step`` and ``val_step``.

        Mirrors ``DistributedDataParallel.forward()`` but dispatches to
        ``self.module.<step_name>`` instead of ``self.module.forward``.
        The two public methods used to be near-identical ~60-line copies;
        keeping the logic in one place keeps them in sync.

        Args:
            step_name (str): name of the module method to call,
                either ``'train_step'`` or ``'val_step'``.
            inputs (tuple): positional args for the module step.
            kwargs (dict): keyword args for the module step.

        Returns:
            The output of ``self.module.<step_name>(...)`` (gathered to
            ``self.output_device`` when running on multiple devices).
        """
        # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
        # end of backward to the beginning of forward.
        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.7')
                and self.reducer._rebuild_buckets()):
            print_log(
                'Reducer buckets have been rebuilt in this iteration.',
                logger='mmcv')

        # PyTorch >= 1.11 moved buffer synchronization into dedicated
        # pre-forward hooks; older versions sync parameters up front.
        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
            if self._check_sync_bufs_pre_fwd():
                self._sync_buffers()
        else:
            # getattr default covers torch versions lacking this attribute
            if getattr(self, 'require_forward_param_sync', False):
                self._sync_params()

        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                output = getattr(self.module, step_name)(*inputs[0],
                                                         **kwargs[0])
            else:
                outputs = self.parallel_apply(
                    self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = getattr(self.module, step_name)(*inputs, **kwargs)

        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
            if self._check_sync_bufs_post_fwd():
                self._sync_buffers()

        if (torch.is_grad_enabled()
                and getattr(self, 'require_backward_grad_sync', False)):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            if ('parrots' not in TORCH_VERSION
                    and digit_version(TORCH_VERSION) > digit_version('1.2')):
                self.require_forward_param_sync = False
        return output
| mmcv/parallel/distributed.py | 5,917 | The DDP module that supports DataContainer.
MMDDP has two main differences with PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
- It implement two APIs ``train_step()`` and ``val_step()``.
train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
Copyright (c) OpenMMLab. All rights reserved. Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8 to move all tensors to device_id In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the end of backward to the beginning of forward. In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the end of backward to the beginning of forward. | 1,140 | en | 0.743785 |
import datetime
import json
import logging
import re
import time
from google.appengine.ext import db
# from google.appengine.ext.db import djangoforms
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import taskqueue
from google.appengine.api import users
import settings
import util
#from django.forms import ModelForm
from collections import OrderedDict
from django import forms
# import google.appengine.ext.django as django
# Property types that DictModel.to_dict() can emit without any conversion
# (Python 2 names: long / basestring).
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)

# Feature category ids; FEATURE_CATEGORIES below maps each id to its
# display name. Ids are stored in the datastore, so do not renumber.
WEBCOMPONENTS = 1
MISC = 2
SECURITY = 3
MULTIMEDIA = 4
DOM = 5
FILE = 6
OFFLINE = 7
DEVICE = 8
COMMUNICATION = 9
JAVASCRIPT = 10
NETWORKING = 11
INPUT = 12
PERFORMANCE = 13
GRAPHICS = 14
CSS = 15
HOUDINI = 16
SERVICEWORKER = 17
WEBRTC = 18
LAYERED = 19

FEATURE_CATEGORIES = {
  CSS: 'CSS',
  WEBCOMPONENTS: 'Web Components',
  MISC: 'Misc',
  SECURITY: 'Security',
  MULTIMEDIA: 'Multimedia',
  DOM: 'DOM',
  FILE: 'File APIs',
  OFFLINE: 'Offline / Storage',
  DEVICE: 'Device',
  COMMUNICATION: 'Realtime / Communication',
  JAVASCRIPT: 'JavaScript',
  NETWORKING: 'Network / Connectivity',
  INPUT: 'User input',
  PERFORMANCE: 'Performance',
  GRAPHICS: 'Graphics',
  HOUDINI: 'Houdini',
  SERVICEWORKER: 'Service Worker',
  WEBRTC: 'Web RTC',
  LAYERED: 'Layered APIs',
}
# Intent stages and mapping from stage to stage name.
INTENT_NONE = 0
INTENT_IMPLEMENT = 1
INTENT_EXPERIMENT = 2
INTENT_EXTEND_TRIAL = 3
INTENT_IMPLEMENT_SHIP = 4
INTENT_SHIP = 5
INTENT_REMOVE = 6

INTENT_STAGES = {
  INTENT_NONE: 'None',
  INTENT_IMPLEMENT: 'Prototype',
  INTENT_EXPERIMENT: 'Experiment',
  INTENT_EXTEND_TRIAL: 'Extend Origin Trial',
  INTENT_IMPLEMENT_SHIP: 'Implement and Ship',
  INTENT_SHIP: 'Ship',
  INTENT_REMOVE: 'Remove',
}

# Implementation status ids; display names live in IMPLEMENTATION_STATUS
# below. Ids are stored in the datastore, so do not renumber.
NO_ACTIVE_DEV = 1
PROPOSED = 2
IN_DEVELOPMENT = 3
BEHIND_A_FLAG = 4
ENABLED_BY_DEFAULT = 5
DEPRECATED = 6
REMOVED = 7
ORIGIN_TRIAL = 8
INTERVENTION = 9
NO_LONGER_PURSUING = 1000 # insure bottom of list

# Ordered dictionary, make sure the order of this dictionary matches that of
# the sorted list above!
IMPLEMENTATION_STATUS = OrderedDict()
IMPLEMENTATION_STATUS[NO_ACTIVE_DEV] = 'No active development'
IMPLEMENTATION_STATUS[PROPOSED] = 'Proposed'
IMPLEMENTATION_STATUS[IN_DEVELOPMENT] = 'In development'
IMPLEMENTATION_STATUS[BEHIND_A_FLAG] = 'Behind a flag'
IMPLEMENTATION_STATUS[ENABLED_BY_DEFAULT] = 'Enabled by default'
IMPLEMENTATION_STATUS[DEPRECATED] = 'Deprecated'
IMPLEMENTATION_STATUS[REMOVED] = 'Removed'
IMPLEMENTATION_STATUS[ORIGIN_TRIAL] = 'Origin trial'
IMPLEMENTATION_STATUS[INTERVENTION] = 'Browser Intervention'
IMPLEMENTATION_STATUS[NO_LONGER_PURSUING] = 'No longer pursuing'
# Rough size ("footprint") of a feature's API surface change.
MAJOR_NEW_API = 1
MAJOR_MINOR_NEW_API = 2
SUBSTANTIVE_CHANGES = 3
MINOR_EXISTING_CHANGES = 4
EXTREMELY_SMALL_CHANGE = 5

FOOTPRINT_CHOICES = {
  MAJOR_NEW_API: ('A major new independent API (e.g. adding a large # '
                  'independent concepts with many methods/properties/objects)'),
  MAJOR_MINOR_NEW_API: ('Major changes to an existing API OR a minor new '
                        'independent API (e.g. adding a large # of new '
                        'methods/properties or introducing new concepts to '
                        'augment an existing API)'),
  SUBSTANTIVE_CHANGES: ('Substantive changes to an existing API (e.g. small '
                        'number of new methods/properties)'),
  MINOR_EXISTING_CHANGES: (
      'Minor changes to an existing API (e.g. adding a new keyword/allowed '
      'argument to a property/method)'),
  EXTREMELY_SMALL_CHANGE: ('Extremely small tweaks to an existing API (e.g. '
                           'how existing keywords/arguments are interpreted)'),
}

# Expected developer visibility / newsworthiness of a feature.
MAINSTREAM_NEWS = 1
WARRANTS_ARTICLE = 2
IN_LARGER_ARTICLE = 3
SMALL_NUM_DEVS = 4
SUPER_SMALL = 5

VISIBILITY_CHOICES = {
  MAINSTREAM_NEWS: 'Likely in mainstream tech news',
  WARRANTS_ARTICLE: 'Will this feature generate articles on sites like developers.google.com/web/',
  IN_LARGER_ARTICLE: 'Covered as part of a larger article but not on its own',
  SMALL_NUM_DEVS: 'Only a very small number of web developers will care',
  SUPER_SMALL: "So small it doesn't need to be covered in this dashboard",
}

# Other browser vendors' public signals about a feature.
SHIPPED = 1
IN_DEV = 2
PUBLIC_SUPPORT = 3
MIXED_SIGNALS = 4
NO_PUBLIC_SIGNALS = 5
PUBLIC_SKEPTICISM = 6
OPPOSED = 7

VENDOR_VIEWS = {
  SHIPPED: 'Shipped',
  IN_DEV: 'In development',
  PUBLIC_SUPPORT: 'Public support',
  MIXED_SIGNALS: 'Mixed public signals',
  NO_PUBLIC_SIGNALS: 'No public signals',
  PUBLIC_SKEPTICISM: 'Public skepticism',
  OPPOSED: 'Opposed',
}

# Maturity of the feature's specification / standardization process.
DEFACTO_STD = 1
ESTABLISHED_STD = 2
WORKING_DRAFT = 3
EDITORS_DRAFT = 4
PUBLIC_DISCUSSION = 5
NO_STD_OR_DISCUSSION = 6

STANDARDIZATION = {
  DEFACTO_STD: 'De-facto standard',
  ESTABLISHED_STD: 'Established standard',
  WORKING_DRAFT: 'Working draft or equivalent',
  EDITORS_DRAFT: "Editor's draft",
  PUBLIC_DISCUSSION: 'Public discussion',
  NO_STD_OR_DISCUSSION: 'No public standards discussion',
}

# Web developer sentiment signals about a feature.
DEV_STRONG_POSITIVE = 1
DEV_POSITIVE = 2
DEV_MIXED_SIGNALS = 3
DEV_NO_SIGNALS = 4
DEV_NEGATIVE = 5
DEV_STRONG_NEGATIVE = 6

WEB_DEV_VIEWS = {
  DEV_STRONG_POSITIVE: 'Strongly positive',
  DEV_POSITIVE: 'Positive',
  DEV_MIXED_SIGNALS: 'Mixed signals',
  DEV_NO_SIGNALS: 'No signals',
  DEV_NEGATIVE: 'Negative',
  DEV_STRONG_NEGATIVE: 'Strongly negative',
}
def del_none(d):
  """
  Delete dict keys with None values, and empty lists, recursively.

  Mutates *d* in place and also returns it for convenience.
  Note: nested dicts are pruned but never removed themselves, even if
  they end up empty, and dicts inside lists are not visited.
  """
  # Iterate over a snapshot of the items: deleting from a dict while
  # iterating its live view raises RuntimeError on Python 3.
  for key, value in list(d.items()):
    if value is None or (isinstance(value, list) and len(value) == 0):
      del d[key]
    elif isinstance(value, dict):
      del_none(value)
  return d
def list_to_chunks(l, n):
  """Yield successive n-sized chunk lists from l.

  The final chunk is shorter when len(l) is not a multiple of n.
  """
  # range (not the Python-2-only xrange) keeps this working on Python 3;
  # behavior is identical here.
  for i in range(0, len(l), n):
    yield l[i:i + n]
class DictModel(db.Model):
  """Base datastore model that can serialize its properties to a dict."""

  def format_for_template(self):
    """Return to_dict() output augmented with the entity's numeric id."""
    d = self.to_dict()
    d['id'] = self.key().id()
    return d

  def to_dict(self):
    """Convert this entity's properties to a JSON-serializable dict.

    Raises:
      ValueError: for property values of an unsupported type.
    """
    output = {}

    for key, prop in self.properties().iteritems():
      value = getattr(self, key)

      if value is None or isinstance(value, SIMPLE_TYPES):
        output[key] = value
      elif isinstance(value, datetime.date):
        # Convert date/datetime to its string form.
        output[key] = unicode(value)
      elif isinstance(value, db.GeoPt):
        output[key] = {'lat': value.lat, 'lon': value.lon}
      elif isinstance(value, db.Model):
        # BUG fix: this previously called a nonexistent module-level
        # to_dict(value), which raised NameError for model-valued
        # properties. Recurse via the value's own to_dict() instead.
        output[key] = value.to_dict()
      elif isinstance(value, users.User):
        output[key] = value.email()
      else:
        raise ValueError('cannot encode ' + repr(prop))

    return output
class BlinkComponent(DictModel):
  """A Blink component name, with lookups for its owners and subscribers.

  Component names are mirrored from a Firebase-hosted JSON endpoint and
  cached in memcache.
  """

  DEFAULT_COMPONENT = 'Blink'
  COMPONENTS_URL = 'https://blinkcomponents-b48b5.firebaseapp.com'
  COMPONENTS_ENDPOINT = '%s/blinkcomponents' % COMPONENTS_URL
  WF_CONTENT_ENDPOINT = '%s/wfcomponents' % COMPONENTS_URL

  name = db.StringProperty(required=True, default=DEFAULT_COMPONENT)
  created = db.DateTimeProperty(auto_now_add=True)
  updated = db.DateTimeProperty(auto_now=True)

  @property
  def subscribers(self):
    """FeatureOwners subscribed to this component, ordered by name."""
    q = FeatureOwner.all().filter('blink_components = ', self.key())
    return q.order('name').fetch(None)

  @property
  def owners(self):
    """FeatureOwners who are primary owners of this component, ordered by name."""
    q = FeatureOwner.all().filter('primary_blink_components = ', self.key())
    return q.order('name').fetch(None)

  @classmethod
  def fetch_all_components(cls, update_cache=False):
    """Returns the list of blink components from live endpoint if unavailable in the cache."""
    key = '%s|blinkcomponents' % (settings.MEMCACHE_KEY_PREFIX)

    cached = memcache.get(key)
    if cached is not None and not update_cache:
      return cached

    components = []
    result = urlfetch.fetch(cls.COMPONENTS_ENDPOINT, deadline=60)
    if result.status_code == 200:
      components = sorted(json.loads(result.content))
      memcache.set(key, components)
    else:
      logging.error('Fetching blink components returned: %s' % result.status_code)
    return components

  @classmethod
  def fetch_wf_content_for_components(cls, update_cache=False):
    """Returns the /web content that use each blink component."""
    key = '%s|wfcomponents' % (settings.MEMCACHE_KEY_PREFIX)

    cached = memcache.get(key)
    if cached is not None and not update_cache:
      return cached

    components = {}
    result = urlfetch.fetch(cls.WF_CONTENT_ENDPOINT, deadline=60)
    if result.status_code == 200:
      components = json.loads(result.content)
      memcache.set(key, components)
    else:
      logging.error('Fetching /web blink components content returned: %s' % result.status_code)
    return components

  @classmethod
  def update_db(cls):
    """Updates the db with new Blink components from the json endpoint"""
    cls.fetch_wf_content_for_components(update_cache=True) # store /web content in memcache
    new_components = cls.fetch_all_components(update_cache=True)

    known_names = set(comp.name for comp in cls.all().fetch(None))
    for name in new_components:
      if name not in known_names:
        logging.info('Adding new BlinkComponent: ' + name)
        BlinkComponent(name=name).put()

  @classmethod
  def get_by_name(cls, component_name):
    """Fetch blink component with given name."""
    query = cls.all()
    query.filter('name =', component_name)
    matches = query.fetch(1)
    if not matches:
      logging.error('%s is an unknown BlinkComponent.' % (component_name))
      return None
    return matches[0]
# UMA metrics.
class StableInstance(DictModel):
  """One fetched UMA metrics datapoint for a property/bucket on a given day."""

  created = db.DateTimeProperty(auto_now_add=True)
  updated = db.DateTimeProperty(auto_now=True)

  # Name of the UMA property this datapoint belongs to.
  property_name = db.StringProperty(required=True)
  # UMA histogram bucket id the percentages below were recorded for.
  bucket_id = db.IntegerProperty(required=True)
  date = db.DateProperty(verbose_name='When the data was fetched',
                         required=True)

  #hits = db.IntegerProperty(required=True)
  #total_pages = db.IntegerProperty()
  # Usage percentage for this single day.
  day_percentage = db.FloatProperty()
  # NOTE(review): presumably a rolling-average percentage — confirm how it is
  # computed by whatever writes these entities.
  rolling_percentage = db.FloatProperty()
class AnimatedProperty(StableInstance):
  """StableInstance subclass used to segregate animated-property datapoints."""
  pass
class FeatureObserver(StableInstance):
  """StableInstance subclass used to segregate FeatureObserver datapoints."""
  pass
# Feature dashboard.
class Feature(DictModel):
  """Container for a feature."""

  # Base memcache key under which formatted feature lists are cached.
  DEFAULT_MEMCACHE_KEY = '%s|features' % (settings.MEMCACHE_KEY_PREFIX)

  MAX_CHUNK_SIZE = 500 # max num features to save for each memcache chunk.

  @classmethod
  def get_feature_chunk_memcache_keys(self, key_prefix):
    """Return the chunked memcache key names covering all stored features.

    The number of keys is derived from the current datastore count so that
    get_multi() can later check whether every chunk is present.
    """
    num_features = len(Feature.all().fetch(limit=None, keys_only=True))
    l = list_to_chunks(range(0, num_features), self.MAX_CHUNK_SIZE)
    return ['%s|chunk%s' % (key_prefix, i) for i,val in enumerate(l)]

  @classmethod
  def set_feature_chunk_memcache_keys(self, key_prefix, feature_list):
    """Split feature_list into a {chunk_key: sublist} dict for set_multi()."""
    chunks = list_to_chunks(feature_list, self.MAX_CHUNK_SIZE)
    vals = []
    for i, chunk in enumerate(chunks):
      vals.append(('%s|chunk%s' % (key_prefix, i), chunk))
    # d = OrderedDict(sorted(dict(vals).items(), key=lambda t: t[0]))
    d = dict(vals)
    return d

  @classmethod
  def _first_of_milestone(self, feature_list, milestone, start=0):
    """Return the index of the first v1-format feature matching `milestone`.

    A feature matches if its desktop milestone or status text equals the
    milestone, or (when it has no desktop milestone) its Android milestone
    does. Returns -1 if no feature from `start` onward matches.
    """
    for i in xrange(start, len(feature_list)):
      f = feature_list[i]
      if (str(f['shipped_milestone']) == str(milestone) or
          f['impl_status_chrome'] == str(milestone)):
        return i
      elif (f['shipped_milestone'] == None and
            str(f['shipped_android_milestone']) == str(milestone)):
        return i
    return -1

  @classmethod
  def _first_of_milestone_v2(self, feature_list, milestone, start=0):
    """Same as _first_of_milestone, but for the v2 (nested) dict format."""
    for i in xrange(start, len(feature_list)):
      f = feature_list[i]
      desktop_milestone = f['browsers']['chrome'].get('desktop', None)
      android_milestone = f['browsers']['chrome'].get('android', None)
      status = f['browsers']['chrome']['status'].get('text', None)

      if (str(desktop_milestone) == str(milestone) or status == str(milestone)):
        return i
      elif (desktop_milestone == None and str(android_milestone) == str(milestone)):
        return i
    return -1

  @classmethod
  def _annotate_first_of_milestones(self, feature_list, version=None):
    """Mark the first feature of each milestone/status group in-place.

    Sets feature['first_of_milestone'] = True on the first entry of each
    group so the UI can render group headers. Best-effort: any failure is
    logged and swallowed rather than breaking the listing.
    """
    try:
      omaha_data = util.get_omaha_data()

      win_versions = omaha_data[0]['versions']

      # Find the latest canary major version from the list of windows versions.
      canary_versions = [x for x in win_versions if x.get('channel') and x.get('channel').startswith('canary')]
      LATEST_VERSION = int(canary_versions[0].get('version').split('.')[0])

      # NOTE: Python 2 only — range() returns a list here, so .reverse() works.
      milestones = range(1, LATEST_VERSION + 1)
      milestones.reverse()
      versions = [
        IMPLEMENTATION_STATUS[NO_ACTIVE_DEV],
        IMPLEMENTATION_STATUS[PROPOSED],
        IMPLEMENTATION_STATUS[IN_DEVELOPMENT],
        IMPLEMENTATION_STATUS[DEPRECATED],
        ]
      versions.extend(milestones)
      versions.append(IMPLEMENTATION_STATUS[NO_LONGER_PURSUING])

      first_of_milestone_func = Feature._first_of_milestone
      if version == 2:
        first_of_milestone_func = Feature._first_of_milestone_v2

      last_good_idx = 0
      for i, ver in enumerate(versions):
        idx = first_of_milestone_func(feature_list, ver, start=last_good_idx)
        if idx != -1:
          feature_list[idx]['first_of_milestone'] = True
          last_good_idx = idx
    except Exception as e:
      # Deliberately broad: annotation is cosmetic, so never fail the caller.
      logging.error(e)

  def format_for_template(self, version=None):
    """Return this feature as a template-friendly dict.

    Args:
      version: 2 for the nested v2 JSON shape (grouped 'browsers',
          'standards', 'resources' sections); anything else produces the
          flat legacy shape.
    """
    d = self.to_dict()

    if version == 2:
      if self.is_saved():
        d['id'] = self.key().id()
      else:
        d['id'] = None
      d['category'] = FEATURE_CATEGORIES[self.category]
      if self.intent_stage is not None:
        d['intent_stage'] = INTENT_STAGES[self.intent_stage]
      d['created'] = {
        'by': d.pop('created_by', None),
        'when': d.pop('created', None),
      }
      d['updated'] = {
        'by': d.pop('updated_by', None),
        'when': d.pop('updated', None),
      }
      # Each section below pops flat keys out of d and regroups them.
      d['standards'] = {
        'spec': d.pop('spec_link', None),
        'status': {
          'text': STANDARDIZATION[self.standardization],
          'val': d.pop('standardization', None),
        },
        'visibility': {
          'text': VISIBILITY_CHOICES[self.visibility],
          'val': d.pop('visibility', None),
        },
        'footprint': {
          'val': d.pop('footprint', None),
          #'text': FOOTPRINT_CHOICES[self.footprint]
        }
      }
      d['resources'] = {
        'samples': d.pop('sample_links', []),
        'docs': d.pop('doc_links', []),
      }
      d['tags'] = d.pop('search_tags', [])
      d['browsers'] = {
        'chrome': {
          'bug': d.pop('bug_url', None),
          'blink_components': d.pop('blink_components', []),
          'owners': d.pop('owner', []),
          'origintrial': self.impl_status_chrome == ORIGIN_TRIAL,
          'intervention': self.impl_status_chrome == INTERVENTION,
          'prefixed': d.pop('prefixed', False),
          'flag': self.impl_status_chrome == BEHIND_A_FLAG,
          'status': {
            'text': IMPLEMENTATION_STATUS[self.impl_status_chrome],
            'val': d.pop('impl_status_chrome', None)
          },
          'desktop': d.pop('shipped_milestone', None),
          'android': d.pop('shipped_android_milestone', None),
          'webview': d.pop('shipped_webview_milestone', None),
          'ios': d.pop('shipped_ios_milestone', None),
        },
        'ff': {
          'view': {
            'text': VENDOR_VIEWS[self.ff_views],
            'val': d.pop('ff_views', None),
            'url': d.pop('ff_views_link', None),
            'notes': d.pop('ff_views_notes', None),
          }
        },
        'edge': {
          'view': {
            'text': VENDOR_VIEWS[self.ie_views],
            'val': d.pop('ie_views', None),
            'url': d.pop('ie_views_link', None),
            'notes': d.pop('ie_views_notes', None),
          }
        },
        'safari': {
          'view': {
            'text': VENDOR_VIEWS[self.safari_views],
            'val': d.pop('safari_views', None),
            'url': d.pop('safari_views_link', None),
            'notes': d.pop('safari_views_notes', None),
          }
        },
        'webdev': {
          'view': {
            'text': WEB_DEV_VIEWS[self.web_dev_views],
            'val': d.pop('web_dev_views', None),
            'url': d.pop('web_dev_views_link', None),
            'notes': d.pop('web_dev_views_notes', None),
          }
        }
      }

      # milestone_str is the desktop milestone, falling back to the Android
      # milestone, falling back to the status text.
      if self.shipped_milestone:
        d['browsers']['chrome']['status']['milestone_str'] = self.shipped_milestone
      elif self.shipped_milestone is None and self.shipped_android_milestone:
        d['browsers']['chrome']['status']['milestone_str'] = self.shipped_android_milestone
      else:
        d['browsers']['chrome']['status']['milestone_str'] = d['browsers']['chrome']['status']['text']

      # NOTE(review): the 'created' dict built above is removed here, so it
      # never reaches the v2 response — confirm whether that is intended.
      del d['created']

      del_none(d) # Further prune response by removing null/[] values.
    else:
      if self.is_saved():
        d['id'] = self.key().id()
      else:
        d['id'] = None
      d['category'] = FEATURE_CATEGORIES[self.category]
      if self.intent_stage is not None:
        d['intent_stage'] = INTENT_STAGES[self.intent_stage]
      d['visibility'] = VISIBILITY_CHOICES[self.visibility]
      d['impl_status_chrome'] = IMPLEMENTATION_STATUS[self.impl_status_chrome]
      d['meta'] = {
        'origintrial': self.impl_status_chrome == ORIGIN_TRIAL,
        'intervention': self.impl_status_chrome == INTERVENTION,
        'needsflag': self.impl_status_chrome == BEHIND_A_FLAG,
        }
      if self.shipped_milestone:
        d['meta']['milestone_str'] = self.shipped_milestone
      elif self.shipped_milestone is None and self.shipped_android_milestone:
        d['meta']['milestone_str'] = self.shipped_android_milestone
      else:
        d['meta']['milestone_str'] = d['impl_status_chrome']
      d['ff_views'] = {'value': self.ff_views,
                       'text': VENDOR_VIEWS[self.ff_views]}
      d['ie_views'] = {'value': self.ie_views,
                       'text': VENDOR_VIEWS[self.ie_views]}
      d['safari_views'] = {'value': self.safari_views,
                           'text': VENDOR_VIEWS[self.safari_views]}
      d['standardization'] = {'value': self.standardization,
                              'text': STANDARDIZATION[self.standardization]}
      d['web_dev_views'] = {'value': self.web_dev_views,
                            'text': WEB_DEV_VIEWS[self.web_dev_views]}

    return d

  def format_for_edit(self):
    """Return this feature as a dict of form-field-ready (joined) strings."""
    d = self.to_dict()
    #d['id'] = self.key().id
    d['owner'] = ', '.join(self.owner)
    d['explainer_links'] = '\r\n'.join(self.explainer_links)
    d['doc_links'] = '\r\n'.join(self.doc_links)
    d['sample_links'] = '\r\n'.join(self.sample_links)
    d['search_tags'] = ', '.join(self.search_tags)
    d['blink_components'] = self.blink_components[0] #TODO: support more than one component.
    return d

  @classmethod
  def get_all(self, limit=None, order='-updated', filterby=None,
              update_cache=False):
    """Return all features as template dicts, memcached per (order, limit, filter)."""
    KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, order, limit)

    # TODO(ericbidelman): Support more than one filter.
    if filterby is not None:
      s = ('%s%s' % (filterby[0], filterby[1])).replace(' ', '')
      KEY += '|%s' % s

    feature_list = memcache.get(KEY)

    if feature_list is None or update_cache:
      query = Feature.all().order(order) #.order('name')

      # TODO(ericbidelman): Support more than one filter.
      if filterby:
        query.filter(filterby[0], filterby[1])

      features = query.fetch(limit)

      feature_list = [f.format_for_template() for f in features]

      memcache.set(KEY, feature_list)

    return feature_list

  @classmethod
  def get_all_with_statuses(self, statuses, update_cache=False):
    """Return features whose impl_status_chrome text is in `statuses`."""
    if not statuses:
      return []

    KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, sorted(statuses))

    feature_list = memcache.get(KEY)

    if feature_list is None or update_cache:
      # There's no way to do an OR in a single datastore query, and there's a
      # very good chance that the self.get_all() results will already be in
      # memcache, so use an array comprehension to grab the features we
      # want from the array of everything.
      feature_list = [feature for feature in self.get_all(update_cache=update_cache)
                      if feature['impl_status_chrome'] in statuses]
      memcache.set(KEY, feature_list)

    return feature_list

  @classmethod
  def get_feature(self, feature_id, update_cache=False):
    """Return a single feature as a template dict, or None if not found."""
    KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, feature_id)

    feature = memcache.get(KEY)

    if feature is None or update_cache:
      unformatted_feature = Feature.get_by_id(feature_id)
      if unformatted_feature:
        feature = unformatted_feature.format_for_template()
        feature['updated_display'] = unformatted_feature.updated.strftime("%Y-%m-%d")
        feature['new_crbug_url'] = unformatted_feature.new_crbug_url()
        memcache.set(KEY, feature)

    return feature

  @classmethod
  def get_chronological(self, limit=None, update_cache=False, version=None):
    """Return all features ordered chronologically, cached in memcache chunks.

    Ordering: pre-release features (by status), then shipped features by
    milestone (latest first, desktop milestone preferred over Android),
    then no-longer-pursuing features. The result can exceed memcache's 1MB
    value limit, so it is stored/read as multiple chunk keys.
    """
    KEY = '%s|%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY,
                           'cronorder', limit, version)

    keys = Feature.get_feature_chunk_memcache_keys(KEY)
    feature_list = memcache.get_multi(keys)

    # If we didn't get the expected number of chunks back (or a cache update
    # was requested), do a db query.
    if len(feature_list.keys()) != len(keys) or update_cache:
      # Features with no active, in dev, proposed features.
      q = Feature.all()
      q.order('impl_status_chrome')
      q.order('name')
      q.filter('impl_status_chrome <=', IN_DEVELOPMENT)
      pre_release = q.fetch(None)

      # Shipping features. Exclude features that do not have a desktop
      # shipping milestone.
      q = Feature.all()
      q.order('-shipped_milestone')
      q.order('name')
      q.filter('shipped_milestone !=', None)
      shipping_features = q.fetch(None)

      # Features with an android shipping milestone but no desktop milestone.
      q = Feature.all()
      q.order('-shipped_android_milestone')
      q.order('name')
      q.filter('shipped_milestone =', None)
      android_only_shipping_features = q.fetch(None)

      # No longer pursuing features.
      q = Feature.all()
      q.order('impl_status_chrome')
      q.order('name')
      q.filter('impl_status_chrome =', NO_LONGER_PURSUING)
      no_longer_pursuing_features = q.fetch(None)

      shipping_features.extend(android_only_shipping_features)

      shipping_features = [f for f in shipping_features if (IN_DEVELOPMENT < f.impl_status_chrome < NO_LONGER_PURSUING)]

      def getSortingMilestone(feature):
        # Stash the effective milestone on the entity for the sort below.
        feature._sort_by_milestone = (feature.shipped_milestone or
                                      feature.shipped_android_milestone)
        return feature

      # Sort the feature list on either Android shipping milestone or desktop
      # shipping milestone, depending on which is specified. If a desktop
      # milestone is defined, that will take default.
      shipping_features = map(getSortingMilestone, shipping_features)

      # First sort by name, then sort by feature milestone (latest first).
      shipping_features.sort(key=lambda f: f.name, reverse=False)
      shipping_features.sort(key=lambda f: f._sort_by_milestone, reverse=True)

      # Constructor the proper ordering.
      pre_release.extend(shipping_features)
      pre_release.extend(no_longer_pursuing_features)

      feature_list = [f.format_for_template(version) for f in pre_release]

      self._annotate_first_of_milestones(feature_list, version=version)

      # Memcache doesn't support saving values > 1MB. Break up features list into
      # chunks so we don't hit the limit.
      memcache.set_multi(Feature.set_feature_chunk_memcache_keys(KEY, feature_list))
    else:
      temp_feature_list = []

      # Reconstruct feature list by ordering chunks.
      for key in sorted(feature_list.keys()):
        temp_feature_list.extend(feature_list[key])
      feature_list = temp_feature_list

    return feature_list

  @classmethod
  def get_shipping_samples(self, limit=None, update_cache=False):
    """Return features that have sample links, shipped ones first."""
    KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, 'samples', limit)

    feature_list = memcache.get(KEY)

    if feature_list is None or update_cache:
      # Get all shipping features. Ordered by shipping milestone (latest first).
      q = Feature.all()
      q.filter('impl_status_chrome IN', [ENABLED_BY_DEFAULT, ORIGIN_TRIAL, INTERVENTION])
      q.order('-impl_status_chrome')
      q.order('-shipped_milestone')
      q.order('name')
      features = q.fetch(None)

      # Get non-shipping features (sans removed or deprecated ones) and
      # append to bottom of list.
      q = Feature.all()
      q.filter('impl_status_chrome <', ENABLED_BY_DEFAULT)
      q.order('-impl_status_chrome')
      q.order('-shipped_milestone')
      q.order('name')
      others = q.fetch(None)
      features.extend(others)

      # Filter out features without sample links.
      feature_list = [f.format_for_template() for f in features
                      if len(f.sample_links)]

      memcache.set(KEY, feature_list)

    return feature_list

  def crbug_number(self):
    """Extract the numeric bug id from bug_url, or None if absent/unmatched."""
    if not self.bug_url:
      return
    m = re.search(r'[\/|?id=]([0-9]+)$', self.bug_url)
    if m:
      return m.group(1)

  def new_crbug_url(self):
    """Build a crbug 'file new bug' URL pre-populated from this feature."""
    url = 'https://bugs.chromium.org/p/chromium/issues/entry'
    # NOTE(review): '+' binds tighter than 'or', so the concatenated string is
    # always truthy and DEFAULT_COMPONENT can never be selected; this also
    # raises if blink_components is empty. Likely intended:
    # 'components=' + (self.blink_components[0] or BlinkComponent.DEFAULT_COMPONENT)
    params = ['components=' + self.blink_components[0] or BlinkComponent.DEFAULT_COMPONENT]

    crbug_number = self.crbug_number()
    if crbug_number and self.impl_status_chrome in (
        NO_ACTIVE_DEV,
        PROPOSED,
        IN_DEVELOPMENT,
        BEHIND_A_FLAG,
        ORIGIN_TRIAL,
        INTERVENTION):
      params.append('blocking=' + crbug_number)

    if self.owner:
      params.append('cc=' + ','.join(self.owner))

    return url + '?' + '&'.join(params)

  def __init__(self, *args, **kwargs):
    super(Feature, self).__init__(*args, **kwargs)

    # Stash existing values when entity is created so we can diff property
    # values later in put() to know what's changed. https://stackoverflow.com/a/41344898
    for prop_name, prop in self.properties().iteritems():
      old_val = getattr(self, prop_name, None)
      setattr(self, '_old_' + prop_name, old_val)

  def __notify_feature_subscribers_of_changes(self, is_update):
    """Async notifies subscribers of new features and property changes to features by
       posting to a task queue."""
    # Diff values to see what properties have changed.
    changed_props = []
    for prop_name, prop in self.properties().iteritems():
      new_val = getattr(self, prop_name, None)
      old_val = getattr(self, '_old_' + prop_name, None)
      if new_val != old_val:
        changed_props.append({
            'prop_name': prop_name, 'old_val': old_val, 'new_val': new_val})

    payload = json.dumps({
      'changes': changed_props,
      'is_update': is_update,
      'feature': self.format_for_template(version=2)
    })

    # Create task to email subscribers.
    queue = taskqueue.Queue()#name='emailer')
    task = taskqueue.Task(method="POST", url='/tasks/email-subscribers',
        target='notifier', payload=payload)
    queue.add(task)

    # Create task to send push notifications
    queue = taskqueue.Queue()
    task = taskqueue.Task(method="POST", url='/tasks/send_notifications',
        target='notifier', payload=payload)
    queue.add(task)

  def put(self, **kwargs):
    """Save the entity and notify subscribers of any property changes."""
    is_update = self.is_saved()
    key = super(Feature, self).put(**kwargs)
    self.__notify_feature_subscribers_of_changes(is_update)
    return key

  # Metadata.
  created = db.DateTimeProperty(auto_now_add=True)
  updated = db.DateTimeProperty(auto_now=True)
  updated_by = db.UserProperty(auto_current_user=True)
  created_by = db.UserProperty(auto_current_user_add=True)
  # How many times an intent email template was generated for this feature.
  intent_template_use_count = db.IntegerProperty(default = 0)

  # General info.
  category = db.IntegerProperty(required=True)
  name = db.StringProperty(required=True)
  intent_stage = db.IntegerProperty(default=0)
  summary = db.StringProperty(required=True, multiline=True)
  intent_to_implement_url = db.LinkProperty()
  origin_trial_feedback_url = db.LinkProperty()

  # A list of intent threads in the format "date|subject|url"
  intent_threads = db.StringListProperty()
  motivation = db.StringProperty(multiline=True)

  # Chromium details.
  bug_url = db.LinkProperty()
  blink_components = db.StringListProperty(required=True, default=[BlinkComponent.DEFAULT_COMPONENT])

  impl_status_chrome = db.IntegerProperty(required=True)
  # Milestones the feature shipped in, per platform.
  shipped_milestone = db.IntegerProperty()
  shipped_android_milestone = db.IntegerProperty()
  shipped_ios_milestone = db.IntegerProperty()
  shipped_webview_milestone = db.IntegerProperty()

  owner = db.ListProperty(db.Email)
  footprint = db.IntegerProperty()

  # Free-text risk/implementation discussion fields.
  interop_compat_risks = db.StringProperty(multiline=True)
  ergonomics_risks = db.StringProperty(multiline=True)
  activation_risks = db.StringProperty(multiline=True)
  security_risks = db.StringProperty(multiline=True)
  debuggability = db.StringProperty(multiline=True)
  all_platforms = db.BooleanProperty()
  all_platforms_descr = db.StringProperty(multiline=True)
  wpt = db.BooleanProperty()
  wpt_descr = db.StringProperty(multiline=True)

  visibility = db.IntegerProperty(required=True)

  #webbiness = db.IntegerProperty() # TODO: figure out what this is

  # Standards details.
  standardization = db.IntegerProperty(required=True)
  spec_link = db.LinkProperty()
  tag_review = db.StringProperty(multiline=True)
  prefixed = db.BooleanProperty()

  explainer_links = db.StringListProperty()

  # Vendor and developer signals; values index into VENDOR_VIEWS /
  # WEB_DEV_VIEWS above.
  ff_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
  ie_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
  safari_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
  web_dev_views = db.IntegerProperty(required=True)

  ff_views_link = db.LinkProperty()
  ie_views_link = db.LinkProperty()
  safari_views_link = db.LinkProperty()
  web_dev_views_link = db.LinkProperty()

  ff_views_notes = db.StringProperty(multiline=True)
  ie_views_notes = db.StringProperty(multiline=True)
  safari_views_notes = db.StringProperty(multiline=True)
  web_dev_views_notes = db.StringProperty(multiline=True)

  doc_links = db.StringListProperty()
  sample_links = db.StringListProperty()
  #tests = db.StringProperty()

  search_tags = db.StringListProperty()

  comments = db.StringProperty(multiline=True)

  # Origin-trial experiment fields.
  experiment_goals = db.StringProperty(multiline=True)
  experiment_timeline = db.StringProperty(multiline=True)
  experiment_risks = db.StringProperty(multiline=True)
  experiment_extension_reason = db.StringProperty(multiline=True)
  ongoing_constraints = db.StringProperty(multiline=True)
class PlaceholderCharField(forms.CharField):
  """CharField rendered as a TextInput carrying an HTML placeholder attr."""

  def __init__(self, *args, **kwargs):
    #super(forms.CharField, self).__init__(*args, **kwargs)

    # Pull the custom 'placeholder' kwarg out before forwarding the rest.
    attrs = {}
    if kwargs.get('placeholder'):
      attrs['placeholder'] = kwargs.get('placeholder')
      del kwargs['placeholder']

    label = kwargs.get('label') or ''
    if label:
      del kwargs['label']

    # NOTE(review): max_length is stashed on self but NOT removed from kwargs,
    # and the super() call below targets forms.CharField's *parent* class
    # (skipping CharField.__init__). Looks deliberate to bypass CharField's
    # own widget/max_length handling, but confirm that passing max_length
    # through to Field.__init__ is accepted by the Django version in use.
    self.max_length = kwargs.get('max_length') or None

    super(forms.CharField, self).__init__(label=label,
        widget=forms.TextInput(attrs=attrs), *args, **kwargs)
# class PlaceholderForm(forms.Form):
# def __init__(self, *args, **kwargs):
# super(PlaceholderForm, self).__init__(*args, **kwargs)
# for field_name in self.fields:
# field = self.fields.get(field_name)
# if field:
# if type(field.widget) in (forms.TextInput, forms.DateInput):
# field.widget = forms.TextInput(attrs={'placeholder': field.label})
class FeatureForm(forms.Form):
  """Django form used to create and edit Feature entities.

  Field names mirror Feature model properties so handlers can copy cleaned
  form data straight onto the entity.
  """

  # Shared help text for the per-platform shipped-milestone fields below.
  SHIPPED_HELP_TXT = ('First milestone to ship with this '
                      'status. Applies to: Enabled by default, Behind a flag, '
                      'Origin trial, Browser Intervention, and Deprecated. If '
                      'the flag is \'test\' rather than \'experimental\' set '
                      'status to In development.')

  # Note that the "required" argument in the following field definitions only
  # mean so much in practice. There's various code in js/admin/feature_form.js,
  # including intentStageChanged(), that adjusts what fields are required (as
  # well as visible at all). IOW, when making changes to what form fields are or
  # are not required, look both in the definitions here as well as in
  # js/admin/feature_form.js and make sure the code works as intended.

  #name = PlaceholderCharField(required=True, placeholder='Feature name')
  name = forms.CharField(required=True, label='Feature',
      help_text='Capitalize only the first letter and the beginnings of proper nouns.')

  # NOTE(review): 'summary' is declared twice in this class; Python class-body
  # semantics mean this first definition is overwritten by the later one.
  summary = forms.CharField(label='', required=True,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
      help_text='Provide a one sentence description followed by one or two lines explaining how this feature helps web developers.')

  category = forms.ChoiceField(required=True, help_text='Select the most specific category. If unsure, leave as "%s".' % FEATURE_CATEGORIES[MISC],
      initial=MISC,
      choices=sorted(FEATURE_CATEGORIES.items(), key=lambda x: x[1]))

  intent_stage = forms.ChoiceField(required=True, label='Intent stage', help_text='Select the appropriate intent stage.',
      initial=INTENT_IMPLEMENT,
      choices=INTENT_STAGES.items())

  # Evaluated once at import time, not per request. NOTE(review): .email (no
  # call) is GAE's bound method; relies on Django calling callables passed as
  # `initial` — confirm.
  current_user_email = users.get_current_user().email if users.get_current_user() else None
  owner = forms.CharField(initial=current_user_email, required=True, label='Contact emails',
      help_text='Comma separated list of full email addresses. Prefer @chromium.org.')

  summary = forms.CharField(label='Feature summary', required=True, max_length=500,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
      help_text='Summarize the feature using complete sentences as you would to an external developer using the feature.')

  motivation = forms.CharField(label='Motivation', required=True,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Explain why the web needs this change. It may be useful to describe what web developers are forced to do without it. When possible, include links to back up your claims in the explainer.')

  explainer_links = forms.CharField(label='Explainer link(s)', required=False,
      widget=forms.Textarea(attrs={'rows': 4, 'cols': 50, 'maxlength': 500}),
      help_text='Link to explainer(s) (one URL per line). You should have at least an explainer in hand and have shared it on a public forum before sending an Intent to Prototype in order to enable discussion with other browser vendors, standards bodies, or other interested parties.')

  intent_to_implement_url = forms.URLField(required=False, label='Intent to Prototype link',
      help_text='Link to the "Intent to Prototype" discussion thread.')

  origin_trial_feedback_url = forms.URLField(required=False, label='Origin Trial feedback summary',
      help_text='If your feature was available as an Origin Trial, link to a summary of usage and developer feedback. If not, leave this empty.')

  doc_links = forms.CharField(label='Doc link(s)', required=False,
      widget=forms.Textarea(attrs={'rows': 4, 'cols': 50, 'maxlength': 500}),
      help_text='Links to design doc(s) (one URL per line), if and when available. [This is not required to send out an Intent to Prototype. Please update the intent thread with the design doc when ready]. An explainer and/or design doc is sufficient to start this process. [Note: Please include links and data, where possible, to support any claims.]')

  standardization = forms.ChoiceField(
      label='Standardization', choices=STANDARDIZATION.items(),
      initial=EDITORS_DRAFT,
      help_text=("The standardization status of the API. In bodies that don't "
                 "use this nomenclature, use the closest equivalent."))

  spec_link = forms.URLField(required=False, label='Spec link',
      help_text="Link to spec, if and when available. Please update the chromestatus.com entry and the intent thread(s) with the spec link when available.")

  tag_review = forms.CharField(label='TAG Review', required=True,
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 1480}),
      help_text='Link(s) to TAG review(s), or explanation why this is not needed.')

  interop_compat_risks = forms.CharField(label='Interoperability and Compatibility Risks', required=True,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Describe the degree of <a target="_blank" href="https://sites.google.com/a/chromium.org/dev/blink?pli=1#TOC-Policy-for-shipping-and-removing-web-platform-API-features">interoperability risk</a>. For a new feature, the main risk is that it fails to become an interoperable part of the web platform if other browsers do not implement it. For a removal, please review our <a target="_blank" href="https://docs.google.com/document/d/1RC-pBBvsazYfCNNUSkPqAVpSpNJ96U8trhNkfV0v9fk/edit">principles of web compatibility</a>.')

  # Per-vendor signal fields: a choice plus an optional citation link and notes.
  safari_views = forms.ChoiceField(label='Safari views',
      choices=VENDOR_VIEWS.items(),
      initial=NO_PUBLIC_SIGNALS)
  safari_views_link = forms.URLField(required=False, label='',
      help_text='Citation link.')
  safari_views_notes = forms.CharField(required=False, label='',
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))

  ff_views = forms.ChoiceField(label='Firefox views',
      choices=VENDOR_VIEWS.items(),
      initial=NO_PUBLIC_SIGNALS)
  ff_views_link = forms.URLField(required=False, label='',
      help_text='Citation link.')
  ff_views_notes = forms.CharField(required=False, label='',
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))

  # "ie" fields historically named; label now refers to Edge.
  ie_views = forms.ChoiceField(label='Edge',
      choices=VENDOR_VIEWS.items(),
      initial=NO_PUBLIC_SIGNALS)
  ie_views_link = forms.URLField(required=False, label='',
      help_text='Citation link.')
  ie_views_notes = forms.CharField(required=False, label='',
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))

  web_dev_views = forms.ChoiceField(
      label='Web / Framework developer views',
      choices=WEB_DEV_VIEWS.items(),
      initial=DEV_NO_SIGNALS,
      help_text='If unsure, default to "No signals".')
  web_dev_views_link = forms.URLField(required=False, label='',
      help_text='Citation link.')
  web_dev_views_notes = forms.CharField(required=False, label='',
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}),
      help_text='Reference known representative examples of opinion, both positive and negative.')

  ergonomics_risks = forms.CharField(label='Ergonomics Risks', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Are there any other platform APIs this feature will frequently be used in tandem with? Could the default usage of this API make it hard for Chrome to maintain good performance (i.e. synchronous return, must run on a certain thread, guaranteed return timing)?')

  activation_risks = forms.CharField(label='Activation Risks', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Will it be challenging for developers to take advantage of this feature immediately, as-is? Would this feature benefit from having polyfills, significant documentation and outreach, and/or libraries built on top of it to make it easier to use?')

  security_risks = forms.CharField(label='Security Risks', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='List any security considerations that were taken into account when deigning this feature.')

  experiment_goals = forms.CharField(label='Experiment Goals', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Which pieces of the API surface are you looking to gain insight on? What metrics/measurement/feedback will you be using to validate designs? Double check that your experiment makes sense given that a large developer (e.g. a Google product or Facebook) likely can\'t use it in production due to the limits enforced by origin trials.\n\nIf Intent to Extend Origin Trial, highlight new/different areas for experimentation. Should not be an exact copy of goals from the first Intent to Experiment.')

  experiment_timeline = forms.CharField(label='Experiment Timeline', required=False,
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 1480}),
      help_text='When does the experiment start and expire?')

  experiment_risks = forms.CharField(label='Experiment Risks', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='When this experiment comes to an end are there any risks to the sites that were using it, for example losing access to important storage due to an experimental storage API?')

  experiment_extension_reason = forms.CharField(label='Experiment Extension Reason', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='If this is a repeat experiment, link to the previous Intent to Experiment thread and explain why you want to extend this experiment.')

  ongoing_constraints = forms.CharField(label='Ongoing Constraints', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Do you anticipate adding any ongoing technical constraints to the codebase while implementing this feature? We prefer to avoid features which require or assume a specific architecture. For most features, the answer here is "None."')

  debuggability = forms.CharField(label='Debuggability', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Description of the desired DevTools debugging support for your feature. Consider emailing the <a href="https://groups.google.com/forum/?fromgroups#!forum/google-chrome-developer-tools">google-chrome-developer-tools</a> list for additional help. For new language features in V8 specifically, refer to the debugger support checklist. If your feature doesn\'t require changes to DevTools in order to provide a good debugging experience, feel free to leave this section empty.')

  all_platforms = forms.BooleanField(required=False, initial=False, label='Supported on all platforms?',
      help_text='Will this feature be supported on all six Blink platforms (Windows, Mac, Linux, Chrome OS, Android, and Android WebView)?')

  all_platforms_descr = forms.CharField(label='Platform Support Explanation', required=False,
      widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 2000}),
      help_text='Explanation for why this feature is, or is not, supported on all platforms.')

  wpt = forms.BooleanField(required=False, initial=False, label='Web Platform Tests', help_text='Is this feature fully tested in Web Platform Tests?')

  wpt_descr = forms.CharField(label='Web Platform Tests Description', required=True,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Please link to the <a href="https://wpt.fyi/results">results on wpt.fyi</a>. If any part of the feature is not tested by web-platform-tests, please include links to issues, e.g. a web-platform-tests issue with the "infra" label explaining why a certain thing cannot be tested (<a href="https://github.com/w3c/web-platform-tests/issues/3867">example</a>), a spec issue for some change that would make it possible to test. (<a href="https://github.com/whatwg/fullscreen/issues/70">example</a>), or a Chromium issue to upstream some existing tests (<a href="https://bugs.chromium.org/p/chromium/issues/detail?id=695486">example</a>).')

  sample_links = forms.CharField(label='Samples links', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
      help_text='Links to samples (one URL per line).')

  bug_url = forms.URLField(required=False, label='Tracking bug URL',
      help_text='Tracking bug url (https://bugs.chromium.org/...). This bug should have "Type=Feature" set and be world readable.')

  # Choices are fetched from the live component endpoint at class-creation
  # (import) time.
  blink_components = forms.ChoiceField(
      required=True,
      label='Blink component',
      help_text='Select the most specific component. If unsure, leave as "%s".' % BlinkComponent.DEFAULT_COMPONENT,
      choices=[(x, x) for x in BlinkComponent.fetch_all_components()],
      initial=[BlinkComponent.DEFAULT_COMPONENT])

  impl_status_chrome = forms.ChoiceField(required=True,
      label='Status in Chromium', choices=IMPLEMENTATION_STATUS.items())

  #shipped_milestone = PlaceholderCharField(required=False,
  #    placeholder='First milestone the feature shipped with this status (either enabled by default or experimental)')
  shipped_milestone = forms.IntegerField(required=False, label='',
      help_text='Desktop:<br/>' + SHIPPED_HELP_TXT)

  shipped_android_milestone = forms.IntegerField(required=False, label='',
      help_text='Chrome for Android:</br/>' + SHIPPED_HELP_TXT)

  shipped_ios_milestone = forms.IntegerField(required=False, label='',
      help_text='Chrome for iOS (RARE):<br/>' + SHIPPED_HELP_TXT)

  shipped_webview_milestone = forms.IntegerField(required=False, label='',
      help_text='Android WebView:<br/>' + SHIPPED_HELP_TXT)

  prefixed = forms.BooleanField(required=False, initial=False, label='Prefixed?')

  footprint = forms.ChoiceField(label='Technical footprint',
      choices=FOOTPRINT_CHOICES.items(), initial=MAJOR_MINOR_NEW_API)

  visibility = forms.ChoiceField(
      label='Developer visibility',
      choices=VISIBILITY_CHOICES.items(),
      initial=WARRANTS_ARTICLE,
      help_text=('How much press / media / web developer buzz will this '
                 'feature generate?'))

  search_tags = forms.CharField(label='Search tags', required=False,
      help_text='Comma separated keywords used only in search')

  comments = forms.CharField(label='Comments', required=False,
      widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
      help_text='Additional comments, caveats, info...')

  class Meta:
    model = Feature
    #exclude = ('shipped_webview_milestone',)

  def __init__(self, *args, **keyargs):
    """Drop Meta-excluded fields and mark required fields for HTML5 validation."""
    super(FeatureForm, self).__init__(*args, **keyargs)

    meta = getattr(self, 'Meta', None)
    exclude = getattr(meta, 'exclude', [])

    for field_name in exclude:
     if field_name in self.fields:
       del self.fields[field_name]

    for field, val in self.fields.iteritems():
      if val.required:
       self.fields[field].widget.attrs['required'] = 'required'
class AppUser(DictModel):
  """Describes a user for whitelisting."""

  #user = db.UserProperty(required=True, verbose_name='Google Account')
  email = db.EmailProperty(required=True)
  #is_admin = db.BooleanProperty(default=False)
  created = db.DateTimeProperty(auto_now_add=True)  # set once when the entity is first put()
  updated = db.DateTimeProperty(auto_now=True)  # refreshed on every put()
def list_with_component(l, component):
  """Return the keys in *l* whose id matches *component*'s key id."""
  # Hoist the target id: component.key().id() is invariant across the loop.
  component_id = component.key().id()
  return [x for x in l if x.id() == component_id]
def list_without_component(l, component):
  """Return the keys in *l* whose id does NOT match *component*'s key id."""
  # Hoist the target id: component.key().id() is invariant across the loop.
  component_id = component.key().id()
  return [x for x in l if x.id() != component_id]
class FeatureOwner(DictModel):
  """Describes subscribers of a web platform feature."""
  created = db.DateTimeProperty(auto_now_add=True)
  updated = db.DateTimeProperty(auto_now=True)
  name = db.StringProperty(required=True)
  email = db.EmailProperty(required=True)
  twitter = db.StringProperty()
  blink_components = db.ListProperty(db.Key)  # components the user subscribes to
  primary_blink_components = db.ListProperty(db.Key)  # components the user owns
  watching_all_features = db.BooleanProperty(default=False)

  def add_to_component_subscribers(self, component_name):
    """Adds the user to the list of Blink component subscribers.

    Returns the result of put(), or None when the component name is unknown.
    """
    c = BlinkComponent.get_by_name(component_name)
    if c:
      # Add the user if they're not already in the list.
      # (idiom: empty-list truthiness instead of `not len(...)`)
      if not list_with_component(self.blink_components, c):
        self.blink_components.append(c.key())
      return self.put()
    return None

  def remove_from_component_subscribers(self, component_name, remove_as_owner=False):
    """Removes the user from the list of Blink component subscribers or as the owner
    of the component.

    When remove_as_owner is True only ownership is dropped; otherwise both the
    subscription and the ownership are dropped.
    """
    c = BlinkComponent.get_by_name(component_name)
    if c:
      if remove_as_owner:
        self.primary_blink_components = list_without_component(self.primary_blink_components, c)
      else:
        self.blink_components = list_without_component(self.blink_components, c)
        self.primary_blink_components = list_without_component(self.primary_blink_components, c)
      return self.put()
    return None

  def add_as_component_owner(self, component_name):
    """Adds the user as the Blink component owner."""
    c = BlinkComponent.get_by_name(component_name)
    if c:
      # Update both the primary list and blink components subscribers if the
      # user is not already in them.
      self.add_to_component_subscribers(component_name)
      if not list_with_component(self.primary_blink_components, c):
        self.primary_blink_components.append(c.key())
      return self.put()
    return None

  def remove_as_component_owner(self, component_name):
    """Drops ownership of the component while keeping the subscription."""
    return self.remove_from_component_subscribers(component_name, remove_as_owner=True)
class HistogramModel(db.Model):
  """Container for a histogram: maps a UMA bucket id to a property name."""
  bucket_id = db.IntegerProperty(required=True)
  property_name = db.StringProperty(required=True)

  MAX_CHUNK_SIZE = 500  # max num features to save for each memcache chunk.

  @classmethod
  def get_property_chunk_memcache_keys(cls, property_class, key_prefix):
    """Return the memcache key names covering all chunks of property_class."""
    num_props = len(property_class.all().fetch(limit=None, keys_only=True))
    chunk_count = len(list_to_chunks(range(0, num_props), cls.MAX_CHUNK_SIZE))
    return ['%s|chunk%s' % (key_prefix, index) for index in range(chunk_count)]

  @classmethod
  def set_property_chunk_memcache_keys(cls, key_prefix, pop_list):
    """Split pop_list into chunks keyed as '<key_prefix>|chunk<i>'."""
    return dict(('%s|chunk%s' % (key_prefix, index), chunk)
                for index, chunk in enumerate(list_to_chunks(pop_list, cls.MAX_CHUNK_SIZE)))

  @classmethod
  def get_all(cls):
    """Return {bucket_id: property_name} for every stored bucket."""
    return dict((bucket.bucket_id, bucket.property_name)
                for bucket in cls.all().fetch(None))
class CssPropertyHistogram(HistogramModel):
  # Histogram of CSS property usage buckets; behavior lives in HistogramModel.
  pass
class FeatureObserverHistogram(HistogramModel):
  # Histogram of FeatureObserver usage buckets; behavior lives in HistogramModel.
  pass
| models.py | 51,306 | Describes a user for whitelisting.
Container for a feature.
Describes subscribers of a web platform feature.
Container for a histogram.
Async notifies subscribers of new features and property changes to features by
posting to a task queue.
Adds the user as the Blink component owner.
Adds the user to the list of Blink component subscribers.
Delete dict keys with None values, and empty lists, recursively.
Returns the list of blink components from live endpoint if unavailable in the cache.
Returns the /web content that use each blink component.
Fetch blink component with given name.
Yield successive n-sized chunk lists from l.
Removes the user from the list of Blink component subscribers or as the owner
of the component.
Updates the db with new Blink components from the json endpoint
from google.appengine.ext.db import djangoformsfrom django.forms import ModelForm import google.appengine.ext.django as django Intent stages and mapping from stage to stage name. insure bottom of list Ordered dictionary, make sure the order of this dictionary matches that of the sorted list above! def to_dict(self): return dict([(p, unicode(getattr(self, p))) for p in self.properties()]) Convert date/datetime to ms-since-epoch ("new Date()").ms = time.mktime(value.utctimetuple())ms += getattr(value, 'microseconds', 0) / 1000output[key] = int(ms) store /web content in memcache UMA metrics.hits = db.IntegerProperty(required=True)total_pages = db.IntegerProperty() Feature dashboard. max num features to save for each memcache chunk. d = OrderedDict(sorted(dict(vals).items(), key=lambda t: t[0])) Find the latest canary major version from the list of windows versions.'text': FOOTPRINT_CHOICES[self.footprint] Further prune response by removing null/[] values.d['id'] = self.key().idTODO: support more than one component. TODO(ericbidelman): Support more than one filter..order('name') TODO(ericbidelman): Support more than one filter. There's no way to do an OR in a single datastore query, and there's a very good chance that the self.get_all() results will already be in memcache, so use an array comprehension to grab the features we want from the array of everything. If we didn't get the expected number of chunks back (or a cache update was requested), do a db query. Features with no active, in dev, proposed features. Shipping features. Exclude features that do not have a desktop shipping milestone. Features with an android shipping milestone but no desktop milestone. No longer pursuing features. Sort the feature list on either Android shipping milestone or desktop shipping milestone, depending on which is specified. If a desktop milestone is defined, that will take default. First sort by name, then sort by feature milestone (latest first). Constructor the proper ordering. 
Memcache doesn't support saving values > 1MB. Break up features list into chunks so we don't hit the limit. Reconstruct feature list by ordering chunks. Get all shipping features. Ordered by shipping milestone (latest first). Get non-shipping features (sans removed or deprecated ones) and append to bottom of list. Filter out features without sample links. Stash existing values when entity is created so we can diff property values later in put() to know what's changed. https://stackoverflow.com/a/41344898 Diff values to see what properties have changed. Create task to email subscribers.name='emailer') Create task to send push notifications Metadata. General info. A list of intent threads in the format "date|subject|url" Chromium details.webbiness = db.IntegerProperty() TODO: figure out what this is Standards details.tests = db.StringProperty()super(forms.CharField, self).__init__(*args, **kwargs) class PlaceholderForm(forms.Form): def __init__(self, *args, **kwargs): super(PlaceholderForm, self).__init__(*args, **kwargs) for field_name in self.fields: field = self.fields.get(field_name) if field: if type(field.widget) in (forms.TextInput, forms.DateInput): field.widget = forms.TextInput(attrs={'placeholder': field.label}) Note that the "required" argument in the following field definitions only mean so much in practice. There's various code in js/admin/feature_form.js, including intentStageChanged(), that adjusts what fields are required (as well as visible at all). 
IOW, when making changes to what form fields are or are not required, look both in the definitions here as well as in js/admin/feature_form.js and make sure the code works as intended.name = PlaceholderCharField(required=True, placeholder='Feature name')shipped_milestone = PlaceholderCharField(required=False, placeholder='First milestone the feature shipped with this status (either enabled by default or experimental)')exclude = ('shipped_webview_milestone',)user = db.UserProperty(required=True, verbose_name='Google Account')is_admin = db.BooleanProperty(default=False) def __eq__(self, other): return self.key().id() == other.key().id() Add the user if they're not already in the list. Update both the primary list and blink components subscribers if the user is not already in them. max num features to save for each memcache chunk. | 5,201 | en | 0.790135 |
# -*- coding: utf-8 -*-
import time
from pymongo import MongoClient
from config import MONGO_CONFIG
def get_current_time(format_str: str = '%Y-%m-%d %H:%M:%S'):
    """Return the current local time rendered with *format_str*.

    :param format_str: strftime pattern, defaults to '%Y-%m-%d %H:%M:%S'
    :return: formatted time string
    """
    now = time.localtime()
    return time.strftime(format_str, now)
class MongoDb:
    """Thin wrapper around the Sogou crawler's MongoDB collections.

    NOTE(review): the original Chinese comments were mojibake-garbled and two
    of them had spilled onto bare (non-comment) lines, breaking the file's
    syntax; they are restored here as English comments, logic unchanged.
    """

    def __init__(self):
        """Build the Mongo client from MONGO_CONFIG and cache collection handles."""
        mongo_uri = 'mongodb://%s:%s@%s:%s' % (
            MONGO_CONFIG['user'],
            MONGO_CONFIG['pwd'],
            MONGO_CONFIG['host'],
            MONGO_CONFIG['port'])
        self.mongo = MongoClient(mongo_uri)
        self.sogou_db = self.mongo['sogou_dev']
        self.sogou_search_col = self.sogou_db['sogou_search_results']
        # self.task_db = self.mongo['sogou_tast']

    def update_sogou_login_cookie(self, username, cookie):
        """Insert or refresh the Sogou WeChat login cookie for *username*.

        :param username: WeChat nickname used as the lookup key
        :param cookie: raw cookie string; a 'DESC=<device>; ' prefix is added
        :return: None
        """
        col = self.sogou_db['sogou_login_cookies']
        ctime = get_current_time()
        find_obj = {
            'nickname': username,
            'is_valid': 1,
        }
        login_item = col.find_one(find_obj)
        print(login_item)
        # Insert a new record when no valid cookie exists for this nickname.
        if not login_item:
            cookie = 'DESC=0; %s' % cookie
            col.insert_one({
                'cookie': cookie,
                'nickname': username,
                'device': '0',
                'state': 'normal',
                'c_time': ctime,
                'm_time': ctime,
                'is_valid': 1,
                'failures': 0,
            })
            return
        # Update the existing record, preserving its device id and
        # resetting the failure counter.
        cookie = 'DESC=%s; %s' % (login_item['device'], cookie)
        col.update_one(find_obj, {
            '$set': {
                'state': 'normal',
                'cookie': cookie,
                'c_time': ctime,
                'm_time': ctime,
                'failures': 0,
            }
        })

    def insert_sogou_search_result(self, result):
        """Upsert one Sogou search result keyed by its 'id' field.

        :param result: result document; mutated in place with timestamps
        """
        ctime = get_current_time()
        find_obj = {
            'id': result['id'],
            'is_valid': 1
        }
        search_item = self.sogou_search_col.find_one(find_obj)
        print(search_item)
        new_result = result
        # Insert a new record when this id has never been seen.
        if not search_item:
            new_result["c_time"] = ctime
            new_result["m_time"] = ctime
            new_result["is_valid"] = 1
            self.sogou_search_col.insert_one(new_result)
            return
        # Update the existing record, bumping only the modification time.
        new_result["m_time"] = ctime
        self.sogou_search_col.update_one(find_obj, {
            '$set': new_result
        })
}) | src/sogou_wechat/mongoDB.py | 2,862 | ๅๅงๅ
ๅๅงๅ mongo db
获取当前时间，默认为 2020-01-01 00:00:00 格式
:param format_str: 格式
:return:
ไฟๅญๆ็ๆ็ดขไฟกๆฏ
:param results: ็ปๆๆฐ็ป
ๆดๆฐๆ็ๅพฎไฟก็ปๅฝ cookie ไฟกๆฏ
:param username:
:param cookie:
:return:
-*- coding: utf-8 -*- self.task_db = self.mongo['sogou_tast'] ๆๅ
ฅๆฐๆฐๆฎ ๆดๆฐๅๆๆฐๆฎ ๆๅ
ฅๆฐๆฐๆฎ ๆดๆฐๅๆๆฐๆฎ | 261 | zh | 0.789484 |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitso(Exchange):
    def describe(self):
        """Static exchange metadata consumed by the ccxt core: identity,
        capability flags('has'), endpoint map('api'), precision options,
        timeframes and server-error mapping."""
        return self.deep_extend(super(bitso, self).describe(), {
            'id': 'bitso',
            'name': 'Bitso',
            'countries': ['MX'],  # Mexico
            'rateLimit': 2000,  # 30 requests per minute
            'version': 'v3',
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'addMargin': False,
                'cancelOrder': True,
                'createOrder': True,
                'createReduceOnlyOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchDepositAddress': True,
                'fetchFundingFee': False,
                'fetchFundingFees': True,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchLeverage': False,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderTrades': True,
                'fetchPosition': False,
                'fetchPositions': False,
                'fetchPositionsRisk': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': True,
                'fetchTransfer': False,
                'fetchTransfers': False,
                'reduceMargin': False,
                'setLeverage': False,
                'setMarginMode': False,
                'setPositionMode': False,
                'transfer': False,
                'withdraw': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/87295554-11f98280-c50e-11ea-80d6-15b3bafa8cbf.jpg',
                'api': 'https://api.bitso.com',
                'www': 'https://bitso.com',
                'doc': 'https://bitso.com/api_info',
                'fees': 'https://bitso.com/fees',
                'referral': 'https://bitso.com/?ref=itej',
            },
            'precisionMode': TICK_SIZE,
            'options': {
                # per-currency price precision overrides(tick sizes);
                # consumed by fetch_markets() as a fallback
                'precision': {
                    'XRP': 0.000001,
                    'MXN': 0.01,
                    'TUSD': 0.01,
                },
                'defaultPrecision': 0.00000001,
            },
            # unified timeframe -> 'time_bucket' value in seconds
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '4h': '14400',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
            },
            'api': {
                'public': {
                    'get': [
                        'available_books',
                        'ticker',
                        'order_book',
                        'trades',
                        'ohlc',
                    ],
                },
                'private': {
                    'get': [
                        'account_status',
                        'balance',
                        'fees',
                        'fundings',
                        'fundings/{fid}',
                        'funding_destination',
                        'kyc_documents',
                        'ledger',
                        'ledger/trades',
                        'ledger/fees',
                        'ledger/fundings',
                        'ledger/withdrawals',
                        'mx_bank_codes',
                        'open_orders',
                        'order_trades/{oid}',
                        'orders/{oid}',
                        'user_trades',
                        'user_trades/{tid}',
                        'withdrawals/',
                        'withdrawals/{wid}',
                    ],
                    'post': [
                        'bitcoin_withdrawal',
                        'debit_card_withdrawal',
                        'ether_withdrawal',
                        'ripple_withdrawal',
                        'bcash_withdrawal',
                        'litecoin_withdrawal',
                        'orders',
                        'phone_number',
                        'phone_verification',
                        'phone_withdrawal',
                        'spei_withdrawal',
                        'ripple_withdrawal',
                        'bcash_withdrawal',
                        'litecoin_withdrawal',
                    ],
                    'delete': [
                        'orders/{oid}',
                        'orders/all',
                    ],
                },
            },
            # Bitso error code -> ccxt exception class
            'exceptions': {
                '0201': AuthenticationError,  # Invalid Nonce or Invalid Credentials
                '104': InvalidNonce,  # Cannot perform request - nonce must be higher than 1520307203724237
                '0304': BadRequest,  # {"success":false,"error":{"code":"0304","message":"The field time_bucket() is either invalid or missing"}}
            },
        })
    def fetch_markets(self, params={}):
        """Fetch the list of spot markets from public GET /available_books.

        Converts each 'book' entry into the unified ccxt market structure,
        including flat and tiered maker/taker fees and precision/limit data.
        """
        response = self.publicGetAvailableBooks(params)
        #
        # {
        #     "success":true,
        #     "payload":[
        #         {
        #             "book":"btc_mxn",
        #             "minimum_price":"500",
        #             "maximum_price":"10000000",
        #             "minimum_amount":"0.00005",
        #             "maximum_amount":"500",
        #             "minimum_value":"5",
        #             "maximum_value":"10000000",
        #             "tick_size":"0.01",
        #             "fees":{
        #                 "flat_rate":{"maker":"0.500","taker":"0.650"},
        #                 "structure":[
        #                     {"volume":"1500000","maker":"0.00500","taker":"0.00650"},
        #                     {"volume":"2000000","maker":"0.00490","taker":"0.00637"},
        #                     {"volume":"5000000","maker":"0.00480","taker":"0.00624"},
        #                     {"volume":"7000000","maker":"0.00440","taker":"0.00572"},
        #                     {"volume":"10000000","maker":"0.00420","taker":"0.00546"},
        #                     {"volume":"15000000","maker":"0.00400","taker":"0.00520"},
        #                     {"volume":"35000000","maker":"0.00370","taker":"0.00481"},
        #                     {"volume":"50000000","maker":"0.00300","taker":"0.00390"},
        #                     {"volume":"150000000","maker":"0.00200","taker":"0.00260"},
        #                     {"volume":"250000000","maker":"0.00100","taker":"0.00130"},
        #                     {"volume":"9999999999","maker":"0.00000","taker":"0.00130"},
        #                 ]
        #             }
        #         },
        #     ]
        # }
        markets = self.safe_value(response, 'payload')
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'book')
            baseId, quoteId = id.split('_')  # e.g. 'btc_mxn' -> ('btc', 'mxn')
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.safe_currency_code(base)
            quote = self.safe_currency_code(quote)
            fees = self.safe_value(market, 'fees', {})
            flatRate = self.safe_value(fees, 'flat_rate', {})
            takerString = self.safe_string(flatRate, 'taker')
            makerString = self.safe_string(flatRate, 'maker')
            # flat rates are percentages, e.g. "0.650" -> 0.0065
            taker = self.parse_number(Precise.string_div(takerString, '100'))
            maker = self.parse_number(Precise.string_div(makerString, '100'))
            feeTiers = self.safe_value(fees, 'structure', [])
            fee = {
                'taker': taker,
                'maker': maker,
                'percentage': True,
                'tierBased': True,
            }
            takerFees = []
            makerFees = []
            for j in range(0, len(feeTiers)):
                tier = feeTiers[j]
                volume = self.safe_number(tier, 'volume')
                takerFee = self.safe_number(tier, 'taker')
                makerFee = self.safe_number(tier, 'maker')
                takerFees.append([volume, takerFee])
                makerFees.append([volume, makerFee])
                # the first tier overrides the flat rates as the current fee
                if j == 0:
                    fee['taker'] = takerFee
                    fee['maker'] = makerFee
            tiers = {
                'taker': takerFees,
                'maker': makerFees,
            }
            fee['tiers'] = tiers
            # price precision fallback from options['precision'][quote]
            defaultPricePrecision = self.safe_number(self.options['precision'], quote, self.options['defaultPrecision'])
            # merge the fee dict last so its keys win over the defaults above
            result.append(self.extend({
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'active': None,
                'contract': False,
                'linear': None,
                'inverse': None,
                'taker': taker,
                'maker': maker,
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': self.safe_number(self.options['precision'], base, self.options['defaultPrecision']),
                    'price': self.safe_number(market, 'tick_size', defaultPricePrecision),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': self.safe_number(market, 'minimum_amount'),
                        'max': self.safe_number(market, 'maximum_amount'),
                    },
                    'price': {
                        'min': self.safe_number(market, 'minimum_price'),
                        'max': self.safe_number(market, 'maximum_price'),
                    },
                    'cost': {
                        'min': self.safe_number(market, 'minimum_value'),
                        'max': self.safe_number(market, 'maximum_value'),
                    },
                },
                'info': market,
            }, fee))
        return result
def parse_balance(self, response):
payload = self.safe_value(response, 'payload', {})
balances = self.safe_value(payload, 'balances')
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'locked')
account['total'] = self.safe_string(balance, 'total')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# "success": True,
# "payload": {
# "balances": [
# {
# "currency": "bat",
# "available": "0.00000000",
# "locked": "0.00000000",
# "total": "0.00000000",
# "pending_deposit": "0.00000000",
# "pending_withdrawal": "0.00000000"
# },
# {
# "currency": "bch",
# "available": "0.00000000",
# "locked": "0.00000000",
# "total": "0.00000000",
# "pending_deposit": "0.00000000",
# "pending_withdrawal": "0.00000000"
# },
# ],
# },
# }
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'book': self.market_id(symbol),
}
response = self.publicGetOrderBook(self.extend(request, params))
orderbook = self.safe_value(response, 'payload')
timestamp = self.parse8601(self.safe_string(orderbook, 'updated_at'))
return self.parse_order_book(orderbook, symbol, timestamp, 'bids', 'asks', 'price', 'amount')
def parse_ticker(self, ticker, market=None):
#
# {
# "high":"37446.85",
# "last":"36599.54",
# "created_at":"2022-01-28T12:06:11+00:00",
# "book":"btc_usdt",
# "volume":"7.29075419",
# "vwap":"36579.1564400307",
# "low":"35578.52",
# "ask":"36574.76",
# "bid":"36538.22",
# "change_24":"-105.64"
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.parse8601(self.safe_string(ticker, 'created_at'))
vwap = self.safe_string(ticker, 'vwap')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = Precise.string_mul(baseVolume, vwap)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'book': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
ticker = self.safe_value(response, 'payload')
#
# {
# "success":true,
# "payload":{
# "high":"37446.85",
# "last":"37051.96",
# "created_at":"2022-01-28T17:03:29+00:00",
# "book":"btc_usdt",
# "volume":"6.16176186",
# "vwap":"36582.6293169472",
# "low":"35578.52",
# "ask":"37083.62",
# "bid":"37039.66",
# "change_24":"478.45"
# }
# }
#
return self.parse_ticker(ticker, market)
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles via public GET /ohlc.

        The endpoint is windowed by 'start'/'end' millisecond timestamps:
        - since + limit: window is [since, since + limit * timeframe]
        - limit only: trailing window ending now
        - since only: just 'start' is sent
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'book': market['id'],
            'time_bucket': self.timeframes[timeframe],  # bucket size in seconds
        }
        if since is not None:
            request['start'] = since
            if limit is not None:
                duration = self.parse_timeframe(timeframe)  # seconds per candle
                request['end'] = self.sum(since, duration * limit * 1000)
        elif limit is not None:
            now = self.milliseconds()
            request['end'] = now
            request['start'] = now - self.parse_timeframe(timeframe) * 1000 * limit
        response = self.publicGetOhlc(self.extend(request, params))
        #
        # {
        #     "success":true,
        #     "payload": [
        #         {
        #             "bucket_start_time":1648219140000,
        #             "first_trade_time":1648219154990,
        #             "last_trade_time":1648219189441,
        #             "first_rate":"44958.60",
        #             "last_rate":"44979.88",
        #             "min_rate":"44957.33",
        #             "max_rate":"44979.88",
        #             "trade_count":8,
        #             "volume":"0.00082814",
        #             "vwap":"44965.02"
        #         },
        #     ]
        # }
        #
        payload = self.safe_value(response, 'payload', [])
        return self.parse_ohlcvs(payload, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m'):
#
# {
# "bucket_start_time":1648219140000,
# "first_trade_time":1648219154990,
# "last_trade_time":1648219189441,
# "first_rate":"44958.60",
# "last_rate":"44979.88",
# "min_rate":"44957.33",
# "max_rate":"44979.88",
# "trade_count":8,
# "volume":"0.00082814",
# "vwap":"44965.02"
# },
#
return [
self.safe_integer(ohlcv, 'bucket_start_time'),
self.safe_number(ohlcv, 'first_rate'),
self.safe_number(ohlcv, 'max_rate'),
self.safe_number(ohlcv, 'min_rate'),
self.safe_number(ohlcv, 'last_rate'),
self.safe_number(ohlcv, 'volume'),
]
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:14:53+0000",
# "amount": "0.00026562",
# "maker_side": "sell",
# "price": "56471.55",
# "tid": "52557338"
# }
#
# fetchMyTrades(private)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:31:03+0000",
# "minor": "11.30356000",
# "major": "-0.00020000",
# "fees_amount": "0.01119052",
# "fees_currency": "usdt",
# "minor_currency": "usdt",
# "major_currency": "btc",
# "oid": "djTzMIWx2Vi3iMjl",
# "tid": "52559051",
# "price": "56517.80",
# "side": "sell",
# "maker_side": "buy"
# }
#
# fetchOrderTrades(private)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:30:52+0000",
# "minor": "-11.33047916",
# "major": "0.00020020",
# "fees_amount": "0.00000020",
# "fees_currency": "btc",
# "minor_currency": "usdt",
# "major_currency": "btc",
# "oid": "O0D2zcljjjQF5xlG",
# "tid": "52559030",
# "price": "56595.80",
# "side": "buy",
# "maker_side": "sell"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
marketId = self.safe_string(trade, 'book')
symbol = self.safe_symbol(marketId, market, '_')
side = self.safe_string_2(trade, 'side', 'maker_side')
makerSide = self.safe_string(trade, 'maker_side')
takerOrMaker = None
if side == makerSide:
takerOrMaker = 'maker'
else:
takerOrMaker = 'taker'
amount = self.safe_string_2(trade, 'amount', 'major')
if amount is not None:
amount = Precise.string_abs(amount)
fee = None
feeCost = self.safe_string(trade, 'fees_amount')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fees_currency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
cost = self.safe_string(trade, 'minor')
if cost is not None:
cost = Precise.string_abs(cost)
price = self.safe_string(trade, 'price')
orderId = self.safe_string(trade, 'oid')
id = self.safe_string(trade, 'tid')
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'book': market['id'],
}
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response['payload'], market, since, limit)
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privateGetFees(params)
#
# {
# success: True,
# payload: {
# fees: [
# {
# book: 'btc_mxn',
# fee_percent: '0.6500',
# fee_decimal: '0.00650000',
# taker_fee_percent: '0.6500',
# taker_fee_decimal: '0.00650000',
# maker_fee_percent: '0.5000',
# maker_fee_decimal: '0.00500000',
# volume_currency: 'mxn',
# current_volume: '0.00',
# next_volume: '1500000.00',
# next_maker_fee_percent: '0.490',
# next_taker_fee_percent: '0.637',
# nextVolume: '1500000.00',
# nextFee: '0.490',
# nextTakerFee: '0.637'
# },
# ...
# ],
# deposit_fees: [
# {
# currency: 'btc',
# method: 'rewards',
# fee: '0.00',
# is_fixed: False
# },
# ...
# ],
# withdrawal_fees: {
# ada: '0.20958100',
# bch: '0.00009437',
# ars: '0',
# btc: '0.00001209',
# ...
# }
# }
# }
#
payload = self.safe_value(response, 'payload', {})
fees = self.safe_value(payload, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'book')
symbol = self.safe_symbol(marketId, None, '_')
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(fee, 'maker_fee_decimal'),
'taker': self.safe_number(fee, 'taker_fee_decimal'),
'percentage': True,
'tierBased': True,
}
return result
def fetch_my_trades(self, symbol=None, since=None, limit=25, params={}):
self.load_markets()
market = self.market(symbol)
# the don't support fetching trades starting from a date yet
# use the `marker` extra param for that
# self is not a typo, the variable name is 'marker'(don't confuse with 'market')
markerInParams = ('marker' in params)
# warn the user with an exception if the user wants to filter
# starting from since timestamp, but does not set the trade id with an extra 'marker' param
if (since is not None) and not markerInParams:
raise ExchangeError(self.id + ' fetchMyTrades does not support fetching trades starting from a timestamp with the `since` argument, use the `marker` extra param to filter starting from an integer trade id')
# convert it to an integer unconditionally
if markerInParams:
params = self.extend(params, {
'marker': int(params['marker']),
})
request = {
'book': market['id'],
'limit': limit, # default = 25, max = 100
# 'sort': 'desc', # default = desc
# 'marker': id, # integer id to start from
}
response = self.privateGetUserTrades(self.extend(request, params))
return self.parse_trades(response['payload'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'book': self.market_id(symbol),
'side': side,
'type': type,
'major': self.amount_to_precision(symbol, amount),
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostOrders(self.extend(request, params))
id = self.safe_string(response['payload'], 'oid')
return {
'info': response,
'id': id,
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'oid': id,
}
return self.privateDeleteOrdersOid(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'partial-fill': 'open', # self is a common substitution in ccxt
'completed': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'oid')
side = self.safe_string(order, 'side')
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'book')
symbol = self.safe_symbol(marketId, market, '_')
orderType = self.safe_string(order, 'type')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'original_amount')
remaining = self.safe_string(order, 'unfilled_amount')
clientOrderId = self.safe_string(order, 'client_id')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': orderType,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'remaining': remaining,
'filled': None,
'status': status,
'fee': None,
'average': None,
'trades': None,
}, market)
    def fetch_open_orders(self, symbol=None, since=None, limit=25, params={}):
        """Fetch up to `limit` open orders for `symbol`.

        The endpoint cannot filter by timestamp, so passing `since` without
        an integer `marker` extra param (a trade id to start from) raises
        ExchangeError.
        """
        self.load_markets()
        market = self.market(symbol)
        # they don't support fetching trades starting from a date yet
        # use the `marker` extra param for that
        # this is not a typo, the variable name is 'marker'(don't confuse with 'market')
        markerInParams = ('marker' in params)
        # warn the user with an exception if the user wants to filter
        # starting from since timestamp, but does not set the trade id with an extra 'marker' param
        if (since is not None) and not markerInParams:
            raise ExchangeError(self.id + ' fetchOpenOrders does not support fetching orders starting from a timestamp with the `since` argument, use the `marker` extra param to filter starting from an integer trade id')
        # convert it to an integer unconditionally
        if markerInParams:
            params = self.extend(params, {
                'marker': int(params['marker']),
            })
        request = {
            'book': market['id'],
            'limit': limit,  # default = 25, max = 100
            # 'sort': 'desc',  # default = desc
            # 'marker': id,  # integer id to start from
        }
        response = self.privateGetOpenOrders(self.extend(request, params))
        orders = self.parse_orders(response['payload'], market, since, limit)
        return orders
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privateGetOrdersOid({
'oid': id,
})
payload = self.safe_value(response, 'payload')
if isinstance(payload, list):
numOrders = len(response['payload'])
if numOrders == 1:
return self.parse_order(payload[0])
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'oid': id,
}
response = self.privateGetOrderTradesOid(self.extend(request, params))
return self.parse_trades(response['payload'], market)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'fund_currency': currency['id'],
}
response = self.privateGetFundingDestination(self.extend(request, params))
address = self.safe_string(response['payload'], 'account_identifier')
tag = None
if address.find('?dt=') >= 0:
parts = address.split('?dt=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
def fetch_funding_fees(self, params={}):
self.load_markets()
response = self.privateGetFees(params)
#
# {
# success: True,
# payload: {
# fees: [
# {
# book: 'btc_mxn',
# fee_percent: '0.6500',
# fee_decimal: '0.00650000',
# taker_fee_percent: '0.6500',
# taker_fee_decimal: '0.00650000',
# maker_fee_percent: '0.5000',
# maker_fee_decimal: '0.00500000',
# volume_currency: 'mxn',
# current_volume: '0.00',
# next_volume: '1500000.00',
# next_maker_fee_percent: '0.490',
# next_taker_fee_percent: '0.637',
# nextVolume: '1500000.00',
# nextFee: '0.490',
# nextTakerFee: '0.637'
# },
# ...
# ],
# deposit_fees: [
# {
# currency: 'btc',
# method: 'rewards',
# fee: '0.00',
# is_fixed: False
# },
# ...
# ],
# withdrawal_fees: {
# ada: '0.20958100',
# bch: '0.00009437',
# ars: '0',
# btc: '0.00001209',
# ...
# }
# }
# }
#
payload = self.safe_value(response, 'payload', {})
depositFees = self.safe_value(payload, 'deposit_fees', [])
deposit = {}
for i in range(0, len(depositFees)):
depositFee = depositFees[i]
currencyId = self.safe_string(depositFee, 'currency')
code = self.safe_currency_code(currencyId)
deposit[code] = self.safe_number(depositFee, 'fee')
withdraw = {}
withdrawalFees = self.safe_value(payload, 'withdrawal_fees', [])
currencyIds = list(withdrawalFees.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
withdraw[code] = self.safe_number(withdrawalFees, currencyId)
return {
'info': response,
'deposit': deposit,
'withdraw': withdraw,
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
methods = {
'BTC': 'Bitcoin',
'ETH': 'Ether',
'XRP': 'Ripple',
'BCH': 'Bcash',
'LTC': 'Litecoin',
}
currency = self.currency(code)
method = methods[code] if (code in methods) else None
if method is None:
raise ExchangeError(self.id + ' not valid withdraw coin: ' + code)
request = {
'amount': amount,
'address': address,
'destination_tag': tag,
}
classMethod = 'privatePost' + method + 'Withdrawal'
response = getattr(self, classMethod)(self.extend(request, params))
#
# {
# "success": True,
# "payload": [
# {
# "wid": "c5b8d7f0768ee91d3b33bee648318688",
# "status": "pending",
# "created_at": "2016-04-08T17:52:31.000+00:00",
# "currency": "btc",
# "method": "Bitcoin",
# "amount": "0.48650929",
# "details": {
# "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp",
# "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433"
# }
# },
# ]
# }
#
payload = self.safe_value(response, 'payload', [])
first = self.safe_value(payload, 0)
return self.parse_transaction(first, currency)
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw withdrawal record into the unified transaction shape.

        Only the withdrawal id ('wid') and the currency code are mapped at
        the moment; every other unified field is reported as None, with the
        raw record preserved under 'info'.
        """
        #
        # withdraw
        #
        #     {
        #         "wid": "c5b8d7f0768ee91d3b33bee648318688",
        #         "status": "pending",
        #         "created_at": "2016-04-08T17:52:31.000+00:00",
        #         "currency": "btc",
        #         "method": "Bitcoin",
        #         "amount": "0.48650929",
        #         "details": {
        #             "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp",
        #             "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433"
        #         }
        #     }
        #
        currency = self.safe_currency(None, currency)
        return {
            'id': self.safe_string(transaction, 'wid'),
            'txid': None,
            'timestamp': None,
            'datetime': None,
            'network': None,
            'addressFrom': None,
            'address': None,
            'addressTo': None,
            'amount': None,
            'type': None,
            'currency': currency['code'],
            'status': None,
            'updated': None,
            'tagFrom': None,
            'tag': None,
            'tagTo': None,
            'comment': None,
            'fee': None,
            'info': transaction,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the full URL, body and headers for an API request.

        Private calls are signed with HMAC over nonce + HTTP method +
        endpoint + body, sent in the Authorization header as
        'Bitso <apiKey>:<nonce>:<signature>'.
        """
        endpoint = '/' + self.version + '/' + self.implode_params(path, params)
        # params not consumed by the URL template go into the query string (GET)
        # or the JSON body (other methods)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                endpoint += '?' + self.urlencode(query)
        url = self.urls['api'] + endpoint
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            request = ''.join([nonce, method, endpoint])
            if method != 'GET':
                if query:
                    # the signed message must include the exact body bytes sent
                    body = self.json(query)
                    request += body
            signature = self.hmac(self.encode(request), self.encode(self.secret))
            auth = self.apiKey + ':' + nonce + ':' + signature
            headers = {
                'Authorization': 'Bitso ' + auth,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Raise a unified exception when the exchange signals an error.

        Responses carry a 'success' flag (sometimes as the strings 'true' or
        '1'); when it is falsy, the error code is matched against
        self.exceptions, falling back to a generic ExchangeError.
        """
        if response is None:
            return  # fallback to default error handler
        if 'success' in response:
            #
            # {"success":false,"error":{"code":104,"message":"Cannot perform request - nonce must be higher than 1520307203724237"}}
            #
            success = self.safe_value(response, 'success', False)
            # normalize string-typed success flags to a boolean
            if isinstance(success, str):
                if (success == 'true') or (success == '1'):
                    success = True
                else:
                    success = False
            if not success:
                feedback = self.id + ' ' + self.json(response)
                error = self.safe_value(response, 'error')
                if error is None:
                    raise ExchangeError(feedback)
                code = self.safe_string(error, 'code')
                # raises a mapped exception type if the code is known
                self.throw_exactly_matched_exception(self.exceptions, code, feedback)
                raise ExchangeError(feedback)
| python/ccxt/bitso.py | 40,287 | -*- coding: utf-8 -*- PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.mdhow-to-contribute-code Mexico 30 requests per minute Invalid Nonce or Invalid Credentials Cannot perform request - nonce must be higher than 1520307203724237 {"success":false,"error":{"code":"0304","message":"The field time_bucket() is either invalid or missing"}} { "success":true, "payload":[ { "book":"btc_mxn", "minimum_price":"500", "maximum_price":"10000000", "minimum_amount":"0.00005", "maximum_amount":"500", "minimum_value":"5", "maximum_value":"10000000", "tick_size":"0.01", "fees":{ "flat_rate":{"maker":"0.500","taker":"0.650"}, "structure":[ {"volume":"1500000","maker":"0.00500","taker":"0.00650"}, {"volume":"2000000","maker":"0.00490","taker":"0.00637"}, {"volume":"5000000","maker":"0.00480","taker":"0.00624"}, {"volume":"7000000","maker":"0.00440","taker":"0.00572"}, {"volume":"10000000","maker":"0.00420","taker":"0.00546"}, {"volume":"15000000","maker":"0.00400","taker":"0.00520"}, {"volume":"35000000","maker":"0.00370","taker":"0.00481"}, {"volume":"50000000","maker":"0.00300","taker":"0.00390"}, {"volume":"150000000","maker":"0.00200","taker":"0.00260"}, {"volume":"250000000","maker":"0.00100","taker":"0.00130"}, {"volume":"9999999999","maker":"0.00000","taker":"0.00130"}, ] } }, ] } { "success": True, "payload": { "balances": [ { "currency": "bat", "available": "0.00000000", "locked": "0.00000000", "total": "0.00000000", "pending_deposit": "0.00000000", "pending_withdrawal": "0.00000000" }, { "currency": "bch", "available": "0.00000000", "locked": "0.00000000", "total": "0.00000000", "pending_deposit": "0.00000000", "pending_withdrawal": "0.00000000" }, ], }, } { "high":"37446.85", "last":"36599.54", "created_at":"2022-01-28T12:06:11+00:00", "book":"btc_usdt", "volume":"7.29075419", "vwap":"36579.1564400307", "low":"35578.52", "ask":"36574.76", "bid":"36538.22", 
"change_24":"-105.64" } { "success":true, "payload":{ "high":"37446.85", "last":"37051.96", "created_at":"2022-01-28T17:03:29+00:00", "book":"btc_usdt", "volume":"6.16176186", "vwap":"36582.6293169472", "low":"35578.52", "ask":"37083.62", "bid":"37039.66", "change_24":"478.45" } } { "success":true, "payload": [ { "bucket_start_time":1648219140000, "first_trade_time":1648219154990, "last_trade_time":1648219189441, "first_rate":"44958.60", "last_rate":"44979.88", "min_rate":"44957.33", "max_rate":"44979.88", "trade_count":8, "volume":"0.00082814", "vwap":"44965.02" }, ] } { "bucket_start_time":1648219140000, "first_trade_time":1648219154990, "last_trade_time":1648219189441, "first_rate":"44958.60", "last_rate":"44979.88", "min_rate":"44957.33", "max_rate":"44979.88", "trade_count":8, "volume":"0.00082814", "vwap":"44965.02" }, fetchTrades(public) { "book": "btc_usdt", "created_at": "2021-11-24T12:14:53+0000", "amount": "0.00026562", "maker_side": "sell", "price": "56471.55", "tid": "52557338" } fetchMyTrades(private) { "book": "btc_usdt", "created_at": "2021-11-24T12:31:03+0000", "minor": "11.30356000", "major": "-0.00020000", "fees_amount": "0.01119052", "fees_currency": "usdt", "minor_currency": "usdt", "major_currency": "btc", "oid": "djTzMIWx2Vi3iMjl", "tid": "52559051", "price": "56517.80", "side": "sell", "maker_side": "buy" } fetchOrderTrades(private) { "book": "btc_usdt", "created_at": "2021-11-24T12:30:52+0000", "minor": "-11.33047916", "major": "0.00020020", "fees_amount": "0.00000020", "fees_currency": "btc", "minor_currency": "usdt", "major_currency": "btc", "oid": "O0D2zcljjjQF5xlG", "tid": "52559030", "price": "56595.80", "side": "buy", "maker_side": "sell" } { success: True, payload: { fees: [ { book: 'btc_mxn', fee_percent: '0.6500', fee_decimal: '0.00650000', taker_fee_percent: '0.6500', taker_fee_decimal: '0.00650000', maker_fee_percent: '0.5000', maker_fee_decimal: '0.00500000', volume_currency: 'mxn', current_volume: '0.00', next_volume: 
'1500000.00', next_maker_fee_percent: '0.490', next_taker_fee_percent: '0.637', nextVolume: '1500000.00', nextFee: '0.490', nextTakerFee: '0.637' }, ... ], deposit_fees: [ { currency: 'btc', method: 'rewards', fee: '0.00', is_fixed: False }, ... ], withdrawal_fees: { ada: '0.20958100', bch: '0.00009437', ars: '0', btc: '0.00001209', ... } } } the don't support fetching trades starting from a date yet use the `marker` extra param for that self is not a typo, the variable name is 'marker'(don't confuse with 'market') warn the user with an exception if the user wants to filter starting from since timestamp, but does not set the trade id with an extra 'marker' param convert it to an integer unconditionally default = 25, max = 100 'sort': 'desc', default = desc 'marker': id, integer id to start from self is a common substitution in ccxt the don't support fetching trades starting from a date yet use the `marker` extra param for that self is not a typo, the variable name is 'marker'(don't confuse with 'market') warn the user with an exception if the user wants to filter starting from since timestamp, but does not set the trade id with an extra 'marker' param convert it to an integer unconditionally default = 25, max = 100 'sort': 'desc', default = desc 'marker': id, integer id to start from { success: True, payload: { fees: [ { book: 'btc_mxn', fee_percent: '0.6500', fee_decimal: '0.00650000', taker_fee_percent: '0.6500', taker_fee_decimal: '0.00650000', maker_fee_percent: '0.5000', maker_fee_decimal: '0.00500000', volume_currency: 'mxn', current_volume: '0.00', next_volume: '1500000.00', next_maker_fee_percent: '0.490', next_taker_fee_percent: '0.637', nextVolume: '1500000.00', nextFee: '0.490', nextTakerFee: '0.637' }, ... ], deposit_fees: [ { currency: 'btc', method: 'rewards', fee: '0.00', is_fixed: False }, ... ], withdrawal_fees: { ada: '0.20958100', bch: '0.00009437', ars: '0', btc: '0.00001209', ... 
} } } { "success": True, "payload": [ { "wid": "c5b8d7f0768ee91d3b33bee648318688", "status": "pending", "created_at": "2016-04-08T17:52:31.000+00:00", "currency": "btc", "method": "Bitcoin", "amount": "0.48650929", "details": { "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp", "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433" } }, ] } withdraw { "wid": "c5b8d7f0768ee91d3b33bee648318688", "status": "pending", "created_at": "2016-04-08T17:52:31.000+00:00", "currency": "btc", "method": "Bitcoin", "amount": "0.48650929", "details": { "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp", "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433" } } fallback to default error handler {"success":false,"error":{"code":104,"message":"Cannot perform request - nonce must be higher than 1520307203724237"}} | 9,947 | en | 0.602412 |
# list all object store access policies
res = client.get_object_store_access_policies()
print(res)
# fix: use isinstance() instead of `type(res) == ...` — it is the idiomatic
# type check and also accepts subclasses of ValidResponse
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))

# Valid fields: continuation_token, filter, ids, limit, names, offset, sort
# See section "Common Fields" for examples
import logging
import urllib.parse
from typing import Any, Dict, Optional, Type, Union
from globus_sdk import config, exc, utils
from globus_sdk.authorizers import GlobusAuthorizer
from globus_sdk.paging import PaginatorTable
from globus_sdk.response import GlobusHTTPResponse
from globus_sdk.scopes import ScopeBuilder
from globus_sdk.transport import RequestsTransport
log = logging.getLogger(__name__)
class BaseClient:
    r"""
    Abstract base class for clients with error handling for Globus APIs.

    :param authorizer: A ``GlobusAuthorizer`` which will generate Authorization headers
    :type authorizer: :class:`GlobusAuthorizer\
            <globus_sdk.authorizers.base.GlobusAuthorizer>`
    :param app_name: Optional "nice name" for the application. Has no bearing on the
        semantics of client actions. It is just passed as part of the User-Agent
        string, and may be useful when debugging issues with the Globus Team
    :type app_name: str
    :param transport_params: Options to pass to the transport for this client
    :type transport_params: dict

    All other parameters are for internal use and should be ignored.
    """

    # service name is used to lookup a service URL from config
    service_name: str = "_base"
    # path under the client base URL
    base_path: str = "/"

    #: the class for errors raised by this client on HTTP 4xx and 5xx errors
    #: this can be set in subclasses, but must always be a subclass of GlobusError
    error_class: Type[exc.GlobusAPIError] = exc.GlobusAPIError

    #: the type of Transport which will be used, defaults to ``RequestsTransport``
    transport_class: Type[RequestsTransport] = RequestsTransport

    #: the scopes for this client may be present as a ``ScopeBuilder``
    scopes: Optional[ScopeBuilder] = None

    def __init__(
        self,
        *,
        environment: Optional[str] = None,
        base_url: Optional[str] = None,
        authorizer: Optional[GlobusAuthorizer] = None,
        app_name: Optional[str] = None,
        transport_params: Optional[Dict[str, Any]] = None,
    ):
        # explicitly check the `service_name` to ensure that it was set
        #
        # unfortunately, we can't rely on declaring BaseClient as an ABC because it
        # doesn't have any abstract methods
        #
        # if we declare `service_name` without a value, we get AttributeError on access
        # instead of the (desired) TypeError when instantiating a BaseClient because
        # it's abstract
        if self.service_name == "_base":
            raise NotImplementedError(
                "Cannot instantiate clients which do not set a 'service_name'"
            )
        # fix: use lazy %-style log arguments (consistent with the calls in
        # `request` below) so the message is only formatted when emitted
        log.info(
            'Creating client of type %s for service "%s"',
            type(self),
            self.service_name,
        )

        # if an environment was passed, it will be used, but otherwise lookup
        # the env var -- and in the special case of `production` translate to
        # `default`, regardless of the source of that value
        # logs the environment when it isn't `default`
        self.environment = config.get_environment_name(environment)

        self.transport = self.transport_class(**(transport_params or {}))
        log.debug("initialized transport of type %s", type(self.transport))

        if not self.service_name and not base_url:
            raise ValueError("Either service_name or base_url must be set")

        self.base_url = utils.slash_join(
            config.get_service_url(self.service_name, environment=self.environment)
            if base_url is None
            else base_url,
            self.base_path,
        )

        self.authorizer = authorizer

        # set application name if given
        self._app_name = None
        if app_name is not None:
            self.app_name = app_name

        # setup paginated methods
        self.paginated = PaginatorTable(self)

    @property
    def app_name(self) -> Optional[str]:
        return self._app_name

    @app_name.setter
    def app_name(self, value: str) -> None:
        # keeps the transport's User-Agent string in sync with the app name
        self._app_name = self.transport.user_agent = value

    @utils.classproperty
    def resource_server(cls) -> Optional[str]:
        """
        The resource_server name for the API and scopes associated with this client.

        This information is pulled from the ``scopes`` attribute of the client class.
        If the client does not have associated scopes, this value will be ``None``.
        """
        if cls.scopes is None:
            return None
        return cls.scopes.resource_server

    def get(
        self,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> GlobusHTTPResponse:
        """
        Make a GET request to the specified path.

        See :py:meth:`~.BaseClient.request` for details on the various parameters.

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        log.debug("GET to %s with query_params %s", path, query_params)
        return self.request("GET", path, query_params=query_params, headers=headers)

    def post(
        self,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
        headers: Optional[Dict[str, str]] = None,
        encoding: Optional[str] = None,
    ) -> GlobusHTTPResponse:
        """
        Make a POST request to the specified path.

        See :py:meth:`~.BaseClient.request` for details on the various parameters.

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        log.debug("POST to %s with query_params %s", path, query_params)
        return self.request(
            "POST",
            path,
            query_params=query_params,
            data=data,
            headers=headers,
            encoding=encoding,
        )

    def delete(
        self,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> GlobusHTTPResponse:
        """
        Make a DELETE request to the specified path.

        See :py:meth:`~.BaseClient.request` for details on the various parameters.

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        log.debug("DELETE to %s with query_params %s", path, query_params)
        return self.request("DELETE", path, query_params=query_params, headers=headers)

    def put(
        self,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
        headers: Optional[Dict[str, str]] = None,
        encoding: Optional[str] = None,
    ) -> GlobusHTTPResponse:
        """
        Make a PUT request to the specified path.

        See :py:meth:`~.BaseClient.request` for details on the various parameters.

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        log.debug("PUT to %s with query_params %s", path, query_params)
        return self.request(
            "PUT",
            path,
            query_params=query_params,
            data=data,
            headers=headers,
            encoding=encoding,
        )

    def patch(
        self,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
        headers: Optional[Dict[str, str]] = None,
        encoding: Optional[str] = None,
    ) -> GlobusHTTPResponse:
        """
        Make a PATCH request to the specified path.

        See :py:meth:`~.BaseClient.request` for details on the various parameters.

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        log.debug("PATCH to %s with query_params %s", path, query_params)
        return self.request(
            "PATCH",
            path,
            query_params=query_params,
            data=data,
            headers=headers,
            encoding=encoding,
        )

    def request(
        self,
        method: str,
        path: str,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
        headers: Optional[Dict[str, str]] = None,
        encoding: Optional[str] = None,
    ) -> GlobusHTTPResponse:
        """
        Send an HTTP request

        :param method: HTTP request method, as an all caps string
        :type method: str
        :param path: Path for the request, with or without leading slash
        :type path: str
        :param query_params: Parameters to be encoded as a query string
        :type query_params: dict, optional
        :param headers: HTTP headers to add to the request
        :type headers: dict
        :param data: Data to send as the request body. May pass through encoding.
        :type data: dict or str
        :param encoding: A way to encode request data. "json", "form", and "text"
            are all valid values. Custom encodings can be used only if they are
            registered with the transport. By default, strings get "text" behavior and
            all other objects get "json".
        :type encoding: str

        :return: :class:`GlobusHTTPResponse \
                <globus_sdk.response.GlobusHTTPResponse>` object
        """
        # prepare data...
        # copy headers if present
        rheaders = {**headers} if headers else {}

        # if a client is asked to make a request against a full URL, not just the path
        # component, then do not resolve the path, simply pass it through as the URL
        if path.startswith("https://") or path.startswith("http://"):
            url = path
        else:
            url = utils.slash_join(self.base_url, urllib.parse.quote(path))

        # make the request
        log.debug("request will hit URL: %s", url)
        r = self.transport.request(
            method=method,
            url=url,
            data=data.data if isinstance(data, utils.PayloadWrapper) else data,
            query_params=query_params,
            headers=rheaders,
            encoding=encoding,
            authorizer=self.authorizer,
        )
        log.debug("request made to URL: %s", r.url)

        if 200 <= r.status_code < 400:
            log.debug("request completed with response code: %s", r.status_code)
            return GlobusHTTPResponse(r, self)

        log.debug("request completed with (error) response code: %s", r.status_code)
        raise self.error_class(r)
| src/globus_sdk/client.py | 10,804 | Abstract base class for clients with error handling for Globus APIs.
:param authorizer: A ``GlobusAuthorizer`` which will generate Authorization headers
:type authorizer: :class:`GlobusAuthorizer\
<globus_sdk.authorizers.base.GlobusAuthorizer>`
:param app_name: Optional "nice name" for the application. Has no bearing on the
semantics of client actions. It is just passed as part of the User-Agent
string, and may be useful when debugging issues with the Globus Team
:type app_name: str
:param transport_params: Options to pass to the transport for this client
:type transport_params: dict
All other parameters are for internal use and should be ignored.
Make a DELETE request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
Make a GET request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
Make a PATCH request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
Make a POST request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
Make a PUT request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
The resource_server name for the API and scopes associated with this client.
This information is pulled from the ``scopes`` attribute of the client class.
If the client does not have associated scopes, this value will be ``None``.
service name is used to lookup a service URL from config path under the client base URL: the class for errors raised by this client on HTTP 4xx and 5xx errors: this can be set in subclasses, but must always be a subclass of GlobusError: the type of Transport which will be used, defaults to ``RequestsTransport``: the scopes for this client may be present as a ``ScopeBuilder`` explicitly check the `service_name` to ensure that it was set unfortunately, we can't rely on declaring BaseClient as an ABC because it doesn't have any abstract methods if we declare `service_name` without a value, we get AttributeError on access instead of the (desired) TypeError when instantiating a BaseClient because it's abstract if an environment was passed, it will be used, but otherwise lookup the env var -- and in the special case of `production` translate to `default`, regardless of the source of that value logs the environment when it isn't `default` set application name if given setup paginated methods prepare data... copy headers if present if a client is asked to make a request against a full URL, not just the path component, then do not resolve the path, simply pass it through as the URL make the request | 3,998 | en | 0.752215 |
#!/Users/drpaneas/Virtualenvs/linuxed/bin/python2.7

# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # locale setup is best-effort; fix: catch Exception instead of a bare
    # except so SystemExit/KeyboardInterrupt are not swallowed
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

# reads argv/stdin and writes the pseudo-XML rendering to stdout
publish_cmdline(description=description)
| bin/rst2pseudoxml.py | 634 | A minimal front end to the Docutils Publisher, producing pseudo-XML.
!/Users/drpaneas/Virtualenvs/linuxed/bin/python2.7 $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $ Author: David Goodger <goodger@python.org> Copyright: This module has been placed in the public domain. | 282 | en | 0.601946 |
"""Identifiers for objects in Robustness Gym."""
from __future__ import annotations
import ast
import json
from typing import Any, Callable, List, Union
# from robustnessgym.core.tools import persistent_hash
class Identifier:
    """Class for creating identifiers for objects in Robustness Gym.

    An identifier has a base name, an optional index and arbitrary keyword
    parameters, and renders as ``name-index(key=value, ...)``; `parse` is the
    inverse of `__repr__` for well-formed strings.
    """

    def __init__(
        self,
        _name: str,
        _index: Union[str, int] = None,
        **kwargs,
    ):
        self._name = _name
        # the index is normalized to str (or None) so repr/parse round-trip
        self._index = str(_index) if _index is not None else None
        self._parameters = kwargs

        # Add the parameter
        for param, value in self.parameters.items():
            self.add_parameter(param, value)

    @property
    def name(self):
        """Base name."""
        return self._name

    @property
    def index(self):
        """Index associated with the identifier."""
        return self._index

    @property
    def parameters(self):
        """Additional parameters contained in the identifier."""
        return self._parameters

    @classmethod
    def range(cls, n: int, _name: str, **kwargs) -> List[Identifier]:
        """Create a list of identifiers, with index varying from 1 to `n`.

        For ``n <= 1`` a single unindexed identifier is returned.
        """
        if n > 1:
            return [cls(_name=_name, _index=i, **kwargs) for i in range(1, n + 1)]
        return [cls(_name=_name, **kwargs)]

    def __call__(self, **kwargs):
        """Call the identifier with additional parameters to return a new
        identifier."""
        # round-trip through JSON to obtain an independent copy before mutating
        ident = Identifier.loads(self.dumps())
        for parameter, value in kwargs.items():
            ident.add_parameter(parameter, value)
        return ident

    def __repr__(self):
        params = ", ".join([f"{k}={v}" for k, v in self.parameters.items()])
        if self.index is not None:
            return (
                f"{self.name}-{self.index}({params})"
                if len(params) > 0
                else f"{self.name}-{self.index}"
            )
        return f"{self.name}({params})" if len(params) > 0 else f"{self.name}"

    def __hash__(self):
        # hash follows __eq__: both are defined on the string form
        # return persistent_hash(str(self))
        return hash(str(self))

    def __eq__(self, other: Union[Identifier, str]):
        # equality on the string form: an Identifier compares equal to its repr
        return str(self) == str(other)

    def dumps(self):
        """Dump the identifier to JSON."""
        return json.dumps(self.__dict__)

    @staticmethod
    def _parse_args(s: str):
        """https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-
        python-argument-list."""
        # parse `s` as the keyword-argument list of a dummy call `f(...)`
        args = "f({})".format(s)
        tree = ast.parse(args)
        funccall = tree.body[0].value

        # return {arg.arg: ast.literal_eval(arg.value) for arg in funccall.keywords}
        params = {}
        for arg in funccall.keywords:
            try:
                params[arg.arg] = ast.literal_eval(arg.value)
            except ValueError:
                # bare identifiers (e.g. k=v) are not literals: keep the name
                params[arg.arg] = arg.value.id
        return params

    @classmethod
    def parse(cls, s: str) -> Identifier:
        """Parse in an identifier from string."""
        # Parse out the various components
        if "(" in s:
            name_index, params = s.split("(")
            params = params.split(")")[0]
        else:
            name_index = s
            params = None

        # Create the name and index: a numeric suffix after the last "-" is
        # the index; otherwise the whole string (dashes included) is the name
        if "-" in name_index:
            name, index = name_index.split("-")[:-1], name_index.split("-")[-1]
            name = "-".join(name)
            if index.isnumeric():
                index = int(index)
            else:
                name = "-".join([name, index])
                index = None
        else:
            name = name_index
            index = None

        # Parse out the params
        if params is not None:
            params = cls._parse_args(params)
        else:
            params = {}

        return cls(_name=name, _index=index, **params)

    def without(self, *params) -> Identifier:
        """Returns an identifier without `params`."""
        return Identifier(
            self.name,
            self.index,
            **{k: v for k, v in self.parameters.items() if k not in set(params)},
        )

    @classmethod
    def loads(cls, s: str):
        """Load the identifier from JSON."""
        identifier = Identifier(_name="")
        identifier.__dict__ = json.loads(s)
        return identifier

    def add_parameter(self, parameter: str, value: Any) -> None:
        """Add a parameter to the identifier."""
        # callables are stored by their dotted path so the identifier stays
        # printable and JSON-serializable
        if isinstance(value, Callable):
            self.parameters[parameter] = ".".join(
                [str(value.__module__), str(value.__name__)]
            )
        else:
            self.parameters[parameter] = value
# Assign Id as an alias for the Identifier class
Id = Identifier
| robustnessgym/core/identifier.py | 4,719 | Class for creating identifiers for objects in Robustness Gym.
Call the identifier with additional parameters to return a new
identifier.
https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-
python-argument-list.
Add a parameter to the identifier.
Dump the identifier to JSON.
Index associated with the identifier.
Load the identifier from JSON.
Base name.
Additional parameters contained in the identifier.
Parse in an identifier from string.
Create a list of identifiers, with index varying from 1 to `n`.
Returns an identifier without `params`.
Identifiers for objects in Robustness Gym.
from robustnessgym.core.tools import persistent_hash Add the parameter return persistent_hash(str(self)) return {arg.arg: ast.literal_eval(arg.value) for arg in funccall.keywords} Parse out the various components Create the name and index Parse out the params Assign Id as an alias for the Identifier class | 913 | en | 0.453536 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAbind(RPackage):
"""
Combine Multidimensional Arrays.
Combine multidimensional arrays into a single array. This is a
generalization of 'cbind' and 'rbind'. Works with vectors, matrices, and
higher-dimensional arrays. Also provides functions 'adrop', 'asub', and
'afill' for manipulating, extracting and replacing data in arrays."""
cran = "abind"
version('1.4-5', sha256='3a3ace5afbcb86e56889efcebf3bf5c3bb042a282ba7cc4412d450bb246a3f2c')
version('1.4-3', sha256='b6c255878c1ab81701ae701f34546e88be115629b984ac4272e311fa3c0ea6ce')
depends_on('r@1.5.0:', type=('build', 'run'))
| var/spack/repos/builtin/packages/r-abind/package.py | 848 | Combine Multidimensional Arrays.
Combine multidimensional arrays into a single array. This is a
generalization of 'cbind' and 'rbind'. Works with vectors, matrices, and
higher-dimensional arrays. Also provides functions 'adrop', 'asub', and
'afill' for manipulating, extracting and replacing data in arrays.
Copyright 2013-2022 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 499 | en | 0.776947 |
import numpy as np
import torch
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return torch.FloatTensor(sinusoid_table) | src/onqg/utils/sinusoid.py | 768 | Sinusoid position encoding table
dim 2i dim 2i+1 zero vector for padding dimension | 85 | en | 0.144242 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
batch_size: integer, total batch sized.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
N.B. Only support Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
# In an eager function or graph, the current implementation of optimizer
# repeatedly call and thus create ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
help='Enable the spacial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
help='Number of steps per training loop. Only training step happens '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
help='Whether to use tf.keras.layers instead of tf.python.keras.layers.'
'It only changes imagenet resnet model layers for now. This flag is '
'a temporal flag during transition to tf.keras.layers. Do not use this '
'flag for external usage. this will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
| official/vision/image_classification/common.py | 20,266 | Callback to update learning rate on every batch (not epoch boundaries).
N.B. Only support Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
Piecewise constant decay with warmup schedule.
Compute learning rate at given step.
Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
Define flags for Keras models.
Define flags for pruning methods.
Returns common callbacks.
Returns optimizer to use.
Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
Returns dataset filled with random data.
Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
batch_size: integer, total batch sized.
Returns:
Adjusted learning rate.
Executes before step begins.
Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
Common util functions and classes used by both keras cifar and imagenet.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== This matches Jing's version. (multiplier, epoch to start) tuples Learning rate increases linearly per step. lr should be a float here In an eager function or graph, the current implementation of optimizer repeatedly call and thus create ops for the learning rate schedule. To avoid this, we cache the ops if not executing eagerly. The learning_rate is overwritten at the beginning of each step by callback. TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code. Gets final loss from training. Gets top_1 training accuracy. Look for the time history callback which was used during keras.fit TODO(b/135607288): Remove this flag once we understand the root cause of slowdown when setting the learning phase in Keras backend. TODO(kimjaehong): Replace as general hyper-params not only for mobilenet. Synthetic input should be within [0, 255]. pylint: disable=unused-argument Cast to float32 for Keras model. `drop_remainder` will make dataset produce outputs with known shapes. | 4,712 | en | 0.805612 |
import collections
import itertools
from trie_class import Trie
import sys
import timeit
def load_dataset(filename):
dataset = [sorted(int(n) for n in i.strip().split())
for i in open(filename).readlines()]
size = len(dataset)
print('Size of the Dataset : ', size)
total_len = 0
for i in range(len(dataset)):
total_len = total_len + len(dataset[i])
avg_len = total_len / size
print('Average Transaction Length : ', avg_len)
# print(dataset)
return dataset
def find_frequent_1_itemsets(dataset, min_sup):
# print('1 - item func : min sup :', min_sup)
frequency = dict(collections.Counter(
itertools.chain.from_iterable(dataset)))
L1 = dict()
for item, freq in frequency.items():
if freq >= min_sup:
L1[item] = freq
# print(L1)
return L1
# Input : L_k (k : itemset size)
def apriori_gen(L: list, k):
# Self Join Step
L_next = list()
for l1 in L:
for l2 in L:
if len(set(l1) & set(l2)) == (k - 1):
L_next.append(sorted(list(set(l1) | set(l2))))
# Removing Duplicates
L_set = set(tuple(x) for x in L_next)
L_k1 = [list(x) for x in L_set]
L_k1.sort(key=lambda x: L_next.index(x))
L_k1_tuple = [tuple(i) for i in L_k1]
info={'join':len(L_k1)}
# Prune Step
for c in L_k1_tuple:
if has_infrequent_subset(c, L):
L_k1.remove(list(c))
info['prune'] = len(L_k1)
# Returns list of lists [L_k + 1]
return info,L_k1
def has_infrequent_subset(candidate: tuple, L: list):
for subset in list(itertools.combinations(candidate, len(candidate) - 1)):
if list(subset) not in L:
return True
return False
def apriori(db: list, min_sup):
min_sup = (len(db) * min_sup) // 100
# print('Apriori - min sup :', min_sup)
levels = list()
Levels_info = list()
L1 = find_frequent_1_itemsets(db, min_sup)
# print('L-1 :', L1)
if bool(L1) == False:
print('No 1-Itemset Satisfies Given Minimum Support Threshold')
return None
# Creating list of 1-itemset(list itself)
_L1 = [[k] for k in L1.keys()]
_L1 = sorted(_L1)
# print('L1 :', L1)
levels.append(_L1)
Levels_info.append({'join': len(_L1), 'prune': len(_L1)})
# print('Levels :', levels)
while True:
info,candidates = apriori_gen(levels[-1], len(levels[-1][0]))
trie = Trie(db)
trie.build_trie(candidates)
trie.assign_frequency()
L = list()
# print('Func : Min Sup -', min_sup)
for itemset in candidates:
# print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup)
if trie.get_candidate_freq(itemset) >= min_sup:
# print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup)
L.append(sorted(itemset))
if not L:
break
levels.append(L)
Levels_info.append(info)
return Levels_info,levels
if __name__ == "__main__":
db = load_dataset(str(sys.argv[1]))
min_sup = float(sys.argv[2])
print('Dataset :', str(sys.argv[1]))
print('Min Support :', min_sup, '%')
print('Min Support Count :', (len(db) * min_sup) // 100)
start = timeit.default_timer()
info, L = apriori(db, min_sup)
stop = timeit.default_timer()
pattern_total,join_total,prune_total = 0,0,0
print('Level', ' After Join', ' After Pruning', ' Frequent Itemsets')
if L is not None:
for i in range(len(L)):
print()
print((i + 1), info[i]['join'], info[i]['prune'], len(L[i]), sep='\t\t')
pattern_total+=len(L[i])
join_total+=info[i]['join']
prune_total+= info[i]['prune']
# print((i + 1), '- Frequent Itemsets :', L[i])
print('\nTotal', join_total, prune_total, pattern_total, sep='\t\t')
print('\nTime: ', stop - start, " seconds")
| Frequent Pattern Mining/apriori.py | 4,036 | print(dataset) print('1 - item func : min sup :', min_sup) print(L1) Input : L_k (k : itemset size) Self Join Step Removing Duplicates Prune Step Returns list of lists [L_k + 1] print('Apriori - min sup :', min_sup) print('L-1 :', L1) Creating list of 1-itemset(list itself) print('L1 :', L1) print('Levels :', levels) print('Func : Min Sup -', min_sup) print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup) print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup) print((i + 1), '- Frequent Itemsets :', L[i]) | 587 | en | 0.091484 |
"""Spamming Module
{i}spam <no of msgs> <msg>
Note:- Don't use to much"""
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
from asyncio import wait
from telethon import events
@ItzSjDude(outgoing=True, pattern=r"spam")
async def spammer(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
counter = int(message[6:8])
spam_message = str(e.text[8:])
await wait(
[e.respond(spam_message) for i in range(counter)]
)
await e.delete()
if LOGGER:
await e.client.send_message(
LOGGER_GROUP,
"#SPAM \n\n"
"Spam was executed successfully"
)
| plugins/spam.py | 919 | Spamming Module
{i}spam <no of msgs> <msg>
Note:- Don't use to much
Copyright (C) 2019 The Raphielscape Company LLC. Licensed under the Raphielscape Public License, Version 1.b (the "License"); you may not use this file except in compliance with the License. | 261 | en | 0.863271 |
# stdlib
import json
from typing import List
from typing import NoReturn
from typing import Optional
# third party
from fastapi import APIRouter
from fastapi import Depends
from fastapi import File
from fastapi import Form
from fastapi import UploadFile
from loguru import logger
from starlette import status
from starlette.exceptions import HTTPException
# grid absolute
from grid.api.dependencies.current_user import get_current_user
from grid.api.users.models import ApplicantStatus
from grid.api.users.models import User
from grid.api.users.models import UserCandidate
from grid.api.users.models import UserCreate
from grid.api.users.models import UserPrivate
from grid.api.users.models import UserUpdate
# relative
from . import syft as syft_user_messages
def raise_generic_private_error() -> NoReturn:
    """Abort the current request with an opaque HTTP 500 response.

    The route handlers below call this from their ``except`` clauses to
    hide internal failure details from the client; the underlying error
    is expected to be logged by the caller before invoking this helper.
    """
    detail = "There was an error processing your request."
    raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, detail=detail)
router = APIRouter()
@router.get("/me", response_model=User, name="users:me", status_code=status.HTTP_200_OK)
def get_self(current_user: UserPrivate = Depends(get_current_user)) -> User:
    """Return the authenticated user's own record.

    The handler receives a ``UserPrivate`` instance; ``response_model=User``
    serializes it through the public ``User`` schema before responding.
    """
    return current_user
# TODO: Syft should return the newly created user and the response model should be User.
@router.post("", name="users:create", status_code=status.HTTP_201_CREATED)
async def create_user_grid(
    current_user: UserPrivate = Depends(get_current_user),
    new_user: str = Form(...),
    file: Optional[UploadFile] = File(None),
) -> str:
    """Create a new user on the domain.

    Args:
        current_user: Authenticated user performing the request.
        new_user: JSON-encoded payload matching the ``UserCreate`` schema.
        file: Optional uploaded document (stored as ``daa_pdf`` bytes;
            presumably a signed DAA PDF — TODO confirm).

    Returns:
        The status string produced by the Syft user-creation handler.

    Raises:
        HTTPException: Generic 500 if parsing, validation, or the Syft
            call fails; the real error is logged, not leaked to the client.
    """
    # Use the async `UploadFile.read()` coroutine instead of the blocking
    # `file.file.read()` so a large upload cannot stall the event loop.
    pdf_file = await file.read() if file else b""
    try:
        # Parse and validate inside the try-block so malformed payloads are
        # logged and answered with the generic error instead of escaping as
        # an unhandled traceback.
        dict_user = json.loads(new_user)
        dict_user["daa_pdf"] = pdf_file
        user_schema = UserCreate(**dict_user)
        return syft_user_messages.create_user(user_schema, current_user)
    except Exception as err:
        logger.error(err)
        raise_generic_private_error()
# NOTE(review): 201 Created on a GET is unusual -- likely meant HTTP_200_OK; kept as-is.
@router.get("/applicants", name="users:applicants", status_code=status.HTTP_201_CREATED)
async def get_all_candidates(
    current_user: UserPrivate = Depends(get_current_user),
) -> List[UserCandidate]:
    """Return every pending user application visible to *current_user*."""
    try:
        candidates = syft_user_messages.get_user_requests(current_user)
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
    return candidates
@router.patch(
    "/applicants/{candidate_id}",
    name="users:applicants:process",
    status_code=status.HTTP_201_CREATED,
)
async def process_applicant_request(
    candidate_id: int,
    request_status: ApplicantStatus,
    current_user: UserPrivate = Depends(get_current_user),
) -> str:
    """Accept or reject the pending application identified by *candidate_id*."""
    try:
        result = syft_user_messages.process_applicant_request(
            candidate_id=candidate_id,
            status=request_status.status,
            current_user=current_user,
        )
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
    return result
@router.get(
    "",
    response_model=List[User],
    name="users:read_all",
    status_code=status.HTTP_200_OK,
)
async def get_all_users_grid(
    current_user: UserPrivate = Depends(get_current_user),
) -> List[User]:
    """List every user on the domain, as visible to *current_user*."""
    try:
        users = syft_user_messages.get_all_users(current_user)
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
    return users
@router.get(
    "/{user_id}",
    response_model=User,
    name="users:read_one",
    status_code=status.HTTP_200_OK,
)
async def get_user_grid(
    user_id: int, current_user: UserPrivate = Depends(get_current_user)
) -> User:
    """Fetch a single user by numeric id."""
    try:
        user = syft_user_messages.get_user(user_id, current_user)
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
    return user
@router.patch(
    "/{user_id}",
    name="users:update",
    status_code=status.HTTP_204_NO_CONTENT,
)
async def update_user_grid(
    user_id: int,
    updated_user: UserUpdate,
    current_user: UserPrivate = Depends(get_current_user),
) -> None:
    """Apply a partial update to the given user; 204 (no body) on success."""
    try:
        syft_user_messages.update_user(user_id, current_user, updated_user)
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
@router.delete(
    "/{user_id}", name="users:delete", status_code=status.HTTP_204_NO_CONTENT
)
async def delete_user_grid(
    user_id: int, current_user: UserPrivate = Depends(get_current_user)
) -> None:
    """Delete the given user; 204 (no body) on success."""
    try:
        syft_user_messages.delete_user(user_id, current_user)
    except Exception as exc:
        logger.error(exc)
        raise_generic_private_error()
| packages/grid/backend/grid/api/users/routes.py | 4,463 | stdlib third party grid absolute relative TODO: Syft should return the newly created user and the response model should be User. type: ignore | 141 | en | 0.703342 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the yara_scanner sources importable so sphinx.ext.autodoc can resolve them.
sys.path.insert(0, os.path.abspath('../../../yara_scanner'))
# -- Project information -----------------------------------------------------
project = 'yara-scanner'
copyright = '2020, John Davison'
author = 'John Davison'
# The full version, including alpha/beta/rc tags
release = '1.0.14'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Allow cross-references into the standard Python documentation.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# Both the class' and the __init__ method's docstring are concatenated and inserted.
autoclass_content = 'both'
autodoc_inherit_docstrings = False
from pathlib import Path
from datetime import datetime, timedelta
from src.settings import envs
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airflow.hooks.postgres_hook import PostgresHook
import logging
from src.settings import log_config
import shutil
# Setting up module from __file__ as the interpreter sets __name__ as __main__ when the source file is executed as
# main program
logger = logging.getLogger(name=__file__.replace(envs.PROJECT_ROOT, '').replace('/', '.')[1:-3])
# these args will get passed on to each operator
# you can override them on a per-task basis during operator initialization
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': days_ago(7),
    'email': ['airflow@airflow.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 0,
    'retry_delay': timedelta(minutes=5),
    'catchup': False
}
# DAG id convention: '<parent folder>_<file stem>' (e.g. 'btb_house_keeping').
DAG_ID = '{p.parent.name}_{p.stem}'.format(p=Path(__file__))
# Per-DAG settings come from a JSON Airflow Variable keyed by the DAG id.
PARAMS = Variable.get(DAG_ID, deserialize_json=True)
SCHEDULE_INTERVAL = PARAMS.get('schedule_interval') or None
# Retention window (days) for metadata rows and on-disk logs; default 60.
DAYS_TO_RETAIN = PARAMS.get('days_to_retain', 60)
# Airflow metadata tables that accumulate per-run rows and need pruning.
TABLES = ("xcom", "task_instance", "sla_miss", "log", "dag_run", "task_fail", "task_reschedule")
LOG_DIR = '/usr/local/airflow/logs'
dag = DAG(
    DAG_ID,
    default_args=default_args,
    description='BT Bill Clean Up DAGs Metadata and Logs',
    schedule_interval=SCHEDULE_INTERVAL,
    max_active_runs=1
)
def clean_dags_logs():
    """Purge Airflow metadata rows and on-disk logs for sibling DAGs.

    For every DAG defined in this folder (except this housekeeping DAG
    itself), delete rows older than DAYS_TO_RETAIN days from each metadata
    table in TABLES, then remove matching per-run log directories under
    LOG_DIR whose execution date falls outside the retention window.
    """
    hook = PostgresHook(postgres_conn_id="airflow_postgres")
    # Sibling DAG ids follow the same '<folder>_<stem>' convention as DAG_ID.
    dag_files = Path(Path(__file__).parent).glob('*.py')
    dags = ['{p.parent.name}_{p.stem}'.format(p=p) for p in dag_files]
    dags = [d for d in dags if d != DAG_ID]
    cutoff_date = datetime.now().date() - timedelta(days=DAYS_TO_RETAIN)
    log_root = Path(LOG_DIR)
    for d in dags:
        logger.info("Cleaning up meta tables for {}".format(d))
        for t in TABLES:
            # Identifiers cannot be bound as parameters, but they come from the
            # trusted TABLES constant; the values are bound to avoid SQL
            # injection and quoting issues.
            sql = "delete from {} where dag_id=%s and execution_date < %s".format(t)
            hook.run(sql, True, parameters=(d, cutoff_date))
        logger.info('Cleaning up log folder for {}'.format(d))
        for path in log_root.glob('{}/*/*'.format(d)):
            # Run directories are named '<YYYY-MM-DD>T<time>...'.
            log_date_str = str(path).split('/')[-1].split('T')[0]
            log_date = datetime.strptime(log_date_str, '%Y-%m-%d').date()
            if log_date < cutoff_date:
                logger.info('Deleting dir {}'.format(str(path.absolute())))
                shutil.rmtree(str(path.absolute()))
# Single task: run the cleanup callable on the DAG's schedule.
clean_up = PythonOperator(
    task_id='clean_up',
    python_callable=clean_dags_logs,
    dag=dag)
# NOTE(review): `dag >> clean_up` is suspect -- dependency operators are defined
# on tasks, not DAG objects, and the task is already attached via `dag=dag`.
# Confirm this line is valid/needed in the deployed Airflow version.
dag >> clean_up
| src/airflow_dags/dags/btb/house_keeping.py | 2,740 | Setting up module from __file__ as the interpreter sets __name__ as __main__ when the source file is executed as main program these args will get passed on to each operator you can override them on a per-task basis during operator initialization | 245 | en | 0.95202 |
from abc import ABC
from asyncio import Lock as AsyncLock
from collections import ChainMap, OrderedDict
from dataclasses import dataclass, field
from datetime import timedelta
from functools import partial, wraps
from hashlib import sha256
import inspect
from pathlib import Path
import pickle
from sqlite3 import connect, Connection
from textwrap import dedent
from time import time
from threading import Lock as SyncLock
from typing import Any, Callable, Hashable, Mapping, Optional, Tuple, Type, Union
from weakref import finalize, WeakSet
# A decoratable target: a plain callable or a class (whose construction is memoized).
Decoratee = Union[Callable, Type]
# User-supplied key function; called with the normalized call kwargs, returns key part(s).
Keygen = Callable[..., Any]
class Pickler(ABC):
    """Serializer interface used to persist memo values (pickle-compatible API).

    Any module exposing ``dumps``/``loads`` (e.g. ``pickle``, ``dill``) satisfies it.
    """
    # NOTE(review): pickle.dumps actually returns bytes; annotations kept as
    # originally written -- confirm against how `v` is stored in sqlite.
    @staticmethod
    def dumps(_str: str) -> str:
        ...  # pragma: no cover
    @staticmethod
    def loads(_bytes: bytes) -> Any:
        ...  # pragma: no cover
class _MemoZeroValue:
    """Sentinel type marking a memo whose value has not been produced yet."""
    pass
@dataclass
class _MemoReturnState:
    """Mutable record of a memoized call's outcome."""
    # True once the wrapped function has been invoked for this memo.
    called: bool = False
    # True when the invocation raised; `value` then holds the exception.
    raised: bool = False
    # Cached return value (or exception); _MemoZeroValue until produced.
    value: Any = _MemoZeroValue
@dataclass(frozen=True)
class _MemoBase:
    """Base memo: creation timestamp plus the call-outcome state."""
    # Creation time (time.time()); None when no expiry duration is configured.
    t0: Optional[float]
    memo_return_state: _MemoReturnState = field(init=False, default_factory=_MemoReturnState)
@dataclass(frozen=True)
class _AsyncMemo(_MemoBase):
    """Memo for coroutine functions; the asyncio lock serializes concurrent fills."""
    async_lock: AsyncLock = field(init=False, default_factory=AsyncLock)
@dataclass(frozen=True)
class _SyncMemo(_MemoBase):
    """Memo for regular functions; the threading lock serializes concurrent fills."""
    sync_lock: SyncLock = field(init=False, default_factory=SyncLock)
# Either memo flavor; which one is used depends on whether `fn` is a coroutine.
_Memo = Union[_AsyncMemo, _SyncMemo]
@dataclass(frozen=True)
class _MemoizeBase:
    """State and bookkeeping shared by the sync and async memoize drivers.

    Tracks memos in LRU order, tracks key expiry order when a `duration` is
    set, and mirrors memos into a sqlite table when a `db` connection is
    provided so they survive process restarts.
    """

    db: Optional[Connection]
    default_kwargs: Mapping[str, Any]
    duration: Optional[timedelta]
    fn: Callable
    keygen: Optional[Keygen]
    pickler: Pickler = field(hash=False)
    size: Optional[int]

    # Keys in expiry order (oldest first); only populated when `duration` is set.
    expire_order: OrderedDict = field(init=False, default_factory=OrderedDict, hash=False)
    # key -> memo, maintained in LRU order (least recently used first).
    memos: OrderedDict = field(init=False, default_factory=OrderedDict, hash=False)

    def __post_init__(self) -> None:
        """Create/prune the backing sqlite table and reload surviving memos."""
        if self.db is not None:
            self.db.isolation_level = None
            self.db.execute(dedent(f'''
                CREATE TABLE IF NOT EXISTS `{self.table_name}` (
                  k TEXT PRIMARY KEY,
                  t0 FLOAT,
                  t FLOAT,
                  v TEXT NOT NULL
                )
            '''))
            if self.duration:
                # Drop rows whose creation time is already past the expiry window.
                self.db.execute(dedent(f'''
                    DELETE FROM `{self.table_name}`
                    WHERE t0 < {time() - self.duration.total_seconds()}
                '''))
            if self.size:
                # Keep only the `size` most recently used rows.
                res = self.db.execute(
                    f"SELECT t FROM `{self.table_name}` ORDER BY t DESC LIMIT {self.size}"
                ).fetchall()
                if res:
                    (min_t,) = res[-1]
                    self.db.execute(f"DELETE FROM `{self.table_name}` WHERE t < {min_t}")
            # Reload survivors in LRU order (ascending last-use time).
            for k, t0, t, v in self.db.execute(
                f"SELECT k, t0, t, v FROM `{self.table_name}` ORDER BY t"
            ).fetchall():
                memo = self.make_memo(t0=t0)
                memo.memo_return_state.called = True
                memo.memo_return_state.value = self.pickler.loads(v)
                self.memos[k] = memo
            if self.duration:
                # Rebuild the expiry queue in creation-time order.
                for k, t0 in self.db.execute(
                    f"SELECT k, t0 FROM `{self.table_name}` ORDER BY t0"
                ).fetchall():
                    self.expire_order[k] = ...

    def __len__(self) -> int:
        """Number of live memos."""
        return len(self.memos)

    @property
    def table_name(self) -> str:
        """Sqlite table name, unique per decorated function (file:name:firstline)."""
        # noinspection PyUnresolvedReferences
        return (
            f'{self.fn.__code__.co_filename}'
            f':{self.fn.__code__.co_name}'
            f':{self.fn.__code__.co_firstlineno}'
        )

    def bind_key_lifetime(self, raw_key: Tuple[Any, ...], key: Union[int, str]) -> None:
        """Drop the memo for `key` once any identity-hashed key part is collected."""
        for raw_key_part in raw_key:
            # Objects inheriting object.__hash__ hash by id; once collected the
            # same key can never be produced again, so the memo is useless.
            if (raw_key_part is not None) and (type(raw_key_part).__hash__ is object.__hash__):
                finalize(raw_key_part, self.reset_key, key)

    def default_keygen(self, *args, **kwargs) -> Tuple[Hashable, ...]:
        """Returns all params (args, kwargs, and missing default kwargs) for function as kwargs."""
        return tuple(self.get_args_as_kwargs(*args, **kwargs).values())

    def get_args_as_kwargs(self, *args, **kwargs) -> Mapping[str, Any]:
        """Normalize a call into a kwargs mapping, filling in declared defaults."""
        args_as_kwargs = {}
        for k, v in zip(self.default_kwargs, args):
            args_as_kwargs[k] = v
        return ChainMap(args_as_kwargs, kwargs, self.default_kwargs)

    def get_memo(self, key: Union[int, str], insert: bool) -> Optional[_Memo]:
        """Fetch (and LRU-touch) the memo for `key`; create one when allowed.

        Returns None when no valid memo exists and `insert` is False.
        """
        try:
            # pop + reassign moves the key to the most-recently-used end.
            memo = self.memos[key] = self.memos.pop(key)
            if self.duration is not None and memo.t0 < time() - self.duration.total_seconds():
                self.expire_order.pop(key)
                raise ValueError('value expired')
        except (KeyError, ValueError):
            if not insert:
                return None
            elif self.duration is None:
                t0 = None
            else:
                t0 = time()
                # The value has no significance. We're using the dict entirely for ordering keys.
                self.expire_order[key] = ...
            memo = self.memos[key] = self.make_memo(t0=t0)
        return memo

    def expire_one_memo(self) -> None:
        """Evict at most one memo: the oldest expired one, else the LRU overflow."""
        k = None
        if (
            (self.expire_order is not None) and
            (len(self.expire_order) > 0) and
            (
                self.memos[next(iter(self.expire_order))].t0 <
                time() - self.duration.total_seconds()
            )
        ):
            (k, _) = self.expire_order.popitem(last=False)
            self.memos.pop(k)
        elif self.size is not None and self.size < len(self.memos):
            (k, _) = self.memos.popitem(last=False)
        if (self.db is not None) and (k is not None):
            # `k` is an int hash or sha256 hexdigest, never attacker-controlled text.
            self.db.execute(f"DELETE FROM `{self.table_name}` WHERE k = '{k}'")

    def finalize_memo(self, memo: _Memo, key: Union[int, str]) -> Any:
        """Re-raise a memoized exception, or return (and optionally persist) the value."""
        if memo.memo_return_state.raised:
            raise memo.memo_return_state.value
        elif (self.db is not None) and (self.memos[key] is memo):
            value = self.pickler.dumps(memo.memo_return_state.value)
            self.db.execute(
                dedent(f'''
                    INSERT OR REPLACE INTO `{self.table_name}`
                    (k, t0, t, v)
                    VALUES
                    (?, ?, ?, ?)
                '''),
                (
                    key,
                    memo.t0,
                    time(),
                    value
                )
            )
        return memo.memo_return_state.value

    def get_key(self, raw_key: Tuple[Hashable, ...]) -> Union[int, str]:
        """Hash the raw key: builtin hash in-memory, stable sha256 hex with a db."""
        if self.db is None:
            key = hash(raw_key)
        else:
            key = sha256(str(raw_key).encode()).hexdigest()
        return key

    @staticmethod
    def make_memo(t0: Optional[float]) -> _Memo:  # pragma: no cover
        # Bug fix: the original `raise NotImplemented` raised the NotImplemented
        # constant, which is not an exception and produces a TypeError instead.
        # Subclasses must override this factory.
        raise NotImplementedError

    def reset(self) -> None:
        """Drop every memo (and all persisted rows)."""
        # object.__setattr__ is required because the dataclass is frozen.
        object.__setattr__(self, 'expire_order', OrderedDict())
        object.__setattr__(self, 'memos', OrderedDict())
        if self.db is not None:
            self.db.execute(f"DELETE FROM `{self.table_name}`")

    def reset_key(self, key: Union[int, str]) -> None:
        """Drop the memo for one key, in memory and (if persisted) on disk."""
        if key in self.memos:
            self.memos.pop(key)
            if self.duration is not None:
                self.expire_order.pop(key)
            if self.db is not None:
                self.db.execute(f"DELETE FROM `{self.table_name}` WHERE k == '{key}'")
@dataclass(frozen=True)
class _AsyncMemoize(_MemoizeBase):
    """Memoize driver for coroutine functions.

    Each memo carries an asyncio lock, so concurrent awaiters of the same key
    share a single underlying call and all receive its (possibly raised) result.
    """

    async def get_raw_key(self, *args, **kwargs) -> Tuple[Hashable, ...]:
        """Build the key tuple for this call, awaiting any awaitable key parts."""
        if self.keygen is None:
            raw_key = self.default_keygen(*args, **kwargs)
        else:
            raw_key = self.keygen(**self.get_args_as_kwargs(*args, **kwargs))
        if isinstance(raw_key, tuple):
            raw_key = list(raw_key)
        else:
            raw_key = [raw_key]
        for i, v in enumerate(raw_key):
            if inspect.isawaitable(v):
                raw_key[i] = await v
        raw_key = tuple(raw_key)
        return raw_key

    def get_behavior(self, *, insert: bool, update: bool) -> Callable:
        """Return a wrapper factory implementing insert/update/upsert semantics.

        insert=True/update=False: normal memoization; insert=False/update=True:
        overwrite existing memos only; both True: insert-or-overwrite.
        """
        def get_call(*, fn: Callable) -> Callable:
            @wraps(self.fn)
            async def call(*args, **kwargs) -> Any:
                raw_key = await self.get_raw_key(*args, **kwargs)
                key = self.get_key(raw_key)
                memo: _AsyncMemo = self.get_memo(key, insert=insert)
                if memo is None:
                    # No memo exists and inserts are disallowed: call through uncached.
                    return await fn(*args, **kwargs)
                self.expire_one_memo()
                async with memo.async_lock:
                    if (
                        (insert and not memo.memo_return_state.called) or
                        (update and memo.memo_return_state.value is not _MemoZeroValue)
                    ):
                        memo.memo_return_state.called = True
                        try:
                            memo.memo_return_state.value = await fn(*args, **kwargs)
                        except Exception as e:
                            # Cache the exception; finalize_memo re-raises it on hits.
                            memo.memo_return_state.raised = True
                            memo.memo_return_state.value = e
                        # Tie memo lifetime to any identity-hashed key parts.
                        self.bind_key_lifetime(raw_key, key)
                    return self.finalize_memo(memo=memo, key=key)
            return call
        return get_call

    async def insert(self, *args, **kwargs) -> Any:
        """Normal memoized call: compute and cache, or return the cached value."""
        return await self.get_behavior(insert=True, update=False)(fn=self.fn)(*args, **kwargs)

    def update(self, *args, **kwargs) -> Callable:
        """Return an async `.to(value)` continuation that overwrites an existing memo."""
        async def to(value: Any) -> Any:
            async def fn(*_args, **_kwargs) -> Any:
                return value
            return await self.get_behavior(insert=False, update=True)(fn=fn)(*args, **kwargs)
        return to

    def upsert(self, *args, **kwargs) -> Callable:
        """Return an async `.to(value)` continuation that inserts or overwrites a memo."""
        async def to(value: Any) -> Any:
            async def fn(*_args, **_kwargs) -> Any:
                return value
            return await self.get_behavior(insert=True, update=True)(fn=fn)(*args, **kwargs)
        return to

    async def remove(self, *args, **kwargs) -> None:
        """Drop the memo (if any) for the given call arguments."""
        raw_key = await self.get_raw_key(*args, **kwargs)
        key = self.get_key(raw_key)
        self.reset_key(key)

    def get_decorator(self) -> Callable:
        """Return the public async wrapper; exposes this driver as `.memoize`."""
        async def decorator(*args, **kwargs) -> Any:
            return await self.insert(*args, **kwargs)
        decorator.memoize = self
        return decorator

    @staticmethod
    def make_memo(t0: Optional[float]) -> _AsyncMemo:
        return _AsyncMemo(t0=t0)
@dataclass(frozen=True)
class _SyncMemoize(_MemoizeBase):
    """Memoize driver for regular (synchronous) functions.

    `_sync_lock` guards the shared memo table; each memo's own lock makes
    concurrent callers of the same key wait for the first call's result.
    """

    _sync_lock: SyncLock = field(init=False, default_factory=lambda: SyncLock())

    def get_raw_key(self, *args, **kwargs) -> Tuple[Hashable, ...]:
        """Build the (always-tuple) key for this call."""
        if self.keygen is None:
            raw_key = self.default_keygen(*args, **kwargs)
        else:
            raw_key = self.keygen(**self.get_args_as_kwargs(*args, **kwargs))
        if not isinstance(raw_key, tuple):
            raw_key = [raw_key]
        raw_key = tuple(raw_key)
        return raw_key

    def get_behavior(self, *, insert: bool, update: bool) -> Callable:
        """Return a wrapper factory implementing insert/update/upsert semantics.

        insert=True/update=False: normal memoization; insert=False/update=True:
        overwrite existing memos only; both True: insert-or-overwrite.
        """
        def get_call(*, fn: Callable) -> Callable:
            @wraps(self.fn)
            def call(*args, **kwargs) -> Any:
                raw_key = self.get_raw_key(*args, **kwargs)
                key = self.get_key(raw_key)
                # Only the table lookup/creation is done under the class-wide lock.
                with self._sync_lock:
                    memo: _SyncMemo = self.get_memo(key, insert=insert)
                if memo is None:
                    # No memo exists and inserts are disallowed: call through uncached.
                    return fn(*args, **kwargs)
                self.expire_one_memo()
                with memo.sync_lock:
                    if (
                        (insert and not memo.memo_return_state.called) or
                        (update and memo.memo_return_state.value is not _MemoZeroValue)
                    ):
                        memo.memo_return_state.called = True
                        try:
                            memo.memo_return_state.value = fn(*args, **kwargs)
                        except Exception as e:
                            # Cache the exception; finalize_memo re-raises it on hits.
                            memo.memo_return_state.raised = True
                            memo.memo_return_state.value = e
                        # Tie memo lifetime to any identity-hashed key parts.
                        self.bind_key_lifetime(raw_key, key)
                    return self.finalize_memo(memo=memo, key=key)
            return call
        return get_call

    def insert(self, *args, **kwargs) -> Any:
        """Normal memoized call: compute and cache, or return the cached value."""
        return self.get_behavior(insert=True, update=False)(fn=self.fn)(*args, **kwargs)

    def update(self, *args, **kwargs) -> Callable:
        """Return a `.to(value)` continuation that overwrites an existing memo."""
        def to(value: Any) -> Any:
            def fn(*_args, **_kwargs) -> Any:
                return value
            return self.get_behavior(insert=False, update=True)(fn=fn)(*args, **kwargs)
        return to

    def upsert(self, *args, **kwargs) -> Callable:
        """Return a `.to(value)` continuation that inserts or overwrites a memo."""
        def to(value: Any) -> Any:
            def fn(*_args, **_kwargs) -> Any:
                return value
            return self.get_behavior(insert=True, update=True)(fn=fn)(*args, **kwargs)
        return to

    def remove(self, *args, **kwargs) -> None:
        """Drop the memo (if any) for the given call arguments."""
        raw_key = self.get_raw_key(*args, **kwargs)
        key = self.get_key(raw_key)
        self.reset_key(key)

    def get_decorator(self) -> Callable:
        """Return the public wrapper; exposes this driver as `.memoize`."""
        def decorator(*args, **kwargs) -> Any:
            return self.insert(*args, **kwargs)
        decorator.memoize = self
        return decorator

    @staticmethod
    def make_memo(t0: Optional[float]) -> _SyncMemo:
        return _SyncMemo(t0=t0)

    def reset(self) -> None:
        # Serialize resets against concurrent wrapped calls.
        with self._sync_lock:
            super().reset()

    def reset_key(self, key: Union[int, str]) -> None:
        # Serialize per-key resets (also triggered by weakref finalizers).
        with self._sync_lock:
            super().reset_key(key)
class _Memoize:
    """Decorates a function call and caches return value for given inputs.
    - If `db_path` is provided, memos will persist on disk and reloaded during initialization.
    - If `duration` is provided, memos will only be valid for given `duration`.
    - If `keygen` is provided, memo hash keys will be created with given `keygen`.
    - If `pickler` is provided, persistent memos will (de)serialize using given `pickler`.
    - If `size` is provided, LRU memo will be evicted if current count exceeds given `size`.
    ### Examples
    - Body will run once for unique input `bar` and result is cached.
        ```python3
        @memoize
        def foo(bar) -> Any: ...
        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        foo(2)  # Function actually called. Result cached.
        ```
    - Same as above, but async.
        ```python3
        @memoize
        async def foo(bar) -> Any: ...
        # Concurrent calls from the same event loop are safe. Only one call is generated. The
        # other nine calls in this example wait for the result.
        await asyncio.gather(*[foo(1) for _ in range(10)])
        ```
    - Classes may be memoized.
        ```python3
        @memoize
        class Foo:
            def __init__(self, _): ...
        Foo(1)  # Instance is actually created.
        Foo(1)  # Instance not created. Cached instance returned.
        Foo(2)  # Instance is actually created.
        ```
    - Calls `foo(1)`, `foo(bar=1)`, and `foo(1, baz='baz')` are equivalent and only cached once.
        ```python3
        @memoize
        def foo(bar, baz='baz'): ...
        ```
    - Only 2 items are cached. Acts as an LRU.
        ```python3
        @memoize(size=2)
        def foo(bar) -> Any: ...
        foo(1)  # LRU cache order [foo(1)]
        foo(2)  # LRU cache order [foo(1), foo(2)]
        foo(1)  # LRU cache order [foo(2), foo(1)]
        foo(3)  # LRU cache order [foo(1), foo(3)], foo(2) is evicted to keep cache size at 2
        ```
    - Items are evicted after 1 minute.
        ```python3
        @memoize(duration=datetime.timedelta(minutes=1))
        def foo(bar) -> Any: ...
        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        sleep(61)
        foo(1)  # Function actually called. Cached result was too old.
        ```
    - Memoize can be explicitly reset through the function's `.memoize` attribute
        ```python3
        @memoize
        def foo(bar) -> Any: ...
        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        foo.memoize.reset()
        foo(1)  # Function actually called. Cache was emptied.
        ```
    - Current cache length can be accessed through the function's `.memoize` attribute
        ```python3
        @memoize
        def foo(bar) -> Any: ...
        foo(1)
        foo(2)
        len(foo.memoize)  # returns 2
        ```
    - Alternate memo hash function can be specified. The inputs must match the function's.
        ```python3
        class Foo:
            @memoize(keygen=lambda self, a, b, c: (a, b, c))  # Omit 'self' from hash key.
            def bar(self, a, b, c) -> Any: ...
        a, b = Foo(), Foo()
        # Hash key will be (a, b, c)
        a.bar(1, 2, 3)  # LRU cache order [Foo.bar(a, 1, 2, 3)]
        # Hash key will again be (a, b, c)
        # Be aware, in this example the returned result comes from a.bar(...), not b.bar(...).
        b.bar(1, 2, 3)  # Function not called. Cached result returned.
        ```
    - If part of the returned key from keygen is awaitable, it will be awaited.
        ```python3
        async def awaitable_key_part() -> Hashable: ...
        @memoize(keygen=lambda bar: (bar, awaitable_key_part()))
        async def foo(bar) -> Any: ...
        ```
    - If the memoized function is async and any part of the key is awaitable, it is awaited.
        ```python3
        async def morph_a(a: int) -> int: ...
        @memoize(keygen=lambda a, b, c: (morph_a(a), b, c))
        def foo(a, b, c) -> Any: ...
        ```
    - Properties can be memoized.
        ```python3
        class Foo:
            @property
            @memoize
            def bar(self) -> Any: ...
        a = Foo()
        a.bar  # Function actually called. Result cached.
        a.bar  # Function not called. Cached result returned.
        b = Foo()  # Memoize uses 'self' parameter in hash. 'b' does not share returns with 'a'
        b.bar  # Function actually called. Result cached.
        b.bar  # Function not called. Cached result returned.
        ```
    - Be careful with eviction on instance methods. Memoize is not instance-specific.
        ```python3
        class Foo:
            @memoize(size=1)
            def bar(self, baz) -> Any: ...
        a, b = Foo(), Foo()
        a.bar(1)  # LRU cache order [Foo.bar(a, 1)]
        b.bar(1)  # LRU cache order [Foo.bar(b, 1)], Foo.bar(a, 1) is evicted
        a.bar(1)  # Foo.bar(a, 1) is actually called and cached again.
        ```
    - Values can persist to disk and be reloaded when memoize is initialized again.
        ```python3
        @memoize(db_path=Path.home() / '.memoize')
        def foo(a) -> Any: ...
        foo(1)  # Function actually called. Result cached.
        # Process is restarted. Upon restart, the state of the memoize decorator is reloaded.
        foo(1)  # Function not called. Cached result returned.
        ```
    - If not applied to a function, calling the decorator returns a partial application.
        ```python3
        memoize_db = memoize(db_path=Path.home() / '.memoize')
        @memoize_db(size=1)
        def foo(a) -> Any: ...
        @memoize_db(duration=datetime.timedelta(hours=1))
        def bar(b) -> Any: ...
        ```
    - Comparison equality does not affect memoize. Only hash equality matters.
        ```python3
        # Inherits object.__hash__
        class Foo:
            # Don't be fooled. memoize only cares about the hash.
            def __eq__(self, other: Foo) -> bool:
                return True
        @memoize
        def bar(foo: Foo) -> Any: ...
        foo0, foo1 = Foo(), Foo()
        assert foo0 == foo1
        bar(foo0)  # Function called. Result cached.
        bar(foo1)  # Function called again, despite equality, due to different hash.
        ```
    ### A warning about arguments that inherit `object.__hash__`:
    It doesn't make sense to keep a memo if it's impossible to generate the same input again. Inputs
    that inherit the default `object.__hash__` are unique based on their id, and thus, their
    location in memory. If such inputs are garbage-collected, they are gone forever. For that
    reason, when those inputs are garbage collected, `memoize` will drop memos created using those
    inputs.
    - Memo lifetime is bound to the lifetime of any arguments that inherit `object.__hash__`.
        ```python3
        # Inherits object.__hash__
        class Foo:
            ...
        @memoize
        def bar(foo: Foo) -> Any: ...
        bar(Foo())  # Memo is immediately deleted since Foo() is garbage collected.
        foo = Foo()
        bar(foo)  # Memo isn't deleted until foo is deleted.
        del foo  # Memo is deleted at the same time as foo.
        ```
    - Types that have specific, consistent hash functions (int, str, etc.) won't cause problems.
        ```python3
        @memoize
        def foo(a: int, b: str, c: Tuple[int, ...], d: range) -> Any: ...
        foo(1, 'bar', (1, 2, 3), range(42))  # Function called. Result cached.
        foo(1, 'bar', (1, 2, 3), range(42))  # Function not called. Cached result returned.
        ```
    - Classmethods rely on classes, which inherit from `object.__hash__`. However, classes are
      almost never garbage collected until a process exits so memoize will work as expected.
        ```python3
        class Foo:
            @classmethod
            @memoize
            def bar(cls) -> Any: ...
        foo = Foo()
        foo.bar()  # Function called. Result cached.
        foo.bar()  # Function not called. Cached result returned.
        del foo  # Memo not cleared since lifetime is bound to class Foo.
        foo = Foo()
        foo.bar()  # Function not called. Cached result returned.
        foo.bar()  # Function not called. Cached result returned.
        ```
    - Long-lasting object instances that inherit from `object.__hash__`.
        ```python3
        class Foo:
            @memoize
            def bar(self) -> Any: ...
        foo = Foo()
        foo.bar()  # Function called. Result cached.
        # foo instance is kept around somewhere and used later.
        foo.bar()  # Function not called. Cached result returned.
        ```
    - Custom pickler may be specified for unpickleable return types.
        ```python3
        import dill
        @memoize(db_path='~/.memoize', pickler=dill)
        def foo() -> Callable[[], None]:
            return lambda: None
        ```
    """

    # Every decorator ever produced, so reset_all() can reach all caches.
    _all_decorators = WeakSet()

    @staticmethod
    def __call__(
        _decoratee: Optional[Decoratee] = None,
        *,
        db_path: Optional[Path] = None,
        duration: Optional[Union[int, float, timedelta]] = None,
        keygen: Optional[Keygen] = None,
        pickler: Optional[Pickler] = None,
        size: Optional[int] = None,
    ) -> Decoratee:
        # Called with only keyword options: return a partial for later application.
        if _decoratee is None:
            return partial(memoize, db_path=db_path, duration=duration, keygen=keygen, pickler=pickler, size=size)
        if inspect.isclass(_decoratee):
            assert db_path is None, 'Class memoization not allowed with db.'
            # Memoize instantiation by memoizing the metaclass __call__.
            class WrappedMeta(type(_decoratee)):
                # noinspection PyMethodParameters
                @memoize(duration=duration, size=size)
                def __call__(cls, *args, **kwargs):
                    return super().__call__(*args, **kwargs)
            class Wrapped(_decoratee, metaclass=WrappedMeta):
                pass
            return type(_decoratee.__name__, (Wrapped,), {'__doc__': _decoratee.__doc__})
        db = connect(f'{db_path}') if db_path is not None else None
        # A bare number is interpreted as seconds.
        duration = timedelta(seconds=duration) if isinstance(duration, (int, float)) else duration
        assert (duration is None) or (duration.total_seconds() > 0)
        pickler = pickle if pickler is None else pickler
        assert (size is None) or (size > 0)
        fn = _decoratee
        # Parameter name -> declared default (inspect.Parameter.empty when none).
        default_kwargs: Mapping[str, Any] = {
            k: v.default for k, v in inspect.signature(fn).parameters.items()
        }
        # Coroutine functions get the async driver; everything else the sync one.
        if inspect.iscoroutinefunction(_decoratee):
            decorator_cls = _AsyncMemoize
        else:
            decorator_cls = _SyncMemoize
        # noinspection PyArgumentList
        decorator = decorator_cls(
            db=db,
            default_kwargs=default_kwargs,
            duration=duration,
            fn=fn,
            keygen=keygen,
            pickler=pickler,
            size=size,
        ).get_decorator()
        _Memoize._all_decorators.add(decorator)
        return wraps(_decoratee)(decorator)

    @staticmethod
    def reset_all() -> None:
        """Reset (empty) every memoize cache created in this process."""
        for decorator in _Memoize._all_decorators:
            decorator.memoize.reset()
# Public singleton: usable bare (@memoize) or with options (@memoize(size=2)).
memoize = _Memoize()
| atools/_memoize_decorator.py | 25,461 | Decorates a function call and caches return value for given inputs.
- If `db_path` is provided, memos will persist on disk and reloaded during initialization.
- If `duration` is provided, memos will only be valid for given `duration`.
- If `keygen` is provided, memo hash keys will be created with given `keygen`.
- If `pickler` is provided, persistent memos will (de)serialize using given `pickler`.
- If `size` is provided, LRU memo will be evicted if current count exceeds given `size`.
### Examples
- Body will run once for unique input `bar` and result is cached.
```python3
@memoize
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
foo(2) # Function actually called. Result cached.
```
- Same as above, but async.
```python3
@memoize
async def foo(bar) -> Any: ...
# Concurrent calls from the same event loop are safe. Only one call is generated. The
# other nine calls in this example wait for the result.
await asyncio.gather(*[foo(1) for _ in range(10)])
```
- Classes may be memoized.
```python3
@memoize
class Foo:
    def __init__(self, _): ...
Foo(1) # Instance is actually created.
Foo(1) # Instance not created. Cached instance returned.
Foo(2) # Instance is actually created.
```
- Calls `foo(1)`, `foo(bar=1)`, and `foo(1, baz='baz')` are equivalent and only cached once.
```python3
@memoize
def foo(bar, baz='baz'): ...
```
- Only 2 items are cached. Acts as an LRU.
```python3
@memoize(size=2)
def foo(bar) -> Any: ...
foo(1) # LRU cache order [foo(1)]
foo(2) # LRU cache order [foo(1), foo(2)]
foo(1) # LRU cache order [foo(2), foo(1)]
foo(3) # LRU cache order [foo(1), foo(3)], foo(2) is evicted to keep cache size at 2
```
- Items are evicted after 1 minute.
```python3
@memoize(duration=datetime.timedelta(minutes=1))
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
sleep(61)
foo(1) # Function actually called. Cached result was too old.
```
- Memoize can be explicitly reset through the function's `.memoize` attribute
```python3
@memoize
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
foo.memoize.reset()
foo(1) # Function actually called. Cache was emptied.
```
- Current cache length can be accessed through the function's `.memoize` attribute
```python3
@memoize
def foo(bar) -> Any: ...
foo(1)
foo(2)
len(foo.memoize) # returns 2
```
- Alternate memo hash function can be specified. The inputs must match the function's.
```python3
Class Foo:
@memoize(keygen=lambda self, a, b, c: (a, b, c)) # Omit 'self' from hash key.
def bar(self, a, b, c) -> Any: ...
a, b = Foo(), Foo()
# Hash key will be (a, b, c)
a.bar(1, 2, 3) # LRU cache order [Foo.bar(a, 1, 2, 3)]
# Hash key will again be (a, b, c)
# Be aware, in this example the returned result comes from a.bar(...), not b.bar(...).
b.bar(1, 2, 3) # Function not called. Cached result returned.
```
- If part of the returned key from keygen is awaitable, it will be awaited.
```python3
async def awaitable_key_part() -> Hashable: ...
@memoize(keygen=lambda bar: (bar, awaitable_key_part()))
async def foo(bar) -> Any: ...
```
- If the memoized function is async and any part of the key is awaitable, it is awaited.
```python3
async def morph_a(a: int) -> int: ...
@memoize(keygen=lambda a, b, c: (morph_a(a), b, c))
def foo(a, b, c) -> Any: ...
```
- Properties can be memoized.
```python3
    class Foo:
@property
@memoize
def bar(self) -> Any: ...
a = Foo()
a.bar # Function actually called. Result cached.
a.bar # Function not called. Cached result returned.
b = Foo() # Memoize uses 'self' parameter in hash. 'b' does not share returns with 'a'
b.bar # Function actually called. Result cached.
b.bar # Function not called. Cached result returned.
```
- Be careful with eviction on instance methods. Memoize is not instance-specific.
```python3
    class Foo:
@memoize(size=1)
def bar(self, baz) -> Any: ...
a, b = Foo(), Foo()
a.bar(1) # LRU cache order [Foo.bar(a, 1)]
b.bar(1) # LRU cache order [Foo.bar(b, 1)], Foo.bar(a, 1) is evicted
a.bar(1) # Foo.bar(a, 1) is actually called and cached again.
```
- Values can persist to disk and be reloaded when memoize is initialized again.
```python3
@memoize(db_path=Path.home() / '.memoize')
def foo(a) -> Any: ...
foo(1) # Function actually called. Result cached.
# Process is restarted. Upon restart, the state of the memoize decorator is reloaded.
foo(1) # Function not called. Cached result returned.
```
- If not applied to a function, calling the decorator returns a partial application.
```python3
memoize_db = memoize(db_path=Path.home() / '.memoize')
@memoize_db(size=1)
def foo(a) -> Any: ...
@memoize_db(duration=datetime.timedelta(hours=1))
def bar(b) -> Any: ...
```
- Comparison equality does not affect memoize. Only hash equality matters.
```python3
# Inherits object.__hash__
class Foo:
# Don't be fooled. memoize only cares about the hash.
def __eq__(self, other: Foo) -> bool:
return True
@memoize
def bar(foo: Foo) -> Any: ...
foo0, foo1 = Foo(), Foo()
assert foo0 == foo1
bar(foo0) # Function called. Result cached.
bar(foo1) # Function called again, despite equality, due to different hash.
```
### A warning about arguments that inherit `object.__hash__`:
It doesn't make sense to keep a memo if it's impossible to generate the same input again. Inputs
that inherit the default `object.__hash__` are unique based on their id, and thus, their
location in memory. If such inputs are garbage-collected, they are gone forever. For that
reason, when those inputs are garbage collected, `memoize` will drop memos created using those
inputs.
- Memo lifetime is bound to the lifetime of any arguments that inherit `object.__hash__`.
```python3
# Inherits object.__hash__
class Foo:
...
@memoize
def bar(foo: Foo) -> Any: ...
bar(Foo()) # Memo is immediately deleted since Foo() is garbage collected.
foo = Foo()
bar(foo) # Memo isn't deleted until foo is deleted.
del foo # Memo is deleted at the same time as foo.
```
- Types that have specific, consistent hash functions (int, str, etc.) won't cause problems.
```python3
@memoize
def foo(a: int, b: str, c: Tuple[int, ...], d: range) -> Any: ...
foo(1, 'bar', (1, 2, 3), range(42)) # Function called. Result cached.
foo(1, 'bar', (1, 2, 3), range(42)) # Function not called. Cached result returned.
```
- Classmethods rely on classes, which inherit from `object.__hash__`. However, classes are
almost never garbage collected until a process exits so memoize will work as expected.
```python3
class Foo:
@classmethod
@memoize
def bar(cls) -> Any: ...
foo = Foo()
foo.bar() # Function called. Result cached.
foo.bar() # Function not called. Cached result returned.
del foo # Memo not cleared since lifetime is bound to class Foo.
foo = Foo()
foo.bar() # Function not called. Cached result returned.
foo.bar() # Function not called. Cached result returned.
```
- Long-lasting object instances that inherit from `object.__hash__`.
```python3
class Foo:
@memoize
def bar(self) -> Any: ...
foo = Foo()
foo.bar() # Function called. Result cached.
# foo instance is kept around somewhere and used later.
foo.bar() # Function not called. Cached result returned.
```
- Custom pickler may be specified for unpickleable return types.
```python3
import dill
    @memoize(db_path='~/.memoize', pickler=dill)
def foo() -> Callable[[], None]:
return lambda: None
```
Returns all params (args, kwargs, and missing default kwargs) for function as kwargs.
pragma: no cover pragma: no cover noinspection PyUnresolvedReferences The value has no significance. We're using the dict entirely for ordering keys. pragma: no cover noinspection PyMethodParameters noinspection PyArgumentList | 8,643 | en | 0.751141 |
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
    """Configuration File Object"""

    def __init__(self, keylist, header=None):
        """
        Initializes the ConfigList object by transforming
        a list of keywords into a structured list including
        beam descriptions.

        Parameters
        ----------
        keylist: list
            List of configuration keys
        header: str
            the header string
        """
        # beam indices which might be found in the file
        idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
                  'L', 'M', 'N', 'O', 'P', 'Q']
        # create the (visible) dictionary
        self.beams = {}
        # create the hidden beam list
        self._beams = []
        # store the header
        self.header = header
        # load the general required keywords
        self.gkeys = self._find_gkeys(keylist)
        # try to load beams as long as there
        # are keywords and as long as there
        # are candidate beam identifiers
        iindex = 0
        while (len(keylist) > 0 and iindex < len(idents)):
            try:
                # try to load a beam
                self._beams.append(ConfigBeam(idents[iindex], keylist))
                # map the identifier to the beam just appended; using
                # [-1] instead of [iindex] keeps the mapping correct
                # even when earlier beam letters are absent in the file
                self.beams[idents[iindex]] = self._beams[-1]
            except BeamNotFound:
                # no information on this beam is in the file
                pass
            # enhance the counter
            iindex += 1
        # inform about the useless keywords
        if len(keylist) > 0:
            _log.info('\nDispensable Keywords: ')
            for key in keylist:
                _log.info(key)

    def __str__(self):
        """String method for the class

        The method transforms the configuration
        file object into its string representation.

        Returns
        -------
        a string representation of the object
        """
        # take the string of the header
        rstring = str(self.header) + '\n'
        # add the strings for the global keys
        for key in self.gkeys:
            rstring += str(key)
        # add the strings of all beams
        for beam in self._beams:
            rstring += str(beam)
        # return the total string
        return rstring

    def __delitem__(self, item):
        # find the index of the requested item
        index = self._find_gkey(item)
        # check whether the item was found;
        # unknown keywords are silently ignored
        if index > -1:
            del self.gkeys[index]

    def __getitem__(self, item):
        # find the index of the requested item
        index = self._find_gkey(item)
        # check whether the item was found
        if index > -1:
            # return the identified item
            return self.gkeys[index].keyvalue
        else:
            # fall back to the beam dictionary
            if item in self.beams.keys():
                return self.beams[item]
            else:
                # return NULL for unknown items
                return None

    def _find_gkey(self, item):
        """Find the index of a global keyword.

        Parameters
        ----------
        item: str
            name of the requested keyword

        Returns
        -------
        index: int
            the index of the keyword, or -1 if not found
        """
        # set the default return value
        found = -1
        # go over all items
        for index in range(len(self.gkeys)):
            # check whether it is the right item
            if self.gkeys[index].keyword == item:
                # set the return value to the index
                found = index
        # return the result
        return found

    def _load_file(self, filename):
        """Configuration file --> keyword list

        The method loads a configuration file and
        extracts all valid keyword-keyvalue-comment information
        from it. The keyword-keyvalue pairs are
        organized and returned as a list of
        configuration key objects.

        Parameters
        ----------
        filename: str
            name of the configuration file

        Returns
        -------
        keylist: list
            list of ConfKey objects
        """
        # initialize the list
        keylist = []
        # open the file and parse through it;
        # the context manager guarantees the handle is closed
        with open(filename, 'r') as fopen:
            for line in fopen:
                # strip the line
                str_line = line.strip()
                # check whether the line contains a keyword
                # (empty lines and '#'-comments are skipped)
                if len(str_line) and str_line[0] != '#':
                    # create and append the keyword
                    keylist.append(self._key_from_line(str_line))
        # return the list
        return keylist

    def _get_gkey_index(self, keyword):
        """Retrieve the index of a global keyword

        The method searches for the index of
        a requested keyword in the list of global
        keywords. If the keyword does not exist,
        the index -1 is returned

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        index: int
            the index of the keyword
        """
        # initialize the return value
        kindex = -1
        # go over all keys
        for index in range(len(self.gkeys)):
            # check whether the current key matches
            if self.gkeys[index].keyword == keyword:
                # return it if it matches
                return index
        # return the default
        return kindex

    def _key_from_line(self, line):
        """Creates a keyword from a line

        The method extracts the configuration keyword,
        the associated value and, if present,
        a comment from a line in the configuration file.
        A configuration key object representing the extracted
        keyword is created and returned.

        Parameters
        ----------
        line: str
            line to analyze

        Returns
        -------
        configuration key object

        Raises
        ------
        aXeError
            if the line contains only a single item
        """
        # split the line into items
        items = line.split()
        # for more than one item the
        # first item is the keyword
        if len(items) > 1:
            keyword = items[0].strip()
            # check for a comment (';' separator)
            cpos = line.rfind(';')
            if cpos < 0:
                # evaluate the keyvalue
                keyvalue = line[line.find(keyword)+len(keyword):].strip()
                comment = None
            else:
                # evaluate keyvalue and comment
                tmp_val = line[line.find(keyword)+len(keyword):].strip()
                keyvalue = tmp_val.split(';')[0].strip()
                comment = tmp_val.split(';')[1].strip()
        else:
            # something's wrong here
            err_msg = 'Only one item in: ' + line + ' !'
            raise aXeError(err_msg)
        # create and return the keyword
        return ConfKey(keyword, keyvalue, comment)

    def _find_gkeys(self, keylist):
        """Finds and extracts the global keywords

        The method finds all predefined global keywords in
        a keyword list. The list of global keywords is
        returned. Their counterparts in the input keyword list
        are deleted.

        Parameters
        ----------
        keylist: list
            list of keywords

        Returns
        -------
        keys: list
            global keywords
        """
        gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
                     'SCIENCE_EXT', 'ERRORS_EXT',
                     'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
                     'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
                     'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
                     'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
                     'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
                     'POBJSIZE', 'SMFACTOR']
        # initialize the global keylist
        # and the list with indices to be deleted
        gkeys = []
        dindex = []
        # go over the keylist read in,
        # keeping an index variable
        iindex = 0
        for key in keylist:
            # identify the current keyword in the
            # list of possible ones
            if key.keyword in gkeywords:
                # store the index
                dindex.append(iindex)
                # create and append the new keyword
                gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
            iindex += 1
        # delete the input keywords which
        # have been 'used'; delete from the back
        # so earlier indices stay valid
        dindex.sort()
        dindex.reverse()
        for index in dindex:
            del keylist[index]
        # return the list of global keys
        return gkeys

    def _check_gfiles(self):
        """Checks whether all files exist

        The method checks whether the files whose names
        are within the class data do exist or not.
        An error is reported in case that the files
        do not exist.

        Raises
        ------
        aXeError
            if a referenced file does not exist
        """
        # list of the root of all
        # global keys indicating a file
        fkeys = ['FFNAME']
        # go over all file keywords
        for key in fkeys:
            # identify the keyword in the list
            index = self._get_gkey_index(key)
            # check for existence
            if index > -1:
                # extract the keyvalue
                kvalue = self.gkeys[index].keyvalue
                # if the keyvalue is NOT 'None' but the file does not
                # exist; NOTE: this must be a value comparison ('!='),
                # the original identity check ('is not') was always True
                if ((kvalue.upper() != 'NONE') and
                        (not os.path.isfile(config_util.getCONF(kvalue)))):
                    # report an error
                    err_msg = ("The file: {0:s} does not exist!"
                               .format(config_util.getCONF(kvalue)))
                    raise aXeError(err_msg)

    def get_gkey(self, keyword):
        """Retrieve a requested global keyword

        The method searches the list of global keywords
        for a fitting keyword. In case that the requested
        keyword exists, it is returned.
        If not 'None' is returned

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        key: ConfKey or None
            the requested keyword or 'None'
        """
        # initialize the return value
        rkey = None
        # search for the index in the keyword list
        index = self._get_gkey_index(keyword)
        # check whether the keyword exists
        if index > -1:
            # return the keyword
            return self.gkeys[index]
        else:
            # return the default
            return rkey

    def add_gkey(self, keyword, keyvalue, comment=None):
        """Add global keyword

        The method adds a keyword to the list of global
        keywords. In case that the keyword just exists,
        it is overwritten, otherwise it is appended
        to the global keyword list.

        Parameters
        ----------
        keyword: str
            name of the requested keyword
        keyvalue: any
            value of the requested keyword
        comment: str
            comment for the keyword
        """
        # search for the index in the keyword list
        index = self._get_gkey_index(keyword)
        if index > -1:
            # if it matches, copy the data
            self.gkeys[index].keyvalue = keyvalue
            self.gkeys[index].comment = comment
        else:
            # the keyword does not yet exist, just create and add it
            self.gkeys.append(ConfKey(keyword, keyvalue, comment))

    def get_gvalue(self, keyword):
        """Retrieve a requested global keyword value

        The method returns the value of the keyword
        which matches the requested value.
        If there is no matching keyword, 'None'
        is returned.

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        The keyword value
        """
        # set the default return value
        rvalue = None
        # search for the keyword
        key = self.get_gkey(keyword)
        # check whether it is non-NULL
        if key:
            # extract the value
            rvalue = key.keyvalue
        # return the value
        return rvalue

    def writeto(self, filename):
        """Save the object to a file

        The method saves the object to a file
        with name specified in the input.

        Parameters
        ----------
        filename: str
            name of the file
        """
        # destroy the old file
        if os.path.isfile(filename):
            os.unlink(filename)
        # write the string representation to the new file;
        # the context manager guarantees the handle is closed
        with open(filename, 'w') as ofile:
            ofile.write(str(self))

    def flush(self):
        """Save the object back to file

        The method saves the object back to a file
        with the identical filename it was read from.
        NOTE: relies on 'self.filename', which is set by the
        subclass ConfigFile.
        """
        # just use the more general method
        self.writeto(self.filename)

    def check_files(self, check_glob=True):
        """Checks whether all files exist

        The method checks whether the files whose names
        are within the class data do exist or not.
        An error is reported in case that the files
        do not exist.

        Parameters
        ----------
        check_glob: bool
            if True, also check the global (non-beam) files

        Returns
        -------
        n_sens: int
            the number of existing sensitivity files
        """
        n_sens = 0
        # check global files if desired
        if check_glob:
            self._check_gfiles()
        # check the files of every beam
        for bkey in self.beams.keys():
            n_sens += self.beams[bkey].check_files()
        # return the number
        # of existing sensitivity files
        return n_sens
class ConfigFile(ConfigList):
    """Configuration File Object"""

    def __init__(self, filename=None):
        """
        Initializes the ConfigFile object either
        by reading in a configuration file
        or by creating a default configuration file

        Parameters
        ----------
        filename: str
            name of the configuration file
        """
        # log the file the object is built from; the original
        # f-string had no placeholder and always printed "(unknown)"
        _log.info(f"Initializing configfile with {filename}")
        # check if a filename is given
        if filename is None:
            # load the default
            _log.info('No file given, can do nothing!!')
        else:
            # safe the file name
            self.filename = filename
            # create a keyword list
            keylist = self._load_file(filename)
            # load the header
            header = ConfHeader(filename)
            super(ConfigFile, self).__init__(keylist, header)

    def _get_simul_name(self):
        """Get the filename used in aXeSIM"""
        # just add '.simul' and return the result
        return self.filename + '.simul'

    def confirm_extrkeys(self):
        """Confirm that all keywords for the extraction exist

        Returns
        -------
        extr_ready: int
            1 if 'POBJSIZE' and 'SMFACTOR' exist and are
            non-negative, 0 otherwise
        """
        # default is true!
        extr_ready = 1
        # check existence of 'POBJSIZE'
        if self['POBJSIZE'] is None:
            extr_ready = 0
        # check for reasonable value
        elif float(self['POBJSIZE']) < 0.0:
            extr_ready = 0
        # check existence of 'SMFACTOR'
        if self['SMFACTOR'] is None:
            extr_ready = 0
        # check for reasonable value
        elif float(self['SMFACTOR']) < 0.0:
            extr_ready = 0
        # return the value
        return extr_ready

    def confirm_lambda_psf(self):
        """Check whether a 'lambda_psf' value is needed, provide one

        Returns
        -------
        lambda_psf: float or None
            the mean of the 'PSFRANGE' interval, or None if the
            PSF keywords are not present
        """
        # check whether 'lambda_psf' is needed
        if ((self['PSFCOEFFS'] is not None) and
                (self['PSFRANGE'] is not None)):
            # split the term
            psf_range = self['PSFRANGE'].split()
            # extract the defined range as float
            lambda_min = float(psf_range[0])
            lambda_max = float(psf_range[1])
            # make 'lambda_psf' to the mean value
            lambda_psf = 0.5 * (lambda_max + lambda_min)
        else:
            # leave it at None
            lambda_psf = None
        # return the value
        return lambda_psf

    def axesim_prep(self):
        """Removes and modifies some keywords for aXeSIM

        The method normalizes the configuration for simulation use
        and writes it to a new '<filename>.simul' file.

        Returns
        -------
        the basename of the simulation configuration file
        """
        # derive the new configuration file name
        new_name = self._get_simul_name()
        # check whether the science extension has other
        # than the allowed values
        if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
            # find the index of the science extension
            index = self._find_gkey('SCIENCE_EXT')
            # check whether the item was found
            if index > -1:
                # set it to the allowed value
                self.gkeys[index].keyvalue = 'SCI'
        # check whether the telescope area is known
        if self['TELAREA'] is None:
            # set the telescope area to the
            # Hubble default
            self.add_gkey('TELAREA', 45238.93)
        # remove all OPTKEYi/OPTVALi pairs
        index = 1
        while self['OPTKEY'+str(index)] is not None:
            del self['OPTKEY'+str(index)]
            del self['OPTVAL'+str(index)]
            index += 1
        # just make sure that
        # the error- and dq-
        # extensions are set
        self.add_gkey('ERRORS_EXT', 'ERR')
        self.add_gkey('DQ_EXT', 'DQ')
        # write the file back
        self.writeto(new_name)
        # return the basic filename of the
        # simulation configuration file
        return os.path.basename(new_name)
class ConfigBeam:
    """Configuration Beam object"""

    def __init__(self, ident=None, keylist=None):
        """
        A configuration beam object is initialized. This is done
        by either extracting the relevant keywords for a certain
        beam from a keyword list or creating a default beam.

        Parameters
        ----------
        ident: char
            beam identification
        keylist: list
            list of keywords

        Raises
        ------
        BeamNotFound
            if the beam is completely absent in the keyword list
        """
        # check if a filename is given
        if ident is None or keylist is None:
            # load the default
            _log.info('No ID or no keywords given, can do nothing!!')
        else:
            # try to load the beam keywords
            try:
                # store the ident
                self.ident = ident
                # load the general beam keywords
                self.beamkeys = self._find_beamkeys(ident, keylist)
                # load the trace keywords
                self.trace = ConfigTrace(ident, keylist)
                # load the dispersion keywords
                self.disp = ConfigDisp(ident, keylist)
            # catch a pure CKeyNotFound exception
            # which is raised if a beam is completely
            # absent in the keyword list
            except CKeyNotFound:
                raise BeamNotFound(ident)

    def __str__(self):
        """String method for the class

        The method transforms the configuration
        beam object into its string representation.
        """
        # initialize the return string
        rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
                   .format(str(self.ident)))
        # add the strings for the global keys
        for key in self.beamkeys:
            rstring += str(key)
        # add the string for the trace
        rstring += str(self.trace)
        # add the string for the dispersion
        # solution
        rstring += str(self.disp)
        # return the total string
        return rstring

    def __getitem__(self, item):
        # compose the full keyword from root and beam ident
        full_item = item + self.ident
        # return the value (None for unknown keywords)
        rvalue = self.get_bvalue(full_item)
        return rvalue

    def __setitem__(self, item, value):
        # compose the full keyword from root and beam ident
        full_item = item + self.ident
        index = self._get_bkey_index(full_item)
        # unknown keywords are silently ignored
        if index > -1:
            self.beamkeys[index].keyvalue = value

    def _find_beamkeys(self, ident, keylist):
        """Load the global beam keywords

        The method extracts all global beam keywords
        from a keyword list. The extracted keywords are returned
        as a list. They are removed from the input list.

        Parameters
        ----------
        ident: char
            beam identification
        keylist: list
            list of keywords

        Raises
        ------
        CKeyNotFound
            if not all mandatory beam keywords are found
        """
        # list of the root of all global
        # beamword keys
        bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
                 'YOFF_', 'SENSITIVITY_']
        # list of optional keywords
        okeys = ['PSF_OFFSET_']
        # append the beam identifier to the
        # keyword roots to get a list of keywords
        # to search for
        id_keys = []
        for key in bkeys:
            id_keys.append(key + ident)
        # initiate and fill
        # collect a list of optional keywords
        opt_keys = []
        for key in okeys:
            opt_keys.append(key + ident)
        # here is some kind of extra
        # keyword
        # ekey = 'DLD1P_' + ident + '_PRANGE'
        opt_keys.append('DLD1P_' + ident + '_PRANGE')
        # initialize the global keylist
        # and the list with indices to be deleted
        bkeys = []
        dindex = []
        # go over the keylist read in,
        # keeping an index variable
        iindex = 0
        nfound = 0
        for key in keylist:
            # identify the current keyword in the
            # list of possible ones
            if key.keyword in id_keys:
                # store the index
                dindex.append(iindex)
                # create and append the new keyword
                bkeys.append(ConfKey(key.keyword,
                                     key.keyvalue, key.comment))
                # enhance the number of keywords found
                nfound += 1
            elif key.keyword in opt_keys:
                # store the index
                dindex.append(iindex)
                # create and append the new keyword
                bkeys.append(ConfKey(key.keyword,
                                     key.keyvalue, key.comment))
            # enhance the index
            iindex += 1
        # check whether all keywords were found
        if nfound < len(id_keys):
            # raise an exception if not
            raise CKeyNotFound('general')
        # delete the input keywords which
        # have been 'used'; delete from the back
        # so earlier indices stay valid
        dindex.sort()
        dindex.reverse()
        for iindex in dindex:
            del keylist[iindex]
        # return the list of global keys
        return bkeys

    def _get_bkey_index(self, keyword):
        """Retrieve the index of a beam keyword

        The method searches for the index of
        a requested keyword in the list of beam
        keywords. If the keyword does not exist,
        the index -1 is returned

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        index: int
            the index of the keyword
        """
        # initialize the return value
        bindex = -1
        # go over all keys
        for index in range(len(self.beamkeys)):
            # check whether the current key matches
            if self.beamkeys[index].keyword == keyword:
                # return it if it matches
                return index
        # return the default
        return bindex

    def get_bkey(self, keyword):
        """Retrieve a requested beam keyword

        The method searches the list of beam keywords
        for a fitting keyword. In case that the requested
        keyword exists, it is returned.
        If not 'None' is returned

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        key: ConfKey or None
            the requested keyword or 'None'
        """
        # initialize the return value
        rkey = None
        # search for the index in the keyword list
        index = self._get_bkey_index(keyword)
        # check whether the keyword exists
        if index > -1:
            # return the keyword
            return self.beamkeys[index]
        else:
            # return the default
            return rkey

    def get_bvalue(self, keyword):
        """Retrieve a requested beam-keyword value

        The method returns the value of the keyword
        which matches the requested value.
        If there is no matching keyword, 'None'
        is returned.

        Parameters
        ----------
        keyword: str
            name of the requested keyword

        Returns
        -------
        the keyword value or 'None'
        """
        # set the default return value
        rvalue = None
        # search for the keyword
        key = self.get_bkey(keyword)
        # check whether it is non-NULL
        if key:
            # extract the value
            rvalue = key.keyvalue
        # return the value
        return rvalue

    def check_files(self):
        """Checks whether all files exist

        The method checks whether the files whose names
        are within the class data do exist or not.
        An error is reported in case that the files
        do not exist.

        Returns
        -------
        n_sens: int
            the number of existing sensitivity files

        Raises
        ------
        aXeError
            if a referenced file does not exist
        """
        n_sens = 0
        # list of the root of all
        # beamword keys indicating a file
        fkeys = ['SENSITIVITY_']
        # append the beam identifier to the
        # keyword roots to get the full keyname
        for key in fkeys:
            full_keyword = key + self.ident
            # go over all beam keys
            for bkey in self.beamkeys:
                # check whether the current keyword is right
                # and whether the keyvalue is not 'None';
                # NOTE: these must be value comparisons ('=='/'!='),
                # the original identity checks ('is'/'is not') can
                # never match dynamically built strings
                if ((bkey.keyword == full_keyword) and
                        (bkey.keyvalue.upper() != 'NONE')):
                    # check for the file
                    if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
                        # report an error
                        err_msg = ("The file: {0:s} does not exist!"
                                   .format(config_util.getCONF(bkey.keyvalue)))
                        raise aXeError(err_msg)
                    else:
                        n_sens += 1
        return n_sens
class TwoDimPolyN:
    """Object for a polynomial with 2D variance"""

    def __str__(self):
        """The method transforms the 2D polynomial object into its str
        representation.

        Returns
        -------
        object: str
            string representation of the object
        """
        # initialize the return string with the order keyword
        rstring = str(self.norder)
        # append all coefficient keywords
        for key in self.twodkeys:
            rstring += str(key)
        # return the total string
        return rstring

    def __getitem__(self, index):
        """Getindex method for the class

        The operator method which is called
        when an index is requested on a
        class instance

        test = kkk[0]

        Parameters
        ----------
        index: int
            the index to address

        Returns
        -------
        key : ConfListKey
            the indexed object

        Raises
        ------
        aXeError
            if the index is out of range
        """
        # check whether the index exists
        if index > len(self.twodkeys)-1:
            # raise an exception
            err_msg = "Index: {0:s} does not exist!".format(str(index))
            raise aXeError(err_msg)
        # return the indexed object
        return self.twodkeys[index]

    def __setitem__(self, index, obj):
        """Setindex method for the class

        The operator method which is called
        when the index of a class instance is
        set to a value.

        kkk[0] = test

        Parameters
        ----------
        index: int
            the index to address
        obj: ConfListKey
            description of the object content

        Raises
        ------
        aXeError
            if the index is out of range or the object
            has the wrong type
        """
        # check whether the index exists
        if (index > (len(self.twodkeys))-1):
            # raise an exception
            err_msg = 'Index ' + str(index) + ' does not exist!'
            raise aXeError(err_msg)
        # check whether the input type is correct;
        # NOTE: the original called isinstance(type(self[0]), obj)
        # with swapped arguments, which raised a TypeError instead
        # of validating the assigned object
        elif not isinstance(obj, type(self[0])):
            # raise an exception
            err_msg = ("Object: {0:s} has wrong type: {1:s}!"
                       .format(str(obj), str(type(obj))))
            raise aXeError(err_msg)
        # set the index to the input object
        self.twodkeys[index] = obj

    def _find_order(self, prefix, ident, keylist):
        """Find the keyword with the polynomial order

        The method finds and extracts the keyword
        indicating the polynomial degree from
        a keyword list. The keyword is returned.

        Parameters
        ----------
        prefix: str
            keyword prefix
        ident: char
            beam identification
        keylist: list
            list of keywords

        Returns
        -------
        keyword: ConfKey
            keyword with number of orders
        """
        # create the name of the keyword with the
        # polynomial order
        order_key = prefix + 'ORDER_' + ident
        # extract and return the keyword from the
        # keyword list
        return self._find_key(order_key, keylist)

    def _find_twodkeys(self, prefix, ident, keylist):
        """Find all 2D polynomial keywords

        Given a prefix and a beam identifier the method
        extracts all orders of the 2D polynomial which
        describes the trace or dispersion. The number
        of orders expected is taken from the object data.

        Parameters
        ----------
        prefix: str
            keyword prefix
        ident: char
            beam identification
        keylist: list
            list of keywords

        Returns
        -------
        keys: list
            list of keywords

        Raises
        ------
        CKeyLengthWrong
            if a keyword has an invalid number of values
        """
        # initialize an empty list
        twodkeys = []
        # for each expected keyword
        for ii in range(int(self.norder.keyvalue)+1):
            # form the keyword name
            twodkey = prefix + ident + '_' + str(ii)
            # extract the new keyword as a list keyword
            newkey = self._find_key(twodkey, keylist, 1)
            if self._check_twodkey(newkey):
                # extract the keyword and append it to the list
                twodkeys.append(newkey)
            else:
                raise CKeyLengthWrong(ident, twodkey)
        # return the list
        return twodkeys

    def _find_key(self, keyword, keylist, lkey=0):
        """Extract a certain keyword from the list

        The method searches for a particular keyword
        in a keyword list. If found, the keyword is
        copied and destroyed in the input list.
        If not found, an exception is fired.

        Parameters
        ----------
        keyword: str
            the keyword name
        keylist: list
            list of keywords
        lkey: int
            if non-zero, create a ConfListKey instead of a ConfKey

        Returns
        -------
        keyword: ConfKey or ConfListKey
            the extracted keyword

        Raises
        ------
        CKeyNotFound
            if the keyword is not in the list
        """
        # initialize the index
        iindex = 0
        # set indicator to "not found"
        found = -1
        # go over all keys in the list
        for key in keylist:
            # check whether the keyword is the desired one
            if key.keyword == keyword:
                # create a list keyword if desired
                if lkey:
                    nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
                else:
                    nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
                # store the index
                found = iindex
            # enhance the index
            iindex += 1
        # fire an exception if nothing was found
        if found < 0:
            raise CKeyNotFound(keyword)
        # delete the keyword from the inlist
        else:
            del keylist[found]
        # return the keyword
        return nkey

    def _check_twodkey(self, inkey):
        """Check the length of a field dependent keyword

        Field dependent keywords such as the polynomial
        coefficients in the trace description and dispersion
        solution must have a certain number of values,
        which is:
        n = m^2/2 + m/2

        The method checks whether the number of values
        is in agreement with this.

        Parameters
        ----------
        inkey: ConfListKey
            the keyword to check

        Returns
        -------
        int
            1 if the length is valid, 0 otherwise
        """
        # determine the length of the list
        n = float(len(inkey.kvallist))
        # compute the 'order' of the xy-dependence
        # by inverting n = m*(m+1)/2
        m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
        # check whether the 'order' is integer
        if math.fabs(m-int(m)) > 1.0e-16:
            # no integer -> key length wrong
            return 0
        # integer -> key length correct
        return 1

    def str_header(self, description):
        """Create a header string

        The method offers to the subclasses the possibility
        to have a meaningful string header before the
        actual data string.

        Parameters
        ----------
        description: str
            description of the object content

        Returns
        -------
        rstring: str
            the header string
        """
        # pre-decoration
        rstring = '\n#\n# '
        # add description
        rstring += description
        # add post-decoration
        rstring += ':\n#\n'
        # return the result
        return rstring
class ConfigTrace(TwoDimPolyN):
    """Configuration Beam object"""

    def __init__(self, ident=None, keylist=None):
        """The method initializes a configuration beam
        object for a given beam identifier.
        All necessary keywords are extracted from
        an input keyword list.
        In case of missing keywords an exception
        is fired.

        Parameters
        ----------
        ident: char
            beam identification
        keylist: list
            list of keywords

        Raises
        ------
        TraceNotFound
            if mandatory keywords are missing or have
            the wrong length
        """
        # try to read in the keywords
        try:
            self.ident = ident
            self.norder = self._find_order('DYDX_', ident, keylist)
            self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
        # raise an exception if keywords are missing
        except CKeyNotFound as e:
            raise TraceNotFound(ident, e.keyword)
        except CKeyLengthWrong as e:
            # NOTE: the original only logged a truncated message and
            # returned, leaving 'twodkeys' unset; raise instead, in
            # line with the handling in ConfigDisp
            _log.info('\nField dependent keyword: {0:s} has wrong length!'
                      .format(e.keyword))
            raise TraceNotFound(ident, e.keyword)

    def __str__(self):
        """Returns string representation of the object"""
        # create the label or description
        description = 'Trace description for Beam ' + str(self.ident)
        # get the string header
        rstring = super(ConfigTrace, self).str_header(description)
        # get the data string
        rstring += super(ConfigTrace, self).__str__()
        # return the result
        return rstring
class ConfigDisp(TwoDimPolyN):
    """Dispersion solution description for a single beam."""
    def __init__(self, ident=None, keylist=None):
        """The method initializes a configuration dispersion
        object for a given beam identifier.
        All necessary keywords are extracted from
        an input keyword list.
        In case of missing keywords an exception
        is fired.

        Parameters
        ----------
        ident: char
            beam identification
        keylist: list
            list of keywords

        Raises
        ------
        DispNotFound
            if neither the 'DLDP_' nor the 'DLD1P_' keywords can
            be extracted, or a keyword has the wrong length
        """
        # try to read in the keywords; first attempt uses the
        # field-dependent 'DLDP_' coefficient keywords
        try:
            self.ident = ident
            self.norder = self._find_order('DISP_', ident, keylist)
            self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
        # if the 'DLDP_' keywords are missing, fall back to the
        # alternative 'DLD1P_' keyword set (presumably a
        # field-independent form -- inferred from the prefix)
        except CKeyNotFound as e:
            try:
                self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
            # raise an exception if keywords are missing
            except CKeyNotFound as e:
                raise DispNotFound(ident, e.keyword)
            except CKeyLengthWrong as e:
                _log.info('\nField dependent keyword: {0:s} has wrong length!'
                          .format(e.keyword))
                raise DispNotFound(ident, e.keyword)
        # a wrong keyword length in the primary set is fatal as well
        except CKeyLengthWrong as e:
            _log.info('\nField dependent keyword: {0:s} has wrong length!'
                      .format(e.keyword))
            raise DispNotFound(ident, e.keyword)
    def __str__(self):
        """Return the string representation of the object."""
        # create the label or description
        description = 'Dispersion solution for Beam ' + str(self.ident)
        # get the string header
        rstring = super(ConfigDisp, self).str_header(description)
        # get the data string
        rstring += super(ConfigDisp, self).__str__()
        # return the result
        return rstring
class DefConfHeader:
    """Default header for a configuration file."""

    def __init__(self):
        # a single default banner entry; kept as a list so that
        # subclasses can collect further header lines
        self.header = [
            "#-----------------------------------------------"
            "------------\n# Default configuration file for aXe"
            "\n#\n#-------------------------------------------"
            "---------------"
        ]

    def __str__(self):
        """Return the string representation of the object."""
        return ''.join(self.header)
class ConfHeader(DefConfHeader):
    """Header class for the configuration file"""

    def __init__(self, filename=None):
        """Initializes the configuration header class

        The method extracts the header from a configuration
        file. If no filename is provided, a default
        header is created.

        Parameters
        ----------
        filename: str
            name of the configuration file
        """
        # no filename -> default header
        if filename is None:
            super(ConfHeader, self).__init__()
        else:
            # initialize the data list
            self.header = []

            # open and parse through the file; the context manager
            # guarantees the handle is closed (the original called
            # 'fopen.close' without parentheses, never closing it)
            with open(filename, 'r') as fopen:
                for line in fopen:
                    # strip the line
                    str_line = line.strip()

                    # a leading '#' qualifies the line as part of the
                    # header; compare with '==' ('is' on str literals
                    # relies on interning and warns on modern Python)
                    if len(str_line) > 0 and str_line[0] == '#':
                        # append the line to the header data
                        self.header.append(str_line + '\n')
                    else:
                        # first non-comment line ends the header
                        break
class ConfKey:
    """Class for a keyword in a configuration file

    A light-weight but versatile container for one keyword
    entry of a configuration file. All important values are
    read directly from the object attributes.
    """

    def __init__(self, keyword, keyvalue, comment=None):
        """Store the keyword name, value and optional comment.

        Parameters
        ----------
        keyword: str
            the keyword name
        keyvalue: str
            the keyword value
        comment: str
            the keyword comment
        """
        self.keyword = keyword
        self.keyvalue = keyvalue
        self.comment = comment

    def __str__(self):
        """Return the configuration-file representation of the keyword."""
        # keyword and value, optionally followed by the comment,
        # terminated by a linefeed
        if self.comment is None:
            return self.keyword + ' ' + str(self.keyvalue) + '\n'
        return (self.keyword + ' ' + str(self.keyvalue) +
                ' ; ' + self.comment + '\n')
class ConfListKey(ConfKey):
    """Class for a keyword list

    The keyword list class is a subclass derived from the
    keyword class. In addition it stores the keyvalues
    transformed to a list of floats.
    """

    def __init__(self, keyword, keyvalue, comment=None):
        """Constructor for the keyword list class

        The keyword instance is created using all input values;
        the string keyvalue is additionally split and converted
        to a list of floats.

        Parameters
        ----------
        keyword: str
            the keyword name
        keyvalue: str
            the keyword values
        comment: str
            the keyword comment

        Raises
        ------
        ValueError
            if any item of the keyvalue is not a valid float
        """
        # create a traditional keyword instance
        super(ConfListKey, self).__init__(keyword, keyvalue, comment)

        # split the string keyvalue and convert each item to float
        self.kvallist = [float(value) for value in self.keyvalue.split()]

    def __getitem__(self, index):
        """Getindex method for the class

        Called when an index is requested on a class instance:
        test = kkk[0]

        Parameters
        ----------
        index: int
            the index to address

        Returns
        -------
        obj: float
            the indexed object
        """
        # check whether the index exists
        if index > len(self.kvallist)-1:
            # raise an exception
            err_msg = 'Index: ' + str(index) + ' does not exist!'
            raise aXeError(err_msg)

        # return the indexed object
        return self.kvallist[index]

    def __setitem__(self, index, obj):
        """Setindex method for the class

        Called when the index of a class instance is set:
        kkk[0] = test

        Parameters
        ----------
        index: int
            the index to address
        obj: float
            the value to store at the index
        """
        # check whether the index exists
        if index > len(self.kvallist)-1:
            # raise an exception
            err_msg = 'Index ' + str(index) + ' does not exist!'
            raise aXeError(err_msg)
        # check whether the input type is correct; the original had the
        # isinstance() arguments swapped -- isinstance(type(self[0]), obj)
        # raises TypeError for any non-class 'obj' instead of validating it
        elif not isinstance(obj, type(self[0])):
            # raise an exception
            err_msg = ("Object: {0:s} has wrong type: {1:s}!"
                       .format(str(obj), str(type(obj))))
            raise aXeError(err_msg)

        # set the index to the input object
        self.kvallist[index] = obj

    def __str__(self):
        """returns the string representation of the keyword."""
        # first comes the keyword
        rstring = self.keyword

        # append the keyvalues using a default format
        for value in self.kvallist:
            rstring = rstring + ' %12.6g' % value

        # append the comment
        if self.comment is not None:
            rstring = rstring + ' ; ' + self.comment

        # append a linefeed
        rstring += '\n'

        # return the complete string
        return rstring
class ConfError(Exception):
    """Base class for all configuration-file exceptions in this module."""
class CKeyNotFound(ConfError):
    """Raised when a requested keyword is missing from a keyword list."""

    def __init__(self, keyword):
        # the keyword that could not be found
        self.keyword = keyword
class BeamNotFound(ConfError):
    """Raised when a beam is unknown to the configuration."""

    def __init__(self, ident):
        # identifier of the unknown beam
        self.ident = ident
class TraceNotFound(ConfError):
    """Raised when the trace description of a beam is missing."""

    def __init__(self, ident, keyword=None):
        # beam identifier plus the offending keyword, if known
        self.ident, self.keyword = ident, keyword
class DispNotFound(ConfError):
    """Raised when the dispersion solution of a beam is missing."""

    def __init__(self, ident, keyword=None):
        # beam identifier plus the offending keyword, if known
        self.ident, self.keyword = ident, keyword
class CKeyLengthWrong(ConfError):
    """Raised when a keyword list has the wrong length."""

    def __init__(self, ident, keyword=None):
        # beam identifier plus the offending keyword, if known
        self.ident, self.keyword = ident, keyword
| pyaxe/axesrc/configfile.py | 46,325 | Error for unknown beam
Error for wrong length in KeywordList
Error for missing keyword
Base class for exceptions in this module
Header class for the configuration file
Class for a keyword in a configuration file
This keyword class is a light, but yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. In the keyword list class has as an
additional attribute the keyvalues transformed to a list
of floats.
Configuration Beam object
Configuration Beam object
Configuration File Object
Configuration File Object
Configuration Beam object
Default header for a configuration file
Error for unknown dispersion
Error for unknown trace
Object for a polynomial with 2D variance
Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
Constructor for the keyword class
The keyword instance is created using
all input values.
Parameter
---------
keyword: str
the keword name
keyvalue: str
the keyword value
comment: str
the keyword comment
Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keword name
keyvalue: str
the keyword values
comment: str
the keyword comment
Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
String method for the class
The method transforms the configuration
beam object into its string representation.
The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
Returns string representation of the object
return string representation of the object
returns string representation of the object
String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
returns the string representation of the keyword.
Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
Finds and extracts the global keywords
The method finds the all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
copied and destroyed in the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
Get the filename used in aXeSIM
Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
Configuration file --> keyword list
The method load a configuration file and
extract all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
Removes modifies some keywords
Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
Confirm that all keywords for the extraction exist
Check whether a 'lambda_psf' value is needed, provide one
Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
make sure there is a logger beam indices which might be found the file create the (visible) dictionary create the hidden beam list store the header load the general required keywords try to load beams as long as there are keywords and as long as there are candidate beam numbers try to load a beam no information on this beam is in the file enhance the counter inform about the useless keywords take the string of the header add the strings for the global keys return the total string find the index of the requested item check whether the item was found find the index of the requested item check whether the item was found return the identified item return NULL set the default return value go over all items check whether it is the right item set the return value to the index return the result initialize the liust open the file and parse through it strip the line check whether the line contains a keyword create and append the keyword close the file return the list initialize the return value go over all keys check whether the current key matches return it if it matches return the default split the line into items for more than one item the first item is the keyword check for a comment evaluate the keyvalue evalute keyvalue and comment something's wrong here create and return the keyword initialize the global keylist and the list with indices to be deleted go over the keylist read in, keeping and index variable identify the current keyword in the list of possible ones store the index create and append the new keyword delete the input keywords which have been 'used' return the list of global keys list of the root of all global keys indicating a file go over all file keywords identify the keyword in the list check for existence extract the keyvalue if the keyvalue is NOT None but the file does not exist report an error initialize the return value search for the index in the keyword list check whether the keyword exists return the keyword return the default search for the 
index in the keyword list if it matches, copy the data the keyword does not yet exist, just create and add it def drizzle_check(self): """Check for drizzle keywords The method assures that all necessary drizzle keywords are present. Nonexisting keywords are added with default values. Finally the value for the drizzle kernel is checked against all valid values. Returns ------- bool: True if the drizzle kernel is valid """ list with all valid kernels kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat', 'lanczos2', 'lanczos3'] make sure that some important drizzle keywords are there pself = self.setdefault('DRZPSCALE', 1.0) pfrac = self.setdefault('DRZPFRAC', 1.0) dkernel = self.setdefault('DRZKERNEL', 'square') droot = self.setdefault('DRZROOT', 'aXedrizzle') check for valid drizzle kernel if dkernel not in kernels: return False return True def setdefault(self, keyword, keyvalue, comment=None): """Add global keyword The method mimics the setdefault method for dictionary objects. A keyword is added with the given value and comment, but only in case that it does not yet exist. 
If it exists, nothing is done Parameters ---------- keyword: str name of the requested keyword keyvalue: any value of the requested keyword comment: str comment for the keyword Returns ------- The keyword value """ search for the index in the keyword list index = self._get_gkey_index(keyword) if index < 0: the keyword does not yet exist, just create and add it self.gkeys.append(ConfKey(keyword, keyvalue, comment)) extract the keyvalue value = self.gkeys[-1].keyvalue else: extract the keyvalue value = self.gkeys[index].keyvalue return the keyvalue return value set the default return value search for the keyword check whether it is non-NULL extract the value return the value destroy the old file open the new file write the string to the file close the file just use the more general method check global files if desired create the (visible) dictionary return the number of existing sensitivity files check if a filename is given load the default safe the file name create a keyword list load the header just add '.simul' and return the result default is true! 
check existence of 'POBJSIZE' check for reasonable value check existence of 'SMFACTOR' check for reasonable value return the value check whether 'lambda_psf' is needed split the term extract the defined range as float make 'lambda_psf' to the mean value leave it at None return the value derive the new configuration file name check whether the science extension has other than the allowed values find the index of the sceicne extension check whether the item was found set it to the allowed value check whether the telesocpe are is known set the telescope are to the Hubble default just make sure that the error=- and dq- extensions are set write the file back return the baseic filename of the simulation configuration file check if a filename is given load the default try to load the beam keywords store the ident load the general beam keywords load the trace keywords load the dispersion keywords catch a pure CKeyNotFound exception which is raised if a beam is competely absent in the keyword list initialize the return string add the strings for the global keys add the string for the trace add the string for the dispersion solution return the total string list of the root of all globale beamword keys list of optional keywords appen the beam identifier to the keyword roots to get a list of keywords to search for initiate and fill collect a list of optional keywords here is some kind of extra keyword ekey = 'DLD1P_' + ident + '_PRANGE' initialize the global keylist and the list with indices to be deleted go over the keylist read in, keeping and index variable identify the current keyword in the list of possible ones store the index create and append the new keyword enhance the nuber of keywords found store the index create and append the new keyword enhance the index check whether all keywords were found raise an exeption if not delete the input keywords which have been 'used' return the list of global keys initialize the return value go over all keys check whether the 
current key matches return it if it matches return the default initialize the return value search for the index in the keyword list ckeck whehter the keyword exists return the keyword return the default set the default return value search for the keyword check whether it is non-NULL extract the value return the value list of the root of all beamword keys indicating a file append the beam identifier to the keyword roots to get the full keyname go over all beam keys check whether the current keyword is right and whether the keyvalue is not 'None' check for the file report an error initialize the return string return the total string check whether the index exists raise an exception return the indexed object check whether the index exists raise an exception check whether the input type is correct raise an exception set the index to the input object create the name of the keyword with the polynomial order extract and return the keyword from the keyword list initialize an empty list for each expected keyword form the keyword name extract the new keyword extract the keyword and append it to the list return the list initialize the index set indicator to "not found" go over all keys in the list checke whether the keyword is the desired one create a list keyword if desired store the index enhance the index fire an exception if nothing was found delete the keyword from the inlist return the keyword determine the length of the list compute the 'order' of the xy-dependence chech whether the 'order' is integer no integer -> key length wrong integer -> key length correct pre-decoration add description add post-decoration return the result try to read in the keywords raise an exception if keywords are missing create the label or description get the string header get the data string return the result try to read in the keywords raise an exception if keywords are missing raise an exception if keywords are missing create the label or description get the string header get the data 
string return the result no filename -> default header initialize the data list intialize the start pointer open and parse through the file check whether the start pointer is still set strip the line check whether the first character is a comment, which qualifies the line as part of the header append the line to the header data set the starter pointer to 0, thus indicating the end of the header close the file initialize the keyvalue list create a traditional keyword instance split the string keyvalue append the floats to the list check whether the index exists raise an exception return the indexed object check whether the index exists raise an exception check whether the input type is correct raise an exception set the index to the input object first comes the keyword append the keyvalues using a default format append the comment append a linefeed return the complete string | 20,156 | en | 0.644144 |
# from imports import *
# import random
# class Docs(commands.Cog):
# def __init__(self, bot):
# self.bot = bot
# self.bot.loop.create_task(self.__ainit__())
# async def __ainit__(self):
# await self.bot.wait_until_ready()
# self.scraper = AsyncScraper(session = self.bot.session)
# async def rtfm_lookup(self, program = None, *, args = None):
# rtfm_dictionary = {
# "fusion.py": "https://fusion.senarc.org/en/master/",
# "development" : "https://fusion.senarc.org/en/development/"
# }
# if not args:
# return rtfm_dictionary.get(program)
# else:
# url = rtfm_dictionary.get(program)
# results = await self.scraper.search(args, page=url)
# if not results:
# return f"Could not find anything with {args}."
# else:
# return results
# def reference(self, message):
# reference = message.reference
# if reference and isinstance(reference.resolved, discord.Message):
# return reference.resolved.to_reference()
# return None
# async def rtfm_send(self, ctx, results):
# if isinstance(results, str):
# await ctx.send(results, allowed_mentions = discord.AllowedMentions.none())
# else:
# embed = discord.Embed(color = random.randint(0, 16777215))
# results = results[:10]
# embed.description = "\n".join(f"[`{result}`]({value})" for result, value in results)
# reference = self.reference(ctx.message)
# await ctx.send(embed=embed, reference = reference)
# @commands.group(slash_interaction=True, aliases=["rtd", "rtfs"], brief="Search for attributes from docs.")
# async def rtfm(self, ctx, *, args = None):
# await ctx.trigger_typing()
# results = await self.rtfm_lookup(program = "fusion.py", args = args)
# await self.rtfm_send(ctx, results)
# @rtfm.command(slash_interaction=True, brief = "a command using doc_search to look up at development's docs")
# async def development(self, ctx, *, args = None):
# await ctx.trigger_typing()
# results = await self.rtfm_lookup(program="development", args = args)
# await self.rtfm_send(ctx, results)
# def setup(bot):
# bot.add_cog(Docs(bot))
| cogs/docs.py | 2,196 | from imports import * import random class Docs(commands.Cog): def __init__(self, bot): self.bot = bot self.bot.loop.create_task(self.__ainit__()) async def __ainit__(self): await self.bot.wait_until_ready() self.scraper = AsyncScraper(session = self.bot.session) async def rtfm_lookup(self, program = None, *, args = None): rtfm_dictionary = { "fusion.py": "https://fusion.senarc.org/en/master/", "development" : "https://fusion.senarc.org/en/development/" } if not args: return rtfm_dictionary.get(program) else: url = rtfm_dictionary.get(program) results = await self.scraper.search(args, page=url) if not results: return f"Could not find anything with {args}." else: return results def reference(self, message): reference = message.reference if reference and isinstance(reference.resolved, discord.Message): return reference.resolved.to_reference() return None async def rtfm_send(self, ctx, results): if isinstance(results, str): await ctx.send(results, allowed_mentions = discord.AllowedMentions.none()) else: embed = discord.Embed(color = random.randint(0, 16777215)) results = results[:10] embed.description = "\n".join(f"[`{result}`]({value})" for result, value in results) reference = self.reference(ctx.message) await ctx.send(embed=embed, reference = reference) @commands.group(slash_interaction=True, aliases=["rtd", "rtfs"], brief="Search for attributes from docs.") async def rtfm(self, ctx, *, args = None): await ctx.trigger_typing() results = await self.rtfm_lookup(program = "fusion.py", args = args) await self.rtfm_send(ctx, results) @rtfm.command(slash_interaction=True, brief = "a command using doc_search to look up at development's docs") async def development(self, ctx, *, args = None): await ctx.trigger_typing() results = await self.rtfm_lookup(program="development", args = args) await self.rtfm_send(ctx, results) def setup(bot): bot.add_cog(Docs(bot)) | 2,074 | en | 0.461124 |
from django.conf.urls import url, include
import binder.router # noqa
import binder.websocket # noqa
import binder.views # noqa
import binder.history # noqa
import binder.models # noqa
import binder.plugins.token_auth.views # noqa
from binder.plugins.views.multi_request import multi_request_view
from .views import animal, caretaker, costume, custom, zoo, contact_person, gate # noqa
# Auto-register every ModelView subclass with the binder router and the
# websocket room controller; route discovery happens at import time.
router = binder.router.Router().register(binder.views.ModelView)
room_controller = binder.websocket.RoomController().register(binder.views.ModelView)
# Pattern order matters: the catchall r'^' entries must stay last so they
# only fire when none of the more specific routes matched.
urlpatterns = [
    url(r'^custom/route', custom.custom, name='custom'),
    # url(r'^user/$', custom.user, name='user'),
    url(r'^multi/$', multi_request_view, name='multi_request'),
    url(r'^', include(router.urls)),
    url(r'^', binder.views.api_catchall, name='catchall'),
]
# FIXME: Hmm, this is a bit hackish. Especially here. But where else?
# Hook up change-history tracking for every BinderModel subclass.
binder.models.install_history_signal_handlers(binder.models.BinderModel)
| tests/testapp/urls.py | 951 | noqa noqa noqa noqa noqa noqa noqa url(r'^user/$', custom.user, name='user'), FIXME: Hmm, this is a bit hackish. Especially here. But where else? | 145 | en | 0.525624 |
from .brand import BrandDataset, Brand
from .vehicle_id import VehicleIDDataset
from .comp_cars import CompCarsDataset
# from .veri import VeriDataset
from .box_cars import BoxCars116kDataset
# from .vric import VRICDataset
from .cars196 import Cars196Dataset | experiments/brand/dataset/__init__.py | 259 | from .veri import VeriDataset from .vric import VRICDataset | 59 | en | 0.338389 |
"""Certbot constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "certbot.plugins"
"""Setuptools entry point group name for plugins."""

OLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Plugins Setuptools entry point before rename."""

CLI_DEFAULTS = dict(
    config_files=[
        "/etc/letsencrypt/cli.ini",
        # http://freedesktop.org/wiki/Software/xdg-user-dirs/
        os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
                     "letsencrypt", "cli.ini"),
    ],
    dry_run=False,
    verbose_count=-int(logging.INFO / 10),
    server="https://acme-v01.api.letsencrypt.org/directory",
    rsa_key_size=2048,
    rollback_checkpoints=1,
    config_dir="/etc/letsencrypt",
    work_dir="/var/lib/letsencrypt",
    logs_dir="/var/log/letsencrypt",
    no_verify_ssl=False,
    http01_port=challenges.HTTP01Response.PORT,
    http01_address="",
    tls_sni_01_port=challenges.TLSSNI01Response.PORT,
    tls_sni_01_address="",
    auth_cert_path="./cert.pem",
    auth_chain_path="./chain.pem",
    strict_permissions=False,
    debug_challenges=False,
)
"""Defaults for CLI flags and `.IConfig` attributes."""
# NOTE: the docstring above was previously misplaced after REVOCATION_REASONS,
# where it mis-documented that dict; it belongs to CLI_DEFAULTS.

STAGING_URI = "https://acme-staging.api.letsencrypt.org/directory"

# The set of reasons for revoking a certificate is defined in RFC 5280 in
# section 5.3.1. The reasons that users are allowed to submit are restricted to
# those accepted by the ACME server implementation. They are listed in
# `letsencrypt.boulder.revocation.reasons.go`.
REVOCATION_REASONS = {
    "unspecified": 0,
    "keycompromise": 1,
    "affiliationchanged": 3,
    "superseded": 4,
    "cessationofoperation": 5}

QUIET_LOGGING_LEVEL = logging.WARNING
"""Logging level to use in quiet mode."""

RENEWER_DEFAULTS = dict(
    renewer_enabled="yes",
    renew_before_expiry="30 days",
    # This value should ensure that there is never a deployment delay by
    # default.
    deploy_before_expiry="99 years",
)
"""Defaults for renewer script."""

ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`certbot.interfaces.IInstaller`
enhancements.

List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: certificate chain file path
- spdy: TODO
"""

ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""

CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""

ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""

BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""

CSR_DIR = "csr"
"""See `.IConfig.csr_dir`."""

IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""

KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""

LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""

TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""

RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative to `IConfig.config_dir`."""

FORCE_INTERACTIVE_FLAG = "--force-interactive"
"""Flag to disable TTY checking in IDisplay."""

EFF_SUBSCRIBE_URI = "https://supporters.eff.org/subscribe/certbot"
"""EFF URI used to submit the e-mail address of users who opt-in."""
| certbot/constants.py | 3,433 | Certbot constants.
http://freedesktop.org/wiki/Software/xdg-user-dirs/ The set of reasons for revoking a certificate is defined in RFC 5280 in section 5.3.1. The reasons that users are allowed to submit are restricted to those accepted by the ACME server implementation. They are listed in `letsencrypt.boulder.revocation.reasons.go`. This value should ensure that there is never a deployment delay by default. | 412 | en | 0.875103 |
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
from pathlib import Path
from subprocess import check_output
from time import sleep
import pytest
import yaml
from selenium import webdriver
from selenium.common.exceptions import JavascriptException, WebDriverException
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test):
    """Build the charm, deploy it with its OCI image, and wait for it to settle.

    Without the kubeflow-profiles relation the unit is expected to sit in
    'waiting' status with the relation-data message asserted below.
    """
    my_charm = await ops_test.build_charm(".")
    # The workload image is pinned in metadata.yaml under resources/oci-image.
    image_path = METADATA["resources"]["oci-image"]["upstream-source"]
    await ops_test.model.deploy(my_charm, resources={"oci-image": image_path})
    charm_name = METADATA["name"]
    await ops_test.model.wait_for_idle(
        [charm_name],
        raise_on_blocked=True,
        raise_on_error=True,
        timeout=300,
    )
    # The charm cannot go active until related to kubeflow-profiles.
    assert ops_test.model.applications[charm_name].units[0].workload_status == "waiting"
    assert (
        ops_test.model.applications[charm_name].units[0].workload_status_message
        == "Waiting for kubeflow-profiles relation data"
    )
@pytest.mark.abort_on_fail
async def test_add_profile_relation(ops_test):
    """Deploy kubeflow-profiles, relate it to this charm, and wait for both.

    After the relation is added, both applications must reach 'active' status
    within the timeout.
    """
    charm_name = METADATA["name"]
    # TODO: Point kubeflow-profiles to latest/stable when Rev 54 or higher are promoted
    await ops_test.model.deploy("kubeflow-profiles", channel="latest/edge")
    await ops_test.model.add_relation("kubeflow-profiles", charm_name)
    await ops_test.model.wait_for_idle(
        ["kubeflow-profiles", charm_name],
        status="active",
        raise_on_blocked=True,
        raise_on_error=True,
        timeout=300,
    )
async def test_status(ops_test):
    """The unit must report an active workload once all relations are in place."""
    unit = ops_test.model.applications[METADATA["name"]].units[0]
    assert unit.workload_status == "active"
def fix_queryselector(elems):
    """Build a JS snippet that resolves *elems* through nested shadow roots.

    The dashboard nests web components, so a plain document.querySelector
    cannot reach elements inside shadow DOM. This chains one querySelector
    per element, descending through each shadowRoot in turn, and returns a
    script suitable for driver.execute_script.
    """
    chain = '").shadowRoot.querySelector("'.join(elems)
    script = 'return document.querySelector("'
    script += chain
    script += '")'
    return script
@pytest.fixture()
async def driver(request, ops_test):
    """Yield (driver, wait, url) for a headless Firefox aimed at the dashboard.

    The dashboard address is read from `juju status`; `wait` is a
    WebDriverWait with a 180 s timeout that retries on JavascriptException.
    On teardown a screenshot named after the requesting test is written to
    /tmp for debugging.
    """
    # Resolve the application address from the model's status output.
    status = yaml.safe_load(
        check_output(
            ["juju", "status", "-m", ops_test.model_full_name, "--format=yaml"]
        )
    )
    endpoint = status["applications"]["kubeflow-dashboard"]["address"]
    application = ops_test.model.applications["kubeflow-dashboard"]
    config = await application.get_config()
    port = config["port"]["value"]
    # nip.io wildcard DNS resolves <ip>.nip.io back to <ip>, giving a hostname URL.
    url = f"http://{endpoint}.nip.io:{port}/"
    options = Options()
    options.headless = True
    with webdriver.Firefox(options=options) as driver:
        wait = WebDriverWait(driver, 180, 1, (JavascriptException, StopIteration))
        # Retry the initial page load (up to ~5 minutes) while the app comes up.
        for _ in range(60):
            try:
                driver.get(url)
                break
            except WebDriverException:
                sleep(5)
        else:
            # Final attempt: let any exception propagate and fail the test.
            driver.get(url)
        yield driver, wait, url
        driver.get_screenshot_as_file(f"/tmp/selenium-{request.node.name}.png")
def test_links(driver):
    """Verify sidebar, quick-link, and documentation links render in the UI.

    Each lookup is built with fix_queryselector so elements nested inside
    shadow roots can be reached; wait.until polls until the script returns
    a node (raising on timeout).
    """
    driver, wait, url = driver
    # Ensure that sidebar links are set up properly
    links = [
        "/jupyter/",
        "/katib/",
        "/pipeline/#/experiments",
        "/pipeline/#/pipelines",
        "/pipeline/#/runs",
        "/pipeline/#/recurringruns",
        # Removed temporarily until https://warthogs.atlassian.net/browse/KF-175 is fixed
        # "/pipeline/#/artifacts",
        # "/pipeline/#/executions",
        "/volumes/",
        "/tensorboards/",
    ]
    for link in links:
        print("Looking for link: %s" % link)
        script = fix_queryselector(["main-page", f"iframe-link[href='{link}']"])
        wait.until(lambda x: x.execute_script(script))
    # Ensure that quick links are set up properly
    links = [
        "/pipeline/",
        "/pipeline/#/runs",
        "/jupyter/new?namespace=kubeflow",
        "/katib/",
    ]
    for link in links:
        print("Looking for link: %s" % link)
        script = fix_queryselector(
            [
                "main-page",
                "dashboard-view",
                f"iframe-link[href='{link}']",
            ]
        )
        wait.until(lambda x: x.execute_script(script))
    # Ensure that doc links are set up properly
    links = [
        "https://charmed-kubeflow.io/docs/kubeflow-basics",
        "https://microk8s.io/docs/addon-kubeflow",
        "https://www.kubeflow.org/docs/started/requirements/",
    ]
    for link in links:
        print("Looking for link: %s" % link)
        script = fix_queryselector(
            [
                "main-page",
                "dashboard-view",
                f"a[href='{link}']",
            ]
        )
        wait.until(lambda x: x.execute_script(script))
| tests/integration/test_charm.py | 5,177 | Workaround for web components breaking querySelector.
Because someone thought it was a good idea to just yeet the moral equivalent
of iframes everywhere over a single page ๐คฆ
Shadow DOM was a terrible idea and everyone involved should feel professionally
ashamed of themselves. Every problem it tried to solved could and should have
been solved in better ways that don't break the DOM.
Copyright 2021 Canonical Ltd. See LICENSE file for licensing details. TODO: Point kubeflow-profiles to latest/stable when Rev 54 or higher are promoted Ensure that sidebar links are set up properly Removed temporarily until https://warthogs.atlassian.net/browse/KF-175 is fixed "/pipeline//artifacts", "/pipeline//executions", Ensure that quick links are set up properly Ensure that doc links are set up properly | 801 | en | 0.938021 |
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Root directory under which the ABIDE data set is stored.
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Default number of subjects to work with.
num_subjects = 1000
# Preprocessing pipeline whose outputs are used.
pipeline = 'cpac'
# Derivative files to fetch ('rois_ho': ROI timeseries; presumably the
# Harvard-Oxford atlas -- confirm against the ABIDE derivative naming).
derivatives = ['rois_ho']
# Folder holding the chosen pipeline's 'filt_noglobal' strategy outputs.
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
    """Read subject IDs from the ID list files under root_folder.

    num_subjects : optional cap on how many IDs to return (None = all)
    short        : when True read the short-ID list, otherwise the full-ID list
    Returns an array of ID strings, truncated to num_subjects when given.
    """
    if short:
        ids = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
        ids = ids.astype(str)
    else:
        ids = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
    return ids if num_subjects is None else ids[:num_subjects]
def fetch_filenames(subject_list, file_type):
    """Map short subject IDs to absolute file paths for a requested file type.

    subject_list : list of short subject IDs (strings)
    file_type    : one of the known derivative keys, or a raw filename suffix
    Returns a list the same length as subject_list; an entry is 'N/A' when the
    subject ID is not found in the ID lists.
    """
    # Known derivative keys map to the full-ID based filename suffix.
    suffix_by_type = {'func_preproc': '_func_preproc.nii.gz',
                      'rois_aal': '_rois_aal.1D',
                      'rois_cc200': '_rois_cc200.1D',
                      'rois_ho': '_rois_ho.1D'}
    short_ids = get_ids(short=True).tolist()
    long_ids = get_ids(short=False)
    paths = []
    for sid in subject_list:
        try:
            if file_type in suffix_by_type:
                # Known types are named after the subject's full ID.
                filename = long_ids[short_ids.index(sid)] + suffix_by_type[file_type]
            else:
                # Unknown types are treated as a raw suffix on the short ID.
                filename = sid + file_type
            paths.append(os.path.join(root_folder, sid, filename))
        except ValueError:
            # Unknown subject ID: keep the output aligned with subject_list.
            paths.append('N/A')
    return paths
def fetch_subject_files(subjectID):
    """Return absolute paths of all files in a subject's folder.

    subjectID : short subject ID whose available files are listed
    Returns [] when the subject ID is not in the known short-ID list.
    """
    # Direct membership check; the previous version also loaded the full-ID
    # list (an extra file read) and computed an index, neither of which was used.
    subject_IDs = get_ids(short=True).tolist()
    if subjectID not in subject_IDs:
        return []
    subject_folder = os.path.join(root_folder, subjectID)
    return [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
            if os.path.isfile(os.path.join(subject_folder, f))]
def fetch_conn_matrices(subject_list, atlas_name, kind):
    """Load precomputed connectivity matrices for a list of subjects.

    subject_list : list of short subject IDs (strings)
    atlas_name   : atlas the timeseries were generated from, e.g. aal, cc200
    kind         : correlation kind used to estimate the matrices
    Returns the square connectivity matrices that could be read; files that
    fail to open are reported and skipped.
    """
    suffix = '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat'
    matrices = []
    for path in fetch_filenames(subject_list, suffix):
        print("Reading connectivity file %s" % path)
        try:
            matrices.append(sio.loadmat(path)['connectivity'])
        except IOError:
            print("File %s does not exist" % path)
    return matrices
def get_timeseries(subject_list, atlas_name):
    """Load raw ROI timeseries for each subject.

    subject_list : list of short subject IDs (strings)
    atlas_name   : atlas the timeseries were generated from, e.g. aal, cc200
    Returns a list of arrays, each of shape (timepoints x regions).
    """
    timeseries = []
    for path in fetch_filenames(subject_list, 'rois_' + atlas_name):
        print("Reading timeseries file %s" % path)
        timeseries.append(np.loadtxt(path, skiprows=0))
    return timeseries
def norm_timeseries(ts_list):
    """Normalise each timeseries array (no detrending).

    ts_list : list of arrays, each of shape (timepoints x regions)
    Returns a list of cleaned arrays with the same shapes.
    """
    return [nilearn.signal.clean(ts, detrend=False) for ts in ts_list]
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
    """
        timeseries   : timeseries table for subject (timepoints x regions)
        subject      : the subject short ID
        atlas_name   : name of the atlas used
        kind         : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        save         : save the connectivity matrix to a file
        save_path    : specify path to save the matrix if different from subject folder

    returns:
        connectivity : connectivity matrix (regions x regions)
    """
    print("Estimating %s matrix for subject %s" % (kind, subject))
    if kind == 'lasso':
        # Graph Lasso estimator: sparse inverse-covariance with CV-selected alpha.
        # NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in newer
        # scikit-learn releases -- confirm the pinned version exposes this name.
        covariance_estimator = GraphLassoCV(verbose=1)
        covariance_estimator.fit(timeseries)
        connectivity = covariance_estimator.covariance_
        print('Covariance matrix has shape {0}.'.format(connectivity.shape))
    elif kind in ['tangent', 'partial correlation', 'correlation']:
        # ConnectivityMeasure works on a list of subjects; unwrap the single result.
        conn_measure = connectome.ConnectivityMeasure(kind=kind)
        connectivity = conn_measure.fit_transform([timeseries])[0]
    # NOTE(review): an unrecognised `kind` falls through both branches and
    # raises NameError on `connectivity` below.
    if save:
        # Stored as a MATLAB .mat file holding one 'connectivity' variable.
        subject_file = os.path.join(save_path, subject,
                                    subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
        sio.savemat(subject_file, {'connectivity': connectivity})
    return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
    """
        timeseries   : list of timeseries tables for subjects (timepoints x regions)
        subject_list : the subject short IDs list
        atlas_name   : name of the atlas used
        kind         : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        save         : save the connectivity matrix to a file
        save_path    : specify path to save the matrix if different from subject folder

    returns:
        connectivity : connectivity matrix (regions x regions)
    """
    if kind == 'lasso':
        # Graph Lasso estimator, re-fit per subject.
        # NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in newer
        # scikit-learn releases -- confirm the pinned version exposes this name.
        covariance_estimator = GraphLassoCV(verbose=1)
        connectivity_matrices = []
        for i, ts in enumerate(timeseries):
            covariance_estimator.fit(ts)
            connectivity = covariance_estimator.covariance_
            connectivity_matrices.append(connectivity)
            print('Covariance matrix has shape {0}.'.format(connectivity.shape))
    elif kind in ['tangent', 'partial correlation', 'correlation']:
        # One fit_transform over all subjects at once.
        conn_measure = connectome.ConnectivityMeasure(kind=kind)
        connectivity_matrices = conn_measure.fit_transform(timeseries)
    # NOTE(review): an unrecognised `kind` falls through both branches and
    # raises NameError on `connectivity_matrices` below.
    if save:
        for i, subject in enumerate(subject_list):
            # (`subject` equals subject_list[i]; the index form is used below.)
            subject_file = os.path.join(save_path, subject_list[i],
                                        subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
            sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
            print("Saving connectivity matrix to %s" % subject_file)
    return connectivity_matrices
def get_subject_label(subject_list, label_name):
    """Look up one phenotypic label per subject from the ABIDE CSV.

    subject_list : list of short subject IDs to include
    label_name   : CSV column name to retrieve
    Returns a dict mapping subject ID -> label value; subjects absent from
    the CSV simply have no entry.
    """
    csv_path = os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')
    with open(csv_path) as csvfile:
        return {row['subject']: row[label_name]
                for row in csv.DictReader(csvfile)
                if row['subject'] in subject_list}
def load_all_networks(subject_list, kind, atlas_name="aal"):
    """Load every subject's saved connectivity matrix from disk.

    subject_list : list of short subject IDs
    kind         : connectivity kind used when the matrices were saved
    atlas_name   : atlas name embedded in the saved filenames
    Returns a list of square (regions x regions) matrices.
    """
    networks = []
    for subject_id in subject_list:
        mat_path = os.path.join(root_folder, subject_id,
                                subject_id + "_" + atlas_name + "_" + kind + ".mat")
        net = sio.loadmat(mat_path)['connectivity']
        if atlas_name == 'ho':
            # Drop ROI 82 (both row and column) for the 'ho' atlas.
            net = np.delete(np.delete(net, 82, axis=0), 82, axis=1)
        networks.append(net)
    return networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
    """Stack each subject's Fisher-transformed upper-triangle connectivity.

    subject_list : list of short subject IDs
    kind         : connectivity kind used when the matrices were saved
    atlas_name   : atlas name embedded in the saved filenames
    Returns a (num_subjects x num_connections) matrix, one subject per row.
    """
    networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
    # Fisher z-transform, then vectorise the strict upper triangle of each matrix.
    fisher = [np.arctanh(net) for net in networks]
    upper = np.triu_indices_from(fisher[0], 1)
    return np.vstack([net[upper] for net in fisher])
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords | lib/abide_utils.py | 11,220 | subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, i.e.
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of filetypes (same length as subject_list)
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
num_subjects : number of subject IDs to get
short : True of False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Output path Number of subjects Selected pipeline Files to fetch Get the root folder Specify file mappings for the possible file types The list to be filled Load subject ID lists Fill list with requested file paths Return N/A if subject ID is not found Load subject ID lists Graph Lasso estimator Graph Lasso estimator all_networks=np.array(all_networks) This is an alternative implementation Get Fisher transformed matrices Get upper diagonal indices Get vectorised matrices Each subject should be a row of the matrix | 4,229 | en | 0.74114 |
from typing import List
import numpy as np
def mask_nan(arrays: List[np.ndarray]) -> List[np.ndarray]:
    """
    Drop every index at which any of the equal-sized input arrays holds NaN.

    Parameters
    ----------
    arrays : List[np.ndarray]
        1-D arrays of equal size, possibly containing NaNs

    Returns
    -------
    List[np.ndarray]
        copies of the inputs with all NaN-tainted indices removed

    Notes
    -----
    The union of NaN positions across all inputs is computed once, then the
    surviving indices are selected from every array.

    For example:

    >> a = np.array([0, 1, np.nan, 3])
    >> b = np.array([np.nan, 5, np.nan, 7])
    >> c = np.array([8, 9, 10, 11])
    >> mask_nan([a, b, c])
    [array([ 1.,  3.]), array([ 5.,  7.]), array([ 9, 11])]

    """
    size = arrays[0].size
    assert all(arr.size == size for arr in arrays[1:])
    tainted = np.zeros(size, dtype=bool)
    for arr in arrays:
        tainted |= np.isnan(arr)
    keep = np.where(~tainted)[0]
    return [arr[keep] for arr in arrays]
| jburt/mask.py | 1,058 | Drop indices from equal-sized arrays if the element at that index is NaN in
any of the input arrays.
Parameters
----------
arrays : List[np.ndarray]
list of ndarrays containing NaNs, to be masked
Returns
-------
List[np.ndarray]
masked arrays (free of NaNs)
Notes
-----
This function find the indices where one or more elements is NaN in one or
more of the input arrays, then drops those indices from all arrays.
For example:
>> a = np.array([0, 1, np.nan, 3])
>> b = np.array([np.nan, 5, np.nan, 7])
>> c = np.array([8, 9, 10, 11])
>> mask_nan([a, b, c])
[array([ 1., 3.]), array([ 5., 7.]), array([ 9, 11])] | 622 | en | 0.483066 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
"""
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import sixm
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in sixm.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
class RsaVerifier(object):
    """Verifies the signature on a message.

    Args:
        pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
    """

    def __init__(self, pubkey):
        self._pubkey = pubkey

    def verify(self, message, signature):
        """Verifies a message against a signature.

        Args:
            message: string or bytes, The message to verify. If string, will be
                     encoded to bytes as utf-8.
            signature: string or bytes, The signature on the message. If
                       string, will be encoded to bytes as utf-8.

        Returns:
            True if message was signed by the private key associated with the
            public key that this object was constructed with.
        """
        message = _helpers._to_bytes(message, encoding='utf-8')
        try:
            return rsa.pkcs1.verify(message, signature, self._pubkey)
        except (ValueError, rsa.pkcs1.VerificationError):
            # Both a malformed signature and a genuine mismatch report False;
            # no distinction is surfaced to the caller.
            return False

    @classmethod
    def from_string(cls, key_pem, is_x509_cert):
        """Construct an RsaVerifier instance from a string.

        Args:
            key_pem: string, public key in PEM format.
            is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
                          is expected to be an RSA key in PEM format.

        Returns:
            RsaVerifier instance.

        Raises:
            ValueError: if the key_pem can't be parsed. In either case, error
                        will begin with 'No PEM start marker'. If
                        ``is_x509_cert`` is True, will fail to find the
                        "-----BEGIN CERTIFICATE-----" error, otherwise fails
                        to find "-----BEGIN RSA PUBLIC KEY-----".
        """
        key_pem = _helpers._to_bytes(key_pem)
        if is_x509_cert:
            # Decode the DER certificate; leftover bytes mean a malformed input.
            der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
            if remaining != b'':
                raise ValueError('Unused bytes', remaining)
            # Extract the subjectPublicKey BIT STRING and re-pack its bits
            # into bytes before loading it as a PKCS#1 DER public key.
            cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
            key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
        else:
            pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
        return cls(pubkey)
class RsaSigner(object):
    """Signs messages with a private key.

    Args:
        pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
    """

    def __init__(self, pkey):
        self._key = pkey

    def sign(self, message):
        """Signs a message.

        Args:
            message: bytes, Message to be signed.

        Returns:
            string, The signature of the message for the given key.
        """
        message = _helpers._to_bytes(message, encoding='utf-8')
        # PKCS#1 v1.5 signature over the SHA-256 digest of the message.
        return rsa.pkcs1.sign(message, self._key, 'SHA-256')

    @classmethod
    def from_string(cls, key, password='notasecret'):
        """Construct an RsaSigner instance from a string.

        Args:
            key: string, private key in PEM format.
            password: string, password for private key file. Unused for PEM
                      files.

        Returns:
            RsaSigner instance.

        Raises:
            ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
            PEM format.
        """
        key = _helpers._from_bytes(key)  # pem expects str in Py3
        # marker_id tells which PEM block matched: 0 -> PKCS#1, 1 -> PKCS#8.
        # NOTE(review): `sixm` appears to be a vendored rename of `six` --
        # confirm against the package layout.
        marker_id, key_bytes = pem.readPemBlocksFromFile(
            sixm.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
        if marker_id == 0:
            pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
                                                 format='DER')
        elif marker_id == 1:
            # PKCS#8 wraps the key: decode the PrivateKeyInfo envelope, then
            # load the inner privateKey octets as PKCS#1 DER.
            key_info, remaining = decoder.decode(
                key_bytes, asn1Spec=_PKCS8_SPEC)
            if remaining != b'':
                raise ValueError('Unused bytes', remaining)
            pkey_info = key_info.getComponentByName('privateKey')
            pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
                                                 format='DER')
        else:
            raise ValueError('No key could be detected.')
        return cls(pkey)
| test/lib/oauth2client/_pure_python_crypt.py | 6,370 | Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
ValueError: if the key_pem can't be parsed. In either case, error
will begin with 'No PEM start marker'. If
``is_x509_cert`` is True, will fail to find the
"-----BEGIN CERTIFICATE-----" error, otherwise fails
to find "-----BEGIN RSA PUBLIC KEY-----".
Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
Copyright 2016 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pem expects str in Py3 | 2,604 | en | 0.797836 |
def findDecision(obj):
    """Random-forest rule: classify a coupon offer as 'True' (accept) or 'False'.

    Feature order: obj[0]=Passanger, obj[1]=Coupon, obj[2]=Education,
    obj[3]=Occupation, obj[4]=Restaurant20to50, obj[5]=Distance.

    Restyled from the generated tree: subtrees whose leaves all carry the
    same label are folded into a single return.  NaN feature values (for
    which both <= and > comparisons are False) fall through to the same
    leaf the original tree produced.
    """
    passanger = obj[0]
    coupon = obj[1]
    education = obj[2]
    occupation = obj[3]
    rest20to50 = obj[4]
    if passanger <= 1:
        if rest20to50 <= 3.0:
            if education <= 2:
                if occupation <= 10:
                    # Every leaf under this node was 'False' (Distance split
                    # was degenerate).
                    return 'False'
                if occupation > 10:
                    return 'True' if coupon <= 2 else 'False'
                return 'True'    # occupation is NaN
            return 'False'       # education > 2 (or NaN)
        return 'True'            # rest20to50 > 3.0 (or NaN)
    if passanger > 1:
        if coupon > 0:
            if occupation <= 20:
                # Every leaf under this node was 'True' (Restaurant20to50,
                # Education and Distance splits were degenerate).
                return 'True'
            return 'False'       # occupation > 20 (or NaN)
        return 'False'           # coupon <= 0 (or NaN)
    return 'True'                # passanger is NaN
| duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/6_features/numtrees_30/rule_0.py | 2,105 | obj[0]: Passanger, obj[1]: Coupon, obj[2]: Education, obj[3]: Occupation, obj[4]: Restaurant20to50, obj[5]: Distance {"feature": "Passanger", "instances": 34, "metric_value": 0.9774, "depth": 1} {"feature": "Restaurant20to50", "instances": 22, "metric_value": 0.7732, "depth": 2} {"feature": "Education", "instances": 21, "metric_value": 0.7025, "depth": 3} {"feature": "Occupation", "instances": 16, "metric_value": 0.8113, "depth": 4} {"feature": "Coupon", "instances": 13, "metric_value": 0.6194, "depth": 5} {"feature": "Distance", "instances": 7, "metric_value": 0.8631, "depth": 6} {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 5} {"feature": "Coupon", "instances": 12, "metric_value": 0.8113, "depth": 2} {"feature": "Occupation", "instances": 11, "metric_value": 0.684, "depth": 3} {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.469, "depth": 4} {"feature": "Education", "instances": 4, "metric_value": 0.8113, "depth": 5} {"feature": "Distance", "instances": 2, "metric_value": 1.0, "depth": 6} | 1,048 | en | 0.545905 |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yolov2_ros.srv import *
import rospy
from copy import deepcopy
from core import YOLO
from vision_msgs.msg import Detection2DArray, Detection2D, BoundingBox2D, ObjectHypothesisWithPose
from geometry_msgs.msg import PoseWithCovariance, Pose2D
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class YoloServer(object):
    """ROS node exposing YOLOv2 object detection as the 'yolo_detect' service.

    On construction it reads the node's private (~) parameters, loads the
    configured backend and weights, advertises the service and blocks in
    ``spin()`` until shutdown.
    """

    def __init__(self):
        self.bridge = CvBridge()

        # --- configuration, all read from the private parameter server ---
        self.n_gpu = rospy.get_param('~n_gpu', default=1)
        self.backend = rospy.get_param('~backend', default='full_yolo')  # Either 'tiny_yolo', full_yolo, 'mobile_net, 'squeeze_net', or 'inception3'
        self.backend_path = rospy.get_param('~weights_path')  # Weights directory
        self.input_size = rospy.get_param('~input_size', default=416)  # DO NOT change this. 416 is default for YOLO.
        self.labels = rospy.get_param('~labels')  # Eg: ['trafficcone', 'person', 'dog']
        self.max_number_detections = rospy.get_param('~max_number_detections', default=5)  # Max number of detections
        # The anchors to use. Use the anchor generator and copy these into the config.
        self.anchors = rospy.get_param('~anchors', default=[0.57273, 0.677385, 1.87446,
                                                            2.06253, 3.33843, 5.47434, 7.88282,
                                                            3.52778, 9.77052, 9.16828])
        self.weights_path = rospy.get_param('~weights_path', default='../weights/full_yolo.h5')  # Path to the weights.h5 file
        self.weight_file = rospy.get_param('~weight_file')

        self.yolo = YOLO(
            n_gpu=self.n_gpu,
            backend = self.backend,
            backend_path=self.backend_path,
            input_size = self.input_size,
            labels = self.labels,
            max_box_per_image = self.max_number_detections,
            anchors = self.anchors
        )

        self.yolo.load_weights(self.weights_path + '/' + self.weight_file)

        rospy.loginfo('YOLO detector ready...')

        s = rospy.Service('yolo_detect', YoloDetect, self._handle_yolo_detect, buff_size=10000000)
        s.spin()

    def _handle_yolo_detect(self, req):
        """Service callback: run the detector on req.image.

        Always returns a YoloDetectResponse. On image-conversion or
        inference failure the response simply carries an empty detection
        array instead of raising inside the service handler.
        """
        cv_image = None
        detection_array = Detection2DArray()
        detections = []
        # BUGFIX: default to an empty list (was None). If conversion or
        # prediction fails below, iterating over None would raise TypeError
        # inside the service handler.
        boxes = []
        try:
            cv_image = self.bridge.imgmsg_to_cv2(req.image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
        if cv_image is not None:
            try:
                boxes = self.yolo.predict(cv_image)
            except SystemError:
                # Best-effort: an inference hiccup yields an empty result.
                pass
        for box in boxes:
            detection = Detection2D()
            results = []
            bbox = BoundingBox2D()
            center = Pose2D()

            detection.header = Header()
            detection.header.stamp = rospy.get_rostime()
            # detection.source_img = deepcopy(req.image)

            # One hypothesis per class label, scored by the detector.
            labels = box.get_all_labels()
            for i in range(0,len(labels)):
                object_hypothesis = ObjectHypothesisWithPose()
                object_hypothesis.id = i
                object_hypothesis.score = labels[i]
                results.append(object_hypothesis)
            detection.results = results

            # Axis-aligned bounding box: center plus extents.
            x, y = box.get_xy_center()
            center.x = x
            center.y = y
            center.theta = 0.0
            bbox.center = center

            size_x, size_y = box.get_xy_extents()
            bbox.size_x = size_x
            bbox.size_y = size_y
            detection.bbox = bbox

            detections.append(detection)

        detection_array.header = Header()
        detection_array.header.stamp = rospy.get_rostime()
        detection_array.detections = detections

        return YoloDetectResponse(detection_array)
if __name__ == '__main__':
    # Register this process with the ROS master before constructing the server.
    rospy.init_node('yolo_server')
    try:
        YoloServer()
    except rospy.ROSInterruptException:
        # Shutdown was requested while the service was spinning; exit quietly.
        pass
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import RitoTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to ritod, and store responses.
class TestNode(NodeConnCB):
    """P2P test peer used to drive ritod and record its responses.

    Tracks every ``sendcmpct`` received and remembers which block hashes
    the node has announced (via inv, headers or cmpctblock), so tests can
    synchronize on "node announced block X".
    """

    def __init__(self):
        super().__init__()
        # All sendcmpct messages received from the node, in arrival order.
        self.last_sendcmpct = []
        # True once any block announcement (inv/headers/cmpctblock) arrived.
        self.block_announced = False
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.announced_blockhashes = set()

    def on_sendcmpct(self, conn, message):
        # Record the peer's compact-block preference negotiation.
        self.last_sendcmpct.append(message)

    def on_cmpctblock(self, conn, message):
        # A compact-block announcement: note the announced hash.
        self.block_announced = True
        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)

    def on_headers(self, conn, message):
        # A headers announcement: note every header's hash.
        self.block_announced = True
        for x in self.last_message["headers"].headers:
            x.calc_sha256()
            self.announced_blockhashes.add(x.sha256)

    def on_inv(self, conn, message):
        # An inv announcement: only MSG_BLOCK (type 2) entries count.
        for x in self.last_message["inv"].inv:
            if x.type == 2:
                self.block_announced = True
                self.announced_blockhashes.add(x.hash)

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        """Reset announcement state so the next announcement is detectable."""
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.last_message.pop("cmpctblock", None)

    def get_headers(self, locator, hashstop):
        """Send a getheaders with the given locator/hashstop to the node."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        """Announce the given blocks to the node via a headers message."""
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        """Request headers and wait until the node answers, then reset state."""
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        wait_until(received_hash, timeout=timeout, lock=mininode_lock)

    def send_await_disconnect(self, message, timeout=30):
        """Sends a message to the node and wait for disconnect.
        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(RitoTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend utxo's for testing.
    def make_utxos(self):
        """Mine a coinbase, mature it, then split it into 10 OP_TRUE outputs.

        The resulting [txid, vout, value] triples are appended to self.utxos.
        """
        # Doesn't matter which node we use, just use node0.
        block = self.build_block_on_tip(self.nodes[0])
        self.test_node.send_and_ping(msg_block(block))
        assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
        # 100 confirmations so the coinbase above becomes spendable.
        self.nodes[0].generate(100)

        total_value = block.vtx[0].vout[0].nValue
        out_value = total_value // 10
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
        for i in range(10):
            # Anyone-can-spend outputs keep later tests signature-free.
            tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
        tx.rehash()

        block2 = self.build_block_on_tip(self.nodes[0])
        block2.vtx.append(tx)
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.solve()
        self.test_node.send_and_ping(msg_block(block2))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
        self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
        return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
    def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
        """Exercise the sendcmpct negotiation rules (see comment block above).

        Verifies that compact-block announcements only start after a valid
        sendcmpct(announce=True) at the node's preferred version, and stop
        again once announce=False is sent.
        """
        # Make sure we get a SENDCMPCT message from our peer
        def received_sendcmpct():
            return (len(test_node.last_sendcmpct) > 0)
        wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
        with mininode_lock:
            # Check that the first version received is the preferred one
            assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
            # And that we receive versions down to 1.
            assert_equal(test_node.last_sendcmpct[-1].version, 1)
            test_node.last_sendcmpct = []

        tip = int(node.getbestblockhash(), 16)

        def check_announcement_of_new_block(node, peer, predicate):
            # Mine one block and check how (or whether) it is announced.
            peer.clear_block_announcement()
            block_hash = int(node.generate(1)[0], 16)
            peer.wait_for_block_announcement(block_hash, timeout=30)
            assert(peer.block_announced)

            with mininode_lock:
                assert predicate(peer), (
                    "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
                        block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))

        # We shouldn't get any block announcements via cmpctblock yet.
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Try one more time, this time after requesting headers.
        test_node.request_headers_and_sync(locator=[tip])
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)

        # Test a few ways of using sendcmpct that should NOT
        # result in compact block announcements.
        # Before each test, sync the headers chain.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with too-high version
        sendcmpct = msg_sendcmpct()
        sendcmpct.version = preferred_version+1
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with valid version, but announce=False
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Finally, try a SENDCMPCT message with announce=True
        sendcmpct.version = preferred_version
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time (no headers sync should be needed!)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after turning on sendheaders
        test_node.send_and_ping(msg_sendheaders())
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after sending a version-1, announce=false message.
        sendcmpct.version = preferred_version-1
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Now turn off announcements
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)

        if old_node is not None:
            # Verify that a peer using an older protocol version can receive
            # announcements from this node.
            sendcmpct.version = preferred_version-1
            sendcmpct.announce = True
            old_node.send_and_ping(sendcmpct)
            # Header sync
            old_node.request_headers_and_sync(locator=[tip])
            check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes ritod to (reasonably!) disconnect us, so do this last.
    def test_invalid_cmpctblock_message(self):
        """Send a cmpctblock with an out-of-range prefilled index.

        The node should disconnect us and must not advance its tip.
        """
        self.nodes[0].generate(101)
        block = self.build_block_on_tip(self.nodes[0])

        cmpct_block = P2PHeaderAndShortIDs()
        cmpct_block.header = CBlockHeader(block)
        cmpct_block.prefilled_txn_length = 1
        # This index will be too high
        prefilled_txn = PrefilledTransaction(1, block.vtx[0])
        cmpct_block.prefilled_txn = [prefilled_txn]
        self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
        # Tip unchanged: the invalid compact block was rejected.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# ritod's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
    def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
        """Validate a received compact block against the known full block.

        Checks the header hash, the prefilled coinbase, witness presence
        (version 2 only), and that every shortid matches the siphash of the
        corresponding (w)txid. NOTE: consumes the prefilled_txn/shortids
        lists of ``header_and_shortids`` as it verifies them.
        """
        # Check that we got the right block!
        header_and_shortids.header.calc_sha256()
        assert_equal(header_and_shortids.header.sha256, block_hash)

        # Make sure the prefilled_txn appears to have included the coinbase
        assert(len(header_and_shortids.prefilled_txn) >= 1)
        assert_equal(header_and_shortids.prefilled_txn[0].index, 0)

        # Check that all prefilled_txn entries match what's in the block.
        for entry in header_and_shortids.prefilled_txn:
            entry.tx.calc_sha256()
            # This checks the non-witness parts of the tx agree
            assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)

            # And this checks the witness
            wtxid = entry.tx.calc_sha256(True)
            if version == 2:
                assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
            else:
                # Shouldn't have received a witness
                assert(entry.tx.wit.is_null())

        # Check that the cmpctblock message announced all the transactions.
        assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))

        # And now check that all the shortids are as expected as well.
        # Determine the siphash keys to use.
        [k0, k1] = header_and_shortids.get_siphash_keys()

        index = 0
        while index < len(block.vtx):
            if (len(header_and_shortids.prefilled_txn) > 0 and
                    header_and_shortids.prefilled_txn[0].index == index):
                # Already checked prefilled transactions above
                header_and_shortids.prefilled_txn.pop(0)
            else:
                tx_hash = block.vtx[index].sha256
                if version == 2:
                    tx_hash = block.vtx[index].calc_sha256(True)
                shortid = calculate_shortid(k0, k1, tx_hash)
                assert_equal(shortid, header_and_shortids.shortids[0])
                header_and_shortids.shortids.pop(0)
            index += 1
# Test that ritod requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
    def test_compactblock_requests(self, node, test_node, version, segwit):
        """Announce a block via inv/header and check the node asks for a
        compact block, then reconstructs it once we supply the missing
        coinbase via blocktxn (see comment block above for version notes).
        """
        # Try announcing a block with an inv or header, expect a compactblock
        # request
        for announce in ["inv", "header"]:
            block = self.build_block_on_tip(node, segwit=segwit)
            with mininode_lock:
                test_node.last_message.pop("getdata", None)

            if announce == "inv":
                test_node.send_message(msg_inv([CInv(2, block.sha256)]))
                wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
                test_node.send_header_for_blocks([block])
            else:
                test_node.send_header_for_blocks([block])
            wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
            assert_equal(len(test_node.last_message["getdata"].inv), 1)
            # inv type 4 == MSG_CMPCT_BLOCK: node asked for a compact block.
            assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
            assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

            # Send back a compactblock message that omits the coinbase
            comp_block = HeaderAndShortIDs()
            comp_block.header = CBlockHeader(block)
            comp_block.nonce = 0
            [k0, k1] = comp_block.get_siphash_keys()
            coinbase_hash = block.vtx[0].sha256
            if version == 2:
                coinbase_hash = block.vtx[0].calc_sha256(True)
            comp_block.shortids = [
                calculate_shortid(k0, k1, coinbase_hash) ]
            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
            # Tip can't advance yet: the node doesn't have the coinbase.
            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
            # Expect a getblocktxn message.
            with mininode_lock:
                assert("getblocktxn" in test_node.last_message)
                absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, [0])  # should be a coinbase request

            # Send the coinbase, and verify that the tip advances.
            if version == 2:
                msg = msg_witness_blocktxn()
            else:
                msg = msg_blocktxn()
            msg.block_transactions.blockhash = block.sha256
            msg.block_transactions.transactions = [block.vtx[0]]
            test_node.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
    def test_getblocktxn_requests(self, node, test_node, version):
        """Check the node requests exactly the transactions it is missing
        from a compact block, and reconstructs once they are supplied
        (see comment block above).
        """
        with_witness = (version==2)

        def test_getblocktxn_response(compact_block, peer, expected_result):
            # Send a compact block and assert the getblocktxn indexes asked for.
            msg = msg_cmpctblock(compact_block.to_p2p())
            peer.send_and_ping(msg)
            with mininode_lock:
                assert("getblocktxn" in peer.last_message)
                absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, expected_result)

        def test_tip_after_message(node, peer, msg, tip):
            # Send a message and assert the node's tip afterwards.
            peer.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), tip)

        # First try announcing compactblocks that won't reconstruct, and verify
        # that we receive getblocktxn messages back.
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, use_witness=with_witness)

        test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])

        msg_bt = msg_blocktxn()
        if with_witness:
            msg_bt = msg_witness_blocktxn()  # serialize with witnesses
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

        # Now try interspersing the prefilled transactions
        comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        # Now try giving one transaction ahead of time.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        test_node.send_and_ping(msg_tx(block.vtx[1]))
        assert(block.vtx[1].hash in node.getrawmempool())

        # Prefill 4 out of the 6 transactions, and verify that only the one
        # that was not in the mempool is requested.
        comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [5])

        msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        # Now provide all transactions to the node before the block is
        # announced and verify reconstruction happens immediately.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        for tx in block.vtx[1:]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:]:
            assert(tx.hash in mempool)

        # Clear out last request.
        with mininode_lock:
            test_node.last_message.pop("getblocktxn", None)

        # Send compact block
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
        test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
        with mininode_lock:
            # Shouldn't have gotten a request for any transaction
            assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
    def test_incorrect_blocktxn_response(self, node, test_node, version):
        """Reply to getblocktxn with the wrong transactions and verify the
        node falls back to a full-block getdata instead of marking the
        block permanently invalid (see comment above).
        """
        if (len(self.utxos) == 0):
            self.make_utxos()
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Relay the first 5 transactions from the block in advance
        for tx in block.vtx[1:6]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:6]:
            assert(tx.hash in mempool)

        # Send compact block
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        absolute_indexes = []
        with mininode_lock:
            assert("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

        # Now give an incorrect response.
        # Note that it's possible for ritod to be smart enough to know we're
        # lying, since it could check to see if the shortid matches what we're
        # sending, and eg disconnect us for misbehavior. If that behavior
        # change were made, we could just modify this test by having a
        # different peer provide the block further down, so that we're still
        # verifying that the block isn't marked bad permanently. This is good
        # enough for now.
        msg = msg_blocktxn()
        if version==2:
            msg = msg_witness_blocktxn()
        # Deliberately wrong: repeats vtx[5] and omits vtx[6].
        msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
        test_node.send_and_ping(msg)

        # Tip should not have updated
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

        # We should receive a getdata request
        wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
        assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

        # Deliver the block
        if version==2:
            test_node.send_and_ping(msg_witness_block(block))
        else:
            test_node.send_and_ping(msg_block(block))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    def test_getblocktxn_handler(self, node, test_node, version):
        """Check the node's blocktxn responses for recent blocks, and that a
        getblocktxn for a block deeper than the allowed depth is answered
        with a full block instead.
        """
        # ritod will not send blocktxn responses for blocks whose height is
        # more than 10 blocks deep.
        MAX_GETBLOCKTXN_DEPTH = 10
        chain_height = node.getblockcount()
        current_height = chain_height
        while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
            block_hash = node.getblockhash(current_height)
            block = FromHex(CBlock(), node.getblock(block_hash, False))

            # Request a random subset of the block's transactions.
            msg = msg_getblocktxn()
            msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
            num_to_request = random.randint(1, len(block.vtx))
            msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
            test_node.send_message(msg)
            wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)

            [tx.calc_sha256() for tx in block.vtx]
            with mininode_lock:
                assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
                all_indices = msg.block_txn_request.to_absolute()
                for index in all_indices:
                    tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                    tx.calc_sha256()
                    assert_equal(tx.sha256, block.vtx[index].sha256)
                    if version == 1:
                        # Witnesses should have been stripped
                        assert(tx.wit.is_null())
                    else:
                        # Check that the witness matches
                        assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
                test_node.last_message.pop("blocktxn", None)
            current_height -= 1

        # Next request should send a full block response, as we're past the
        # allowed depth for a blocktxn response.
        block_hash = node.getblockhash(current_height)
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
        with mininode_lock:
            test_node.last_message.pop("block", None)
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
            assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
    """Test that requesting old compact blocks doesn't work.

    Blocks deeper than MAX_CMPCTBLOCK_DEPTH must be served as full
    blocks, and a stale compact-block announcement must only be stored
    headers-only (and not be retrievable via getblocktxn, to avoid
    fingerprinting).
    """
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
    # The oldest block is still within the depth limit: served as cmpctblock.
    test_node.clear_block_announcement()
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_message.pop("block", None)
    # After one more block the same request must be answered with a full block.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    # The stale block should appear in the chain tips as headers-only.
    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert(found)
    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
    """Mine three retarget periods of blocks so the segwit deployment
    reaches the ACTIVE state, then verify it did."""
    blocks_to_mine = 144 * 3
    node.generate(blocks_to_mine)
    status = get_bip9_status(node, "segwit")["status"]
    assert_equal(status, 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to the node and check that every listener peer is
    announced the new block via a cmpctblock message with the right header."""
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    [l.clear_block_announcement() for l in listeners]
    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block))
    for l in listeners:
        wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
    with mininode_lock:
        for l in listeners:
            assert "cmpctblock" in l.last_message
            l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
            assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Relay a compact block with a bad merkle root; we must stay connected.

    Deleting a transaction after building the block makes the merkle root
    invalid while the header stays well-formed; the node should reject
    the block without advancing its tip and without disconnecting us.
    """
    assert(len(self.utxos))
    utxo = self.utxos[0]
    block = self.build_block_with_transactions(node, utxo, 5)
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
    block.solve()
    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)
    # Check that the tip didn't advance.
    # Fix: the original used "is not", which compares object *identity* and
    # is unreliable for large ints; value inequality ("!=") is intended here.
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    test_node.sync_with_ping()
# Helper for enabling cb announcements.
# Syncs headers with the node, then sends a sendcmpct(announce=True) request.
def request_cb_announcements(self, peer, node, version):
    """Ask the node to announce new blocks to `peer` via compact blocks."""
    best_hash = node.getbestblockhash()
    peer.get_headers(locator=[int(best_hash, 16)], hashstop=0)
    sendcmpct = msg_sendcmpct()
    sendcmpct.announce = True
    sendcmpct.version = version
    peer.send_and_ping(sendcmpct)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Verify compact-block reconstruction using mempool transactions that
    arrived from a different peer, and that an invalid compact block from
    one peer doesn't break relay (the block can still be completed via a
    blocktxn from the announcing peer)."""
    assert(len(self.utxos))

    def announce_cmpct_block(node, peer):
        # Build a block and announce it to `peer` as a compact block; the
        # node is expected to respond with a getblocktxn request.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    # Deliver the block's transactions into the mempool via a second peer.
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)
    # Re-announcing the compact block should now reconstruct from the mempool.
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    # Corrupt the prefilled coinbase's witness so reconstruction fails.
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    # The original announcer can still complete the block with a blocktxn.
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Run the compact-blocks test suite, first pre-segwit and then again
    after activating segwit on node1.  The order of the calls matters:
    several sub-tests consume UTXOs created by make_utxos() and rely on
    the chain state left behind by the previous sub-test."""
    # Setup the p2p connections and start up the network thread.
    self.test_node = TestNode()
    self.segwit_node = TestNode()
    self.old_node = TestNode()  # version 1 peer <--> segwit node
    connections = []
    connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.old_node, services=NODE_NETWORK))
    self.test_node.add_connection(connections[0])
    self.segwit_node.add_connection(connections[1])
    self.old_node.add_connection(connections[2])
    NetworkThread().start()  # Start up network handling in another thread
    # Test logic begins here
    self.test_node.wait_for_verack()
    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()
    self.log.info("Running tests, pre-segwit activation:")
    self.log.info("Testing SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    # End-to-end block relay tests
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
    self.log.info("Testing reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)
    # Advance to segwit activation
    self.log.info("Advancing to segwit activation")
    self.activate_segwit(self.nodes[1])
    self.log.info("Running tests, post-segwit activation...")
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
    self.log.info("Testing getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    self.log.info("Syncing nodes...")
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
    self.log.info("Testing compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
    self.log.info("Testing getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
    self.log.info("Testing invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
    # Entry point when executed directly as a functional test script.
    CompactBlocksTest().main()
| test/functional/p2p_compactblocks.py | 43,952 | Sends a message to the node and waits for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, e.g. an invalid block.
Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
!/usr/bin/env python3 Copyright (c) 2018 The Bitcoin Core developers Copyright (c) 2017 The Raven Core developers Copyright (c) 2018 The Rito Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. TestNode: A peer we use to send messages to ritod, and store responses. Store the hashes of blocks we've seen announced. This is for synchronizing the p2p message traffic, so we can eg wait until a particular block is announced. Requires caller to hold mininode_lock Block until a block announcement for a particular block hash is received. Node0 = pre-segwit, node1 = segwit-aware Create 10 more anyone-can-spend utxo's for testing. Doesn't matter which node we use, just use node0. Test "sendcmpct" (between peers preferring the same version): - No compact block announcements unless sendcmpct is sent. - If sendcmpct is sent with version > preferred_version, the message is ignored. - If sendcmpct is sent with boolean 0, then block announcements are not made with compact blocks. - If sendcmpct is then sent with boolean 1, then new block announcements are made with compact blocks. If old_node is passed in, request compact blocks with version=preferred-1 and verify that it receives block announcements via compact block. Make sure we get a SENDCMPCT message from our peer Check that the first version received is the preferred one And that we receive versions down to 1. We shouldn't get any block announcements via cmpctblock yet. Try one more time, this time after requesting headers. Test a few ways of using sendcmpct that should NOT result in compact block announcements. Before each test, sync the headers chain. Now try a SENDCMPCT message with too-high version Headers sync before next test. Now try a SENDCMPCT message with valid version, but announce=False Headers sync before next test. 
Finally, try a SENDCMPCT message with announce=True Try one more time (no headers sync should be needed!) Try one more time, after turning on sendheaders Try one more time, after sending a version-1, announce=false message. Now turn off announcements Verify that a peer using an older protocol version can receive announcements from this node. Header sync This test actually causes ritod to (reasonably!) disconnect us, so do this last. This index will be too high Compare the generated shortids to what we expect based on BIP 152, given ritod's choice of nonce. Generate a bunch of transactions. Want at least one segwit spend, so move all funds to a witness address. check that our test is not broken Wait until we've seen the block announcement for the resulting tip Make sure we will receive a fast-announce compact block Now mine a block, and look at the resulting compact block. Store the raw block in our internal format. Wait until the block was announced (via compact blocks) Now fetch and check the compact block Convert the on-the-wire representation to absolute indexes Now fetch the compact block using a normal non-announce getdata 4 == "CompactBlock" Now fetch and check the compact block Convert the on-the-wire representation to absolute indexes Check that we got the right block! Make sure the prefilled_txn appears to have included the coinbase Check that all prefilled_txn entries match what's in the block. This checks the non-witness parts of the tx agree And this checks the witness Shouldn't have received a witness Check that the cmpctblock message announced all the transactions. And now check that all the shortids are as expected as well. Determine the siphash keys to use. Already checked prefilled transactions above Test that ritod requests compact blocks when we announce new blocks via header or inv, and that responding to getblocktxn causes the block to be successfully reconstructed. 
Post-segwit: upgraded nodes would only make this request of cb-version-2, NODE_WITNESS peers. Unupgraded nodes would still make this request of any cb-version-1-supporting peer. Try announcing a block with an inv or header, expect a compactblock request Send back a compactblock message that omits the coinbase Expect a getblocktxn message. should be a coinbase request Send the coinbase, and verify that the tip advances. Create a chain of transactions from given utxo, and add to a new block. Test that we only receive getblocktxn requests for transactions that the node needs, and that responding to them causes the block to be reconstructed. First try announcing compactblocks that won't reconstruct, and verify that we receive getblocktxn messages back. serialize with witnesses Now try interspersing the prefilled transactions Now try giving one transaction ahead of time. Prefill 4 out of the 6 transactions, and verify that only the one that was not in the mempool is requested. Now provide all transactions to the node before the block is announced and verify reconstruction happens immediately. Make sure all transactions were accepted. Clear out last request. Send compact block Shouldn't have gotten a request for any transaction Incorrectly responding to a getblocktxn shouldn't cause the block to be permanently failed. Relay the first 5 transactions from the block in advance Make sure all transactions were accepted. Send compact block Now give an incorrect response. Note that it's possible for ritod to be smart enough to know we're lying, since it could check to see if the shortid matches what we're sending, and eg disconnect us for misbehavior. If that behavior change were made, we could just modify this test by having a different peer provide the block further down, so that we're still verifying that the block isn't marked bad permanently. This is good enough for now. 
Tip should not have updated We should receive a getdata request Deliver the block ritod will not send blocktxn responses for blocks whose height is more than 10 blocks deep. Witnesses should have been stripped Check that the witness matches Next request should send a full block response, as we're past the allowed depth for a blocktxn response. Test that requesting old compactblocks doesn't work. Generate an old compactblock, and verify that it's not accepted. Requesting this block via getblocktxn should silently fail (to avoid fingerprinting attacks). ToHex() won't serialize with witness, but this block has no witnesses anyway. TODO: repeat this test with witness tx's to a segwit node. Test that we don't get disconnected if we relay a compact block with valid header, but invalid transactions. If we're testing with segwit, also drop the coinbase witness, but include the witness commitment. Now send the compact block with all transactions prefilled, and verify that we don't get disconnected. Check that the tip didn't advance Helper for enabling cb announcements Send the sendcmpct request and sync headers Now test that delivering an invalid compact block won't break relay Setup the p2p connections and start up the network thread. version 1 peer <--> segwit node Start up network handling in another thread Test logic begins here We will need UTXOs to construct transactions in later tests. End-to-end block relay tests Advance to segwit activation Need to manually sync node0 and node1, because post-segwit activation, node1 will not download blocks from node0. Test that if we submitblock to node1, we'll get a compact block announcement to all peers. (Post-segwit activation, blocks won't propagate from node0 to node1 automatically, so don't bother testing a block announced to node0.) | 7,828 | en | 0.910607 |
import logging
import io
from homeassistant.core import callback
from homeassistant.components.ais_dom import ais_global
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.components.camera import Camera
from homeassistant.helpers.event import async_track_state_change
from datetime import timedelta
_LOGGER = logging.getLogger(__name__)
# Fallback entity name for this platform.
DEFAULT_NAME = "qr_code"
# How often Home Assistant polls the camera (the QR code rarely changes).
SCAN_INTERVAL = timedelta(seconds=2000)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the QRCode image platform."""
    qr_camera = QRCodeCamera(hass, "remote_access", "remote_access")
    add_entities([qr_camera])
class QRCodeCamera(Camera):
    """Camera entity that renders the gate's remote-access URL as a QR code."""

    def __init__(self, hass, name, entity_ids):
        """Initialize the QRCode entity."""
        super().__init__()
        self._hass = hass
        self._name = name
        self._entities = entity_ids
        self._image = io.BytesIO()
        self._refresh_()

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def qr_state_listener(entity, old_state, new_state):
            """Handle device state changes."""
            self._refresh_()

        @callback
        def qr_sensor_startup(event):
            """Update template on startup."""
            async_track_state_change(self.hass, self._entities, qr_state_listener)

        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, qr_sensor_startup)

    @property
    def name(self):
        """Return the name of the image processor."""
        return self._name

    @property
    def should_poll(self):
        """Update the recording state periodically."""
        return True

    @property
    def state(self):
        """Return the remote-access URL that the QR code encodes."""
        return self._remote_url()

    def camera_image(self):
        """Return the current QR code PNG bytes."""
        return self._image.getvalue()

    def turn_on(self):
        """Turn on camera (re-render the QR code)."""
        self._refresh_()

    def turn_off(self):
        """Turning off is a no-op for this virtual camera."""

    def enable_motion_detection(self):
        """Motion detection is not supported."""

    def disable_motion_detection(self):
        """Motion detection is not supported."""

    def _remote_url(self):
        """Build the https remote-access address for this gate."""
        gate_id = ais_global.get_sercure_android_id_dom()
        return "https://" + gate_id + ".paczka.pro"

    def _refresh_(self):
        """Re-render the QR code PNG into the in-memory buffer."""
        import pyqrcode
        import png

        qr_code = pyqrcode.create(self._remote_url())
        self._image.truncate(0)
        self._image.seek(0)
        qr_code.png(
            self._image,
            scale=6,
            module_color=[0, 0, 0],
            background=[0xFF, 0xFF, 0xFF],
        )
| homeassistant/components/ais_qrcode/camera.py | 2,581 | Representation of a QRCode image.
Initialize the QRCode entity.
Process the image.
Return the name of the image processor.
Update template on startup.
Handle device state changes.
Set up the QRCode image platform.
Update the recording state periodically.
Turn on camera. | 271 | en | 0.724557 |
from datetime import datetime
import logging
import os
import subprocess
import sys
from argparse import Namespace
# Silence noisy INFO-level logging from the transformers library.
logging.getLogger("transformers").setLevel(logging.WARNING)
import click
import torch
from luke.utils.model_utils import ModelArchive
from zero.utils.experiment_logger import commet_logger_args, CometLogger, NullLogger
# Log line format used by every basicConfig call below.
LOG_FORMAT = "[%(asctime)s] [%(levelname)s] %(message)s (%(funcName)s@%(filename)s:%(lineno)s)"
try:
    import absl.logging

    # https://github.com/tensorflow/tensorflow/issues/27045#issuecomment-519642980
    # absl registers its own root handler which duplicates log lines; drop it
    # and silence the pre-init stderr warning.
    logging.getLogger().removeHandler(absl.logging._absl_handler)
    absl.logging._warn_preinit_stderr = False
except ImportError:
    pass
logger = logging.getLogger(__name__)
@click.group()
@click.option(
    "--output-dir", default="models", type=click.Path()
)
@click.option("--num-gpus", default=1)
@click.option("--experiment-logger", "--logger", type=click.Choice(["comet"]))
@click.option("--master-port", default=29500)
@click.option("--local-rank", "--local_rank", default=-1)
@click.option("--model-file", type=click.Path(exists=True))
@click.option("--device-id", type=int)
@commet_logger_args
@click.pass_context
def cli(ctx, **kwargs):
    """Top-level CLI group.

    Sets up (optionally distributed) device placement, logging, the
    experiment logger, and an optional pretrained model archive, all
    shared with sub-commands through ctx.obj.
    """
    args = Namespace(**kwargs)
    # Multi-GPU launcher path: re-exec one child process per GPU with the
    # torch.distributed environment variables set, wait for all children,
    # then exit.  Each child re-enters this function with local_rank >= 0.
    if args.local_rank == -1 and args.num_gpus > 1:
        current_env = os.environ.copy()
        current_env["MASTER_ADDR"] = "127.0.0.1"
        current_env["MASTER_PORT"] = str(args.master_port)
        current_env["WORLD_SIZE"] = str(args.num_gpus)
        processes = []
        for args.local_rank in range(0, args.num_gpus):
            current_env["RANK"] = str(args.local_rank)
            current_env["LOCAL_RANK"] = str(args.local_rank)
            cmd = [sys.executable, "-u", "-m", "examples.cli", "--local-rank={}".format(args.local_rank)]
            cmd.extend(sys.argv[1:])
            process = subprocess.Popen(cmd, env=current_env)
            processes.append(process)
        for process in processes:
            process.wait()
            if process.returncode != 0:
                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
        sys.exit(0)
    else:
        # Only the main process (rank -1 or 0) logs at INFO and creates dirs.
        if args.local_rank not in (-1, 0):
            logging.basicConfig(format=LOG_FORMAT, level=logging.WARNING)
        else:
            logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Output dir: %s", args.output_dir)
    # NOTE: ctx.obj is documented here: http://click.palletsprojects.com/en/7.x/api/#click.Context.obj
    ctx.obj = dict(local_rank=args.local_rank, output_dir=args.output_dir)
    # Device selection: CPU, a single explicitly chosen GPU, or this
    # process's distributed rank (with NCCL process-group initialization).
    if args.num_gpus == 0:
        ctx.obj["device"] = torch.device("cpu")
    elif args.local_rank == -1:
        ctx.obj["device"] = torch.device("cuda:{}".format(args.device_id))
    else:
        torch.cuda.set_device(args.local_rank)
        ctx.obj["device"] = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
    # Only the main process reports to the experiment tracker.
    experiment_logger = NullLogger()
    if args.local_rank in (-1, 0) and args.experiment_logger == "comet":
        experiment_logger = CometLogger(args)
    experiment_logger.log_parameters({p.name: getattr(args, p.name) for p in cli.params})
    ctx.obj["experiment"] = experiment_logger
    # Optionally preload a trained model archive into the shared context.
    if args.model_file:
        model_archive = ModelArchive.load(args.model_file)
        ctx.obj["tokenizer"] = model_archive.tokenizer
        ctx.obj["entity_vocab"] = model_archive.entity_vocab
        ctx.obj["bert_model_name"] = model_archive.bert_model_name
        ctx.obj["model_config"] = model_archive.config
        ctx.obj["max_mention_length"] = model_archive.max_mention_length
        ctx.obj["model_weights"] = model_archive.state_dict
        experiment_logger.log_parameter("model_file_name", os.path.basename(args.model_file))
from zero.ner.main import cli as ner_cli

# Register the NER sub-command group on the top-level CLI.
cli.add_command(ner_cli)
if __name__ == "__main__":
cli() | zero/cli.py | 4,131 | https://github.com/tensorflow/tensorflow/issues/27045issuecomment-519642980 NOTE: ctx.obj is documented here: http://click.palletsprojects.com/en/7.x/api/click.Context.obj | 171 | en | 0.548465 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the EventParticipation model and make Card.event optional."""

    dependencies = [
        ('api', '0026_event_image'),
    ]

    operations = [
        # New through-model linking an Account to an Event with ranking data.
        migrations.CreateModel(
            name='EventParticipation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ranking', models.PositiveIntegerField(null=True, verbose_name='Ranking', blank=True)),
                ('song_ranking', models.PositiveIntegerField(null=True, verbose_name='Song Ranking', blank=True)),
                ('points', models.PositiveIntegerField(null=True, verbose_name='Points', blank=True)),
                ('account', models.ForeignKey(related_name='events', to='api.Account')),
                ('event', models.ForeignKey(related_name='participations', to='api.Event')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # An account may participate in a given event at most once.
        migrations.AlterUniqueTogether(
            name='eventparticipation',
            unique_together=set([('event', 'account')]),
        ),
        # Card.event becomes nullable/optional.
        migrations.AlterField(
            model_name='card',
            name='event',
            field=models.ForeignKey(related_name='cards', blank=True, to='api.Event', null=True),
            preserve_default=True,
        ),
    ]
| api/migrations/0027_auto_20150227_2321.py | 1,427 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# import the definition of the steps and input files:
from Configuration.PyReleaseValidation.relval_steps import *
# here only define the workflows as a combination of the steps defined above:
# Matrix maps a workflow number to its list of steps.
workflows = Matrix()
# each workflow defines a name and a list of steps to be done.
# if no explicit name/label given for the workflow (first arg),
# the name of step1 will be used
from Configuration.PyReleaseValidation.relval_upgrade import workflows as _upgrade_workflows
#just define all of them
#WFs to run in IB:
# 2017 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat)
# (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design)
# (TTbar trackingOnly, trackingRun2, trackingOnlyRun2, trackingLowPU, pixelTrackingOnly)
# 2018 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat)
# 2018 (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design)
# (TTbar trackingOnly, pixelTrackingOnly)
# (HE collapse: TTbar, TTbar PU, TTbar design)
# (ParkingBPH: TTbar)
# (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto)
# (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto)
# 2021 (ZMM, TTbar, ZEE, MinBias, TTbar PU, TTbar PU premix, ZEE PU, TTbar design)
# (TTbar trackingMkFit)
# (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto)
# (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto)
# 2023 (TTbar, TTbar PU, TTbar PU premix)
# 2024 (TTbar, TTbar PU, TTbar PU premix)
# Workflow numbers to run in IBs; only entries actually defined in the
# upgrade workflow matrix are copied into `workflows` below.
numWFIB = [10001.0,10002.0,10003.0,10004.0,10005.0,10006.0,10007.0,10008.0,10009.0,10059.0,10071.0,
           10042.0,10024.0,10025.0,10026.0,10023.0,10224.0,10225.0,10424.0,
           10024.1,10024.2,10024.3,10024.4,10024.5,
           10801.0,10802.0,10803.0,10804.0,10805.0,10806.0,10807.0,10808.0,10809.0,10859.0,10871.0,
           10842.0,10824.0,10825.0,10826.0,10823.0,11024.0,11025.0,11224.0,
           10824.1,10824.5,
           10824.6,11024.6,11224.6,
           10824.8,
           10842.501,10842.502,  # 10842.503,10842.504,
           10824.501,10824.502,  # 10824.503,10824.504,
           # 10824.511,10824.512,10824.513,10824.514,
           # 10824.521,10824.522,10824.523,10824.524,
           11650.0,11634.0,11646.0,11640.0,11834.0,11834.99,11846.0,12024.0,
           11634.7,
           11650.501,11650.502,  # 11650.503,11650.504,
           11634.501,11634.502,  # 11634.503,11634.504,
           # 11634.511,11634.512,11634.513,11634.514,
           # 11634.521,11634.522,11634.523,11634.524,
           12434.0,12634.0,12634.99,
           12834.0,13034.0,13034.99]
for numWF in numWFIB:
    # Fix: use the idiomatic "not in" membership test (was "not numWF in").
    if numWF not in _upgrade_workflows:
        continue
    workflows[numWF] = _upgrade_workflows[numWF]
| Configuration/PyReleaseValidation/python/relval_2017.py | 3,030 | import the definition of the steps and input files: here only define the workflows as a combination of the steps defined above: each workflow defines a name and a list of steps to be done. if no explicit name/label given for the workflow (first arg), the name of step1 will be usedjust define all of themWFs to run in IB: 2017 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat) (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design) (TTbar trackingOnly, trackingRun2, trackingOnlyRun2, trackingLowPU, pixelTrackingOnly) 2018 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat) 2018 (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design) (TTbar trackingOnly, pixelTrackingOnly) (HE collapse: TTbar, TTbar PU, TTbar design) (ParkingBPH: TTbar) (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto) (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto) (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto) (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto) 2021 (ZMM, TTbar, ZEE, MinBias, TTbar PU, TTbar PU premix, ZEE PU, TTbar design) (TTbar trackingMkFit) (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto) (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto) (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto) (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto) 2023 (TTbar, TTbar PU, TTbar PU premix) 2024 (TTbar, TTbar PU, TTbar PU premix) 10842.503,10842.504, 10824.503,10824.504, 10824.511,10824.512,10824.513,10824.514, 10824.521,10824.522,10824.523,10824.524, 11650.503,11650.504, 11634.503,11634.504, 11634.511,11634.512,11634.513,11634.514, 11634.521,11634.522,11634.523,11634.524, | 1,826 | en | 0.54541 |
import MPS_class as MPS
import MPO_class as MPO
from ncon import ncon
import numpy as np
from scipy.linalg import expm
#%%
def TEBD_evo(MPS_,Lx,Ly,J=1,epsilon=0.1,etrunc=0,chiMAX=256,chiMAXswap=256,info=True):
    """One driving period of TEBD on an Lx x Ly lattice mapped to a 1D MPS.

    Sweeps forward over all sites applying the two-site propagator
    exp(-i * kron(sz, -J*sz)) on every lattice bond (SWAP gates bring
    vertically neighbouring sites next to each other in the MPS) and then a
    single-site rotation exp(-i*theta/2 * sx) with theta = pi + 2*epsilon
    (a pi-pulse perturbed by 2*epsilon); the same pattern is repeated in a
    backward sweep.  The MPS is modified in place.

    :param MPS_: MPS object; tensors MPS_.M[i] with legs (left, phys, right)
                 are updated in place.
    :param Lx: number of lattice rows.
    :param Ly: number of lattice columns.
    :param J: Ising coupling in the bond propagator (the caller below passes
              J=-1j, which makes the bond gate non-unitary -- see driver).
    :param epsilon: deviation of the kick angle from a perfect pi pulse.
    :param etrunc: truncation threshold forwarded to MPS.svdtruncate.
    :param chiMAX: maximal bond dimension for the two-site updates.
    :param chiMAXswap: maximal bond dimension used during SWAP moves.
    :param info: verbosity flag forwarded to svdtruncate/swap.
    """
    L = Lx*Ly
    # Row-major map from 2D lattice coordinates to the 1D MPS site index.
    config = np.arange(0,L).reshape(Lx,Ly)
    theta = (np.pi+2*epsilon)
    # Single-site kick: cos(theta/2)*I - i*sin(theta/2)*sigma_x = exp(-i*theta/2*sigma_x).
    flip_op = np.eye(2)*np.cos(theta/2) - 1j*np.sin(theta/2)*np.array([[0,1],[1,0]])
    sigma_z = np.array([[1,0],[0,-1]])
    # Two-site bond gate reshaped to a 4-leg tensor (in, in, out, out).
    Uprop = expm(-1j*np.kron(sigma_z,-J*sigma_z)).reshape(2,2,2,2)
    # Forward neighbour lists: for each site, its right (j+1) and down (i+1) neighbours.
    nn_list_forward = [[] for x in range(L)]
    for x in range(L):
        i,j = np.where(config == x)
        if j != Ly-1: nn_list_forward[x].append( config[i,j+1])
        if i != Lx-1: nn_list_forward[x].append( config[i+1,j])
        nn_list_forward[x] = np.array(nn_list_forward[x]).ravel()
    # Backward neighbour lists: left (j-1) and up (i-1) neighbours.
    nn_list_backward = [[] for x in range(L)]
    for x in reversed(range(L)):
        i,j = np.where(config == x)
        if j != 0: nn_list_backward[x].append( config[i,j-1])
        if i != 0: nn_list_backward[x].append( config[i-1,j])
        nn_list_backward[x] = np.array(nn_list_backward[x]).ravel()
    # ---- forward sweep: bond gates of site x, then the single-site kick on x ----
    for x in range(L):
        for nn in nn_list_forward[x]:
            # If they are nearest neighbours
            if nn == x+1:
                # Adjacent in the MPS: contract the gate into the pair and re-split by SVD.
                shpM1,shpM2 = MPS_.M[x].shape, MPS_.M[nn].shape
                Theta = ncon([MPS_.M[x],MPS_.M[nn],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
                Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
                U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
                # Singular values absorbed into the right tensor.
                MPS_.M[x] = U.reshape(shpM1[0],shpM1[1],S.size)
                MPS_.M[nn] = (np.diag(S)@V).reshape(S.size,shpM2[1],shpM2[2])
            else:
                # Vertical bond: SWAP site x up next to nn, apply the gate, SWAP back.
                for index in range(x,nn-1):
                    MPS_.swap(index,chiMAX=chiMAXswap,info=info)
                shpM1,shpM2 = MPS_.M[nn-1].shape, MPS_.M[nn].shape
                Theta = ncon([MPS_.M[nn-1],MPS_.M[nn],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
                Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
                U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
                # Singular values absorbed into the left tensor before swapping back.
                MPS_.M[nn-1] = (U@np.diag(S)).reshape(shpM1[0],shpM1[1],S.size)
                MPS_.M[nn] = V.reshape(S.size,shpM2[1],shpM2[2])
                for index in reversed(range(x,nn-1)):
                    MPS_.swap(index,chiMAX=chiMAXswap,info=info)
        # Single-site kick once all bonds emanating from x were applied.
        MPS_.M[x] = ncon([MPS_.M[x],flip_op],[[-1,1,-3],[1,-2]])
    # ---- backward sweep: same pattern towards decreasing site index ----
    for x in reversed(range(L)):
        for nn in nn_list_backward[x]:
            # If they are nearest neighbours
            if nn == x-1:
                shpM1,shpM2 = MPS_.M[nn].shape, MPS_.M[x].shape
                Theta = ncon([MPS_.M[nn],MPS_.M[x],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
                Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
                U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
                # Singular values absorbed into the left tensor.
                MPS_.M[nn] = (U@np.diag(S)).reshape(shpM1[0],shpM1[1],S.size)
                MPS_.M[x] = (V).reshape(S.size,shpM2[1],shpM2[2])
            else:
                # SWAP site x down next to nn (note center='i' on backward swaps).
                for index in range(x-1,nn,-1):
                    MPS_.swap(index,chiMAX=chiMAXswap,center='i',info=info)
                shpM1,shpM2 = MPS_.M[nn].shape, MPS_.M[nn+1].shape
                Theta = ncon([MPS_.M[nn],MPS_.M[nn+1],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
                Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
                U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
                MPS_.M[nn] = U.reshape(shpM1[0],shpM1[1],S.size)
                MPS_.M[nn+1] = (np.diag(S)@V).reshape(S.size,shpM2[1],shpM2[2])
                for index in reversed(range(x-1,nn,-1)):
                    MPS_.swap(index,chiMAX=chiMAXswap,center='i',info=info)
        MPS_.M[x] = ncon([MPS_.M[x],flip_op],[[-1,1,-3],[1,-2]])
# ---- driver: kicked evolution of an Lx x Ly all-spins-up state ----
Lx = 5
Ly = Lx
L = Lx*Ly
psi_state = MPS.getAllUp(L)
mag = []  # local magnetisation profile after each period
err = 0.
info = True
# Profile before any kick.
mag.append(MPO.return_LocalMz(psi_state).real.reshape(Lx,Ly))
for k in range(20):
    # Sanity check: the profile should stay symmetric under lattice transposition.
    print('k',k,np.max(mag[k]-mag[k].T))
    # Print the current bond dimensions of every MPS tensor.
    for x in psi_state.M:
        print(x.shape)
    # NOTE(review): J=-1j makes the bond gate expm(-1j*kron(sz, 1j*sz)) = expm(kron(sz,sz)),
    # i.e. non-unitary imaginary-time-like evolution on the bonds -- confirm intent.
    TEBD_evo(psi_state, Lx, Ly,J = -1j,chiMAX=256,chiMAXswap=256,etrunc=1e-12,info=info)
    mag.append(MPO.return_LocalMz(psi_state).real.reshape(Lx,Ly))
mag = np.array(mag)
#%%
from scipy.sparse.linalg import expm_multiply
import scipy.sparse as sps
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
import seaborn as sns
def nested_tensor(lst):
    """Return the Kronecker (tensor) product of a list of sparse operators.

    Generalization over the original: a single-element list is now accepted
    and returns that operator in CSC format (the original raised IndexError
    for fewer than two factors).

    :param lst: operators ordered from leftmost to rightmost tensor factor.
    :return: scipy.sparse CSC matrix equal to lst[0] (x) lst[1] (x) ... (x) lst[-1].
    """
    if len(lst) == 1:
        return sps.csc_matrix(lst[0])
    if len(lst) == 2:
        return sps.kron(lst[0], lst[1], format='csc')
    return sps.kron(lst[0], nested_tensor(lst[1:]), format='csc')
def spin_op_construct(sigma, j, L):
    """Embed the single-site operator `sigma` at site j of an L-site chain,
    padding every other site with the 2x2 identity."""
    factors = [sps.eye(2) for _ in range(j)]
    factors.append(sigma)
    factors.extend(sps.eye(2) for _ in range(L - j - 1))
    return nested_tensor(factors)
def int_spin_op_construct(sigma1, sigma2, i1, i2, L):
    """Embed the two-site operator `sigma1` (site i1) x `sigma2` (site i2)
    on an L-site chain, identities everywhere else.

    Bug fix: when i2 < i1 the original swapped only the site indices, which
    silently placed `sigma1` on the smaller site; the operators must travel
    with their indices (the difference is invisible only when the two
    operators are equal, as in all callers in this file).

    NOTE(review): i1 == i2 is not handled and produces an operator of the
    wrong dimension -- callers must pass distinct sites.
    """
    if i2 < i1:
        i1, i2, sigma1, sigma2 = i2, i1, sigma2, sigma1
    before1 = [sps.eye(2) for _ in range(i1)]
    mid1 = [sigma1]
    after1 = [sps.eye(2) for _ in range(i1 + 1, i2)]
    mid2 = [sigma2]
    after2 = [sps.eye(2) for _ in range(i2 + 1, L)]
    return nested_tensor(before1 + mid1 + after1 + mid2 + after2)
def H1(L, epsilon):
    """Sum of the single-site kick generators -(pi/2 + epsilon) * sigma_x
    over every site of an L-site chain."""
    sigma_x = sps.csc_matrix(np.array([[0, 1], [1, 0]]))
    kick = -sigma_x * (np.pi / 2 + epsilon)
    return sum(spin_op_construct(kick, site, L) for site in range(L))
def H2(Lx, Ly, J=1):
    """Open-boundary Ising coupling: sum of -J sz_i sz_j over all
    nearest-neighbour bonds of an Lx x Ly lattice (each bond once)."""
    L = Lx * Ly
    sigma_z = sps.csc_matrix(np.array([[1, 0], [0, -1]]))
    sites = np.arange(L).reshape(Lx, Ly)
    total = 0
    for i in range(Lx):
        for j in range(Ly):
            # Couple each site to its lower and right neighbour, if any.
            neighbours = []
            if i != Lx - 1:
                neighbours.append(sites[i + 1, j])
            if j != Ly - 1:
                neighbours.append(sites[i, j + 1])
            for nb in neighbours:
                total += int_spin_op_construct(-J * sigma_z, sigma_z, sites[i, j], nb, L)
    return total
def H2_pbc(Lx, Ly, J=1):
    """Periodic-boundary Ising coupling -J sz_i sz_j on an Lx x Ly torus.

    Every site couples to its lower and right neighbour with wrap-around, so
    each bond of the torus is counted exactly once.

    Bug fix: the wrap-around used ``% (Lx-1)`` / ``% (Ly-1)``, which sends the
    last row/column to index 1 instead of 0 and also mislinks the
    second-to-last row/column; periodic boundaries require ``% Lx`` / ``% Ly``.
    """
    L = Lx * Ly
    sigma_z = sps.csc_matrix(np.array([[1, 0], [0, -1]]))
    op2 = 0
    config = np.arange(L).reshape(Lx, Ly)
    for i in range(Lx):
        for j in range(Ly):
            for x in (config[(i + 1) % Lx, j], config[i, (j + 1) % Ly]):
                op2 += int_spin_op_construct(-J * sigma_z, sigma_z, config[i, j], x, L)
    return op2
def H2_pbc1D(L, J=1):
    """Periodic 1D chain: sum over i of -J sz_i sz_{(i+1) mod L}."""
    sigma_z = sps.csc_matrix(np.array([[1, 0], [0, -1]]))
    return sum(int_spin_op_construct(-J * sigma_z, sigma_z, i, (i + 1) % L, L)
               for i in range(L))
def H2_pbc1D_var(L, J=1):
    """Variant 1D chain: bulk bonds -J sz_i sz_{i+1} for i in [1, L-2],
    plus -J/2 identity terms on the two edge sites (0 and L-1)."""
    sigma_z = sps.csc_matrix(np.array([[1, 0], [0, -1]]))
    bulk = sum(int_spin_op_construct(-J * sigma_z, sigma_z, i, i + 1, L)
               for i in range(1, L - 1))
    edge_term = -J * 0.5 * np.eye(2)
    return bulk + spin_op_construct(edge_term, L - 1, L) + spin_op_construct(edge_term, 0, L)
# ---- exact-diagonalisation driver for the two 1D chain variants ----
Lx = 4;
Ly = Lx;
L = Lx*Ly;
D = 2**L  # full Hilbert-space dimension
en = []
# Per-basis-state magnetisation <Sz>/L, read off the binary representation
# of each computational basis index ('1' bits count as down spins).
mz_config = np.zeros(D)
for i,state in enumerate(np.vectorize(np.binary_repr)(np.arange(2**L),L)):
    mz_config[i] = (L-2*state.count('1'))/L
Hdouble_1d = H2_pbc1D(L)
Hdouble_1dv = H2_pbc1D_var(L)
epsilon = 0.1
Hsingle = H1(L,epsilon)
# Start both evolutions from basis state 0 (all spins up).
psi0 = np.zeros(D)
psi0[0] = 1
psi1dv = [psi0]
psi1d = [psi0]
# 200 driving periods: Ising half-step followed by the single-site kick.
for n in range(200):
    print(n,' ',end='')
    psi1dv.append(expm_multiply(-1j*Hsingle,expm_multiply(-1j*Hdouble_1dv,psi1dv[-1])))
    psi1d.append(expm_multiply(-1j*Hsingle,expm_multiply(-1j*Hdouble_1d,psi1d[-1])))
#%%
# Stack the stored states (rows = periods) and compute the magnetisation
# expectation value <Sz>/L at every period as |amplitude|^2 . mz_config.
psi1dv = np.array(psi1dv)
psi1d = np.array(psi1d)
mag_ED = np.abs(psi1dv)**2@mz_config.reshape(D,1)
mag_ED = mag_ED.reshape(mag_ED.size)
mag_ED1d = np.abs(psi1d)**2@mz_config.reshape(D,1)
mag_ED1d = mag_ED1d.reshape(mag_ED1d.size)
plt.plot(np.abs(mag_ED))
plt.plot(np.abs(mag_ED1d))
#%%
# Return probability to the all-up state (index 0) on even periods and to the
# fully flipped state (index -1) on odd periods, for each stored step.
L1d = np.zeros(psi1d.shape[0])
L2d = np.zeros(psi1d.shape[0])
for x in range(psi1d.shape[0]):
    if x%2 == 0: k = 0
    else: k=-1
    L1d[x] = np.abs(psi1d[x,k])**2
    # NOTE(review): `psi` is not defined anywhere in this file -- this line
    # raises NameError at runtime; presumably another state array (e.g.
    # psi1dv) was intended. Confirm before running this cell.
    L2d[x] = np.abs(psi[x,k])**2
#%%
# First-order (Euler) approximations of the two propagators, with an explicit
# renormalisation after every step since (1 - iH) is not unitary.
K = (sps.eye(2**L)-1j*Hsingle)
# NOTE(review): `Hdouble` is never defined (only Hdouble_1d / Hdouble_1dv
# exist above) -- this line raises NameError; confirm which Hamiltonian was
# intended here.
U = (sps.eye(2**L)-1j*Hdouble)
psi_1 = []
psi_1.append(psi0)
for n in range(100):
    print(n,' ',end='')
    psi_1.append(K.dot(U.dot(psi_1[-1])))
    psi_1[-1] /= np.linalg.norm(psi_1[-1])
psi_1 = np.array(psi_1)
# NOTE(review): this reuses the name mag_ED1 next to mag_ED from the earlier
# cell; the plot below still shows the exact-propagator result mag_ED.
mag_ED1 = np.abs(psi_1)**2@mz_config.reshape(D,1)
plt.plot(mag_ED)
plt.plot(mag_ED1) | tebd_floquet.py | 8,295 | %% If they are nearest neighbours If they are nearest neighbours%%%%%%%% | 72 | en | 0.982266 |
"""
link: https://leetcode-cn.com/problems/smallest-rectangle-enclosing-black-pixels
problem: ็ปๅฎ 0, 1 ็ฉ้ต๏ผไปฅๅไธไธช็ฉ้ตไธญไธบ 1 ็็นๅๆ ๏ผๆฑๅ
ๅซ็ฉ้ตไธญๆๆ็1็ๆๅฐ็ฉๅฝข้ข็งฏ
solution: ๆดๆใๅฟฝ็ฅๅๆ ๏ผ็ดๆฅ้ๅๆๆ่็น๏ผๆพๅฐไธไธๅทฆๅณๅไธช่พน็็น๏ผๆถ้ดO(nm)ใ
solution-fix: ไบๅใๅฐx่ฝดๆๅฝฑๅฐy่ฝด๏ผy่ฝดๆๅฝฑๅฐx่ฝด๏ผๅฝขๆไธคไธชไธ็ปดๆฐ็ปใๆพ็ถๆฐ็ปๅฝขๅฆไธๅพใ่ x, y ๅๆ ไธบ็๏ผไธคไพงๅไธบ้ไธฅๆ ผ้ๅขๅ้ๅ
1: +------+
0: -----+ +-----
ๅๆฌกไบๅๆพๅฐ้ๅข้ๅ่พน็๏ผๆถ้ดๅคๆๅบฆ O(nlogn*mlogm)
"""
class Solution:
    def minArea(self, image: List[List[str]], x: int, y: int) -> int:
        """Area of the smallest axis-aligned rectangle covering every '1' pixel.

        Brute force: a single full scan of the grid tracking the extreme row
        and column indices of black pixels; (x, y) is not needed here.
        O(n*m) time, O(1) extra space.
        """
        if not image or not image[0]:
            return 0
        n, m = len(image), len(image[0])
        top, left = n, m
        bottom, right = 0, 0
        for i, row in enumerate(image):
            for j, cell in enumerate(row):
                if cell == '1':
                    top = min(top, i)
                    left = min(left, j)
                    bottom = max(bottom, i)
                    right = max(right, j)
        return (bottom + 1 - top) * (right + 1 - left)
# ---
class Solution:
    def minArea(self, image: List[List[str]], x: int, y: int) -> int:
        """Area of the smallest axis-aligned rectangle covering every '1' pixel.

        Binary-search version: because the black pixels form one connected
        region, their projections onto the rows and onto the columns are
        contiguous intervals, so each of the four edges can be located by a
        binary search anchored at the known black pixel (x, y).
        O(n log m + m log n) time.

        Improvement over the original: the two near-duplicate (and
        misleadingly named) helpers search_column/search_row are merged into
        one edge-search helper parameterized by the scan predicate.
        """
        if not image or not image[0]:
            return 0
        n, m = len(image), len(image[0])

        def row_has_black(i: int) -> bool:
            # True if row i contains at least one '1'.
            return any(image[i][j] == '1' for j in range(m))

        def col_has_black(j: int) -> bool:
            # True if column j contains at least one '1'.
            return any(image[i][j] == '1' for i in range(n))

        def edge(lo: int, hi: int, has_black, first: bool) -> int:
            # Binary search in [lo, hi] for the first (first=True) or last
            # (first=False) index whose row/column contains a black pixel.
            best = hi if first else lo
            while lo <= hi:
                mid = (lo + hi) // 2
                if has_black(mid):
                    if first:
                        best = min(best, mid)
                        hi = mid - 1
                    else:
                        best = max(best, mid)
                        lo = mid + 1
                elif first:
                    lo = mid + 1
                else:
                    hi = mid - 1
            return best

        top = edge(0, x, row_has_black, True)
        bottom = edge(x, n - 1, row_has_black, False)
        left = edge(0, y, col_has_black, True)
        right = edge(y, m - 1, col_has_black, False)
        return (bottom + 1 - top) * (right + 1 - left)
| leetcode/302.py | 2,601 | link: https://leetcode-cn.com/problems/smallest-rectangle-enclosing-black-pixels
problem: ็ปๅฎ 0, 1 ็ฉ้ต๏ผไปฅๅไธไธช็ฉ้ตไธญไธบ 1 ็็นๅๆ ๏ผๆฑๅ
ๅซ็ฉ้ตไธญๆๆ็1็ๆๅฐ็ฉๅฝข้ข็งฏ
solution: ๆดๆใๅฟฝ็ฅๅๆ ๏ผ็ดๆฅ้ๅๆๆ่็น๏ผๆพๅฐไธไธๅทฆๅณๅไธช่พน็็น๏ผๆถ้ดO(nm)ใ
solution-fix: ไบๅใๅฐx่ฝดๆๅฝฑๅฐy่ฝด๏ผy่ฝดๆๅฝฑๅฐx่ฝด๏ผๅฝขๆไธคไธชไธ็ปดๆฐ็ปใๆพ็ถๆฐ็ปๅฝขๅฆไธๅพใ่ x, y ๅๆ ไธบ็๏ผไธคไพงๅไธบ้ไธฅๆ ผ้ๅขๅ้ๅ
1: +------+
0: -----+ +-----
ๅๆฌกไบๅๆพๅฐ้ๅข้ๅ่พน็๏ผๆถ้ดๅคๆๅบฆ O(nlogn*mlogm)
--- | 383 | zh | 0.722328 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
import config
import command
# Node ids for the five-node test topology (links are set up in setUp).
LEADER = 1
ROUTER1 = 2
DUT_ROUTER2 = 3  # the device under test
ROUTER3 = 4
MED1 = 5  # minimal end device, child of the DUT
MED1_TIMEOUT = 3  # child timeout given to MED1 via set_timeout; the test also advances the simulator by this amount after stopping MED1
class Cert_5_3_3_AddressQuery(unittest.TestCase):
    """Thread certification test 5.3.3: address query behaviour of the DUT.

    Topology (whitelist-restricted radio links):
    LEADER <-> ROUTER1, LEADER <-> DUT_ROUTER2, LEADER <-> ROUTER3,
    DUT_ROUTER2 <-> ROUTER3, and MED1 attached as a child of DUT_ROUTER2.
    """
    def setUp(self):
        """Create the simulator and the five nodes, wiring the whitelists."""
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1,6):
            # Second argument marks MED1 as the (only) MTD node -- confirm
            # against node.Node's signature.
            self.nodes[i] = node.Node(i, (i == MED1), simulator=self.simulator)
        # LEADER: router-capable, linked to ROUTER1, DUT_ROUTER2 and ROUTER3.
        self.nodes[LEADER].set_panid()
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER3].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        # ROUTER1: linked only to the LEADER.
        self.nodes[ROUTER1].set_panid()
        self.nodes[ROUTER1].set_mode('rsdn')
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()
        self.nodes[ROUTER1].set_router_selection_jitter(1)
        # DUT_ROUTER2: linked to LEADER and ROUTER3, parents MED1.
        self.nodes[DUT_ROUTER2].set_panid()
        self.nodes[DUT_ROUTER2].set_mode('rsdn')
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[ROUTER3].get_addr64())
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[MED1].get_addr64())
        self.nodes[DUT_ROUTER2].enable_whitelist()
        self.nodes[DUT_ROUTER2].set_router_selection_jitter(1)
        # ROUTER3: linked to LEADER and DUT_ROUTER2.
        self.nodes[ROUTER3].set_panid()
        self.nodes[ROUTER3].set_mode('rsdn')
        self.nodes[ROUTER3].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER3].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[ROUTER3].enable_whitelist()
        self.nodes[ROUTER3].set_router_selection_jitter(1)
        # MED1: end-device mode ('rsn'), child of DUT_ROUTER2 with a short timeout.
        self.nodes[MED1].set_panid()
        self.nodes[MED1].set_mode('rsn')
        self.nodes[MED1].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[MED1].set_timeout(MED1_TIMEOUT)
        self.nodes[MED1].enable_whitelist()
    def tearDown(self):
        """Stop every node and drop the simulator.

        NOTE(review): the loop variable shadows the imported `node` module
        inside this method (harmless here, but worth renaming).
        """
        for node in list(self.nodes.values()):
            node.stop()
        del self.nodes
        del self.simulator
    def test(self):
        """Execute the six test-plan steps and verify the DUT's CoAP traffic."""
        # 1 -- bring up the topology and wait for every node to reach its role.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER1].start()
        self.nodes[DUT_ROUTER2].start()
        self.nodes[ROUTER3].start()
        self.nodes[MED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        self.assertEqual(self.nodes[DUT_ROUTER2].get_state(), 'router')
        self.assertEqual(self.nodes[ROUTER3].get_state(), 'router')
        self.assertEqual(self.nodes[MED1].get_state(), 'child')
        # 2 -- MED1 pings ROUTER3's ML-EID; the DUT must resolve the address.
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        router3_mleid = self.nodes[ROUTER3].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[MED1].ping(router3_mleid))
        # Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_messages.next_coap_message('0.02', '/a/aq')
        command.check_address_query(msg, self.nodes[DUT_ROUTER2], config.REALM_LOCAL_ALL_ROUTERS_ADDRESS)
        # 3 -- ROUTER1 pings MED1; the DUT answers the query on the child's behalf.
        # Wait the finish of address resolution traffic triggered by previous ping.
        self.simulator.go(5)
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        med1_mleid = self.nodes[MED1].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[ROUTER1].ping(med1_mleid))
        # Verify DUT_ROUTER2 responded with an Address Notification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_messages.next_coap_message('0.02', '/a/an')
        command.check_address_notification(msg, self.nodes[DUT_ROUTER2], self.nodes[ROUTER1])
        # 4 -- repeat the ping of step 2; the DUT must use its address cache.
        # Wait the finish of address resolution traffic triggered by previous ping.
        self.simulator.go(5)
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        self.assertTrue(self.nodes[MED1].ping(router3_mleid))
        # Verify DUT_ROUTER2 didn't send an Address Query Request.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_messages.next_coap_message('0.02', '/a/aq', False)
        assert msg is None, "The Address Query Request is not expected."
        # 5 -- take ROUTER3 down; a new query must be sent once the cache expires.
        self.nodes[ROUTER3].stop()
        # Wait for the Leader to expire its Router ID.
        # MAX_NEIGHBOR_AGE + INFINITE_COST_TIMEOUT + ID_REUSE_DELAY + propagation time + transmission time ~ 580s.
        self.simulator.go(580)
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        self.assertFalse(self.nodes[MED1].ping(router3_mleid))
        # Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_messages.next_coap_message('0.02', '/a/aq')
        command.check_address_query(msg, self.nodes[DUT_ROUTER2], config.REALM_LOCAL_ALL_ROUTERS_ADDRESS)
        # 6 -- stop MED1 and let its child timeout elapse; the DUT must not
        # answer address queries for the vanished child any more.
        self.nodes[MED1].stop()
        self.simulator.go(MED1_TIMEOUT)
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        # NOTE(review): the ping is issued twice -- presumably a deliberate
        # retry after the first resolution attempt fails; confirm.
        self.assertFalse(self.nodes[ROUTER1].ping(med1_mleid))
        self.assertFalse(self.nodes[ROUTER1].ping(med1_mleid))
        # Verify DUT_ROUTER2 didn't respond with an Address Notification.
        dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_messages.next_coap_message('0.02', '/a/an', False)
        assert msg is None, "The Address Notification is not expected."
if __name__ == '__main__':
    # Allow running this certification script directly via the unittest runner.
    unittest.main()
| tests/scripts/thread-cert/Cert_5_3_03_AddressQuery.py | 7,962 | !/usr/bin/env python Copyright (c) 2016, The OpenThread Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 1 2 Flush the message queue to avoid possible impact on follow-up verification. Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address. 3 Wait the finish of address resolution traffic triggerred by previous ping. Flush the message queue to avoid possible impact on follow-up verification. Verify DUT_ROUTER2 responded with an Address Notification. 
4 Wait the finish of address resolution traffic triggerred by previous ping. Flush the message queue to avoid possible impact on follow-up verification. Verify DUT_ROUTER2 didn't send an Address Query Request. 5 Wait for the Leader to expire its Router ID. MAX_NEIGHBOR_AGE + INFINITE_COST_TIMEOUT + ID_REUSE_DELAY + propagation time + transmission time ~ 580s. Flush the message queue to avoid possible impact on follow-up verification. Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address. 6 Flush the message queue to avoid possible impact on follow-up verification. Verify DUT_ROUTER2 didn't respond with an Address Notification. | 2,573 | en | 0.87048 |
import re
from haystack.inputs import Exact, Clean, BaseInput
from api.helpers.parse_helper import has_balanced_parentheses, matched_parens
class ElasticSearchExtendedAutoQuery(BaseInput):
    """
    A convenience class that handles common user queries.
    Every plain token is cleaned, double-quoted phrases are kept together as
    exact matches, and the boolean operators OR/AND/NOT/TO pass through
    untouched. Unbalanced parentheses are stripped from the query first.
    """
    input_type_name = 'auto_query'
    post_process = False
    exact_match_re = re.compile(r'"(?P<phrase>.*?)"')
    uncleaned_tokens = [
        'OR',
        'AND',
        'NOT',
        'TO',
    ]
    to_be_removed_special_chars_translation_table = {ord(c): None for c in matched_parens}

    def prepare(self, query_obj):
        query_string = super(ElasticSearchExtendedAutoQuery, self).prepare(query_obj)
        # Drop every parenthesis character when they do not balance.
        if not has_balanced_parentheses(query_string):
            query_string = query_string.translate(self.to_be_removed_special_chars_translation_table)

        phrases = self.exact_match_re.findall(query_string)
        tokens = []
        for segment in self.exact_match_re.split(query_string):
            if not segment:
                continue
            if segment in phrases:
                # Quoted phrase: keep it as a single token.
                tokens.append(segment)
            else:
                # Plain segment: may contain several space-separated words.
                tokens.extend(segment.split(' '))

        prepared = []
        for token in tokens:
            if not token:
                continue
            if token in phrases:
                prepared.append(Exact(token, clean=True).prepare(query_obj))
            elif token in self.uncleaned_tokens:
                prepared.append(token)
            else:
                prepared.append(Clean(token).prepare(query_obj))
        return u' '.join(prepared)
| compass-api/G4SE/api/helpers/input.py | 1,932 | A convenience class that handles common user queries.
In addition to cleaning all tokens, it handles double quote bits as
exact matches & terms with '-' in front as NOT queries.
Remove parens if they are not balanced We have something that's not an exact match but may have more than on word in it. | 301 | en | 0.96914 |
"""Mark channels in an existing BIDS dataset as "bad".
example usage:
$ mne_bids mark_bad_channels --ch_name="MEG 0112" --description="noisy" \
--ch_name="MEG 0131" --description="flat" \
--subject_id=01 --task=experiment --session=test \
--bids_root=bids_root --overwrite
"""
# Authors: Richard Hรถchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
from mne.utils import logger
import mne_bids
from mne_bids.config import reader
from mne_bids import BIDSPath, mark_bad_channels
def run():
    """Run the mark_bad_channels command.

    Parses the command line, matches every readable recording in the BIDS
    tree against the given entities and marks the requested channels as bad
    in each of them.
    """
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__, usage="usage: %prog options args",
                           prog_prefix='mne_bids',
                           version=mne_bids.__version__)

    # Channel selection and the matching per-channel descriptions.
    parser.add_option('--ch_name', dest='ch_names', action='append',
                      default=[],
                      help='The names of the bad channels. If multiple '
                           'channels are bad, pass the --ch_name parameter '
                           'multiple times.')
    parser.add_option('--description', dest='descriptions', action='append',
                      default=[],
                      help='Descriptions as to why the channels are bad. '
                           'Must match the number of bad channels provided. '
                           'Pass multiple times to supply more than one '
                           'value in that case.')
    # BIDS entities restricting which recordings are touched.
    parser.add_option('--bids_root', dest='bids_root',
                      help='The path of the folder containing the BIDS '
                           'dataset')
    parser.add_option('--subject_id', dest='subject',
                      help=('Subject name'))
    parser.add_option('--session_id', dest='session',
                      help='Session name')
    parser.add_option('--task', dest='task',
                      help='Task name')
    parser.add_option('--acq', dest='acquisition',
                      help='Acquisition parameter')
    parser.add_option('--run', dest='run',
                      help='Run number')
    parser.add_option('--proc', dest='processing',
                      help='Processing label.')
    parser.add_option('--rec', dest='recording',
                      help='Recording name')
    parser.add_option('--type', dest='datatype',
                      help='Recording data type, e.g. meg, ieeg or eeg')
    parser.add_option('--suffix', dest='suffix',
                      help='The filename suffix, i.e. the last part before '
                           'the extension')
    parser.add_option('--ext', dest='extension',
                      help='The filename extension, including the leading '
                           'period, e.g. .fif')
    parser.add_option('--overwrite', dest='overwrite', action='store_true',
                      help='Replace existing channel status entries')
    parser.add_option('--verbose', dest='verbose', action='store_true',
                      help='Whether do generate additional diagnostic output')

    opt, args = parser.parse_args()
    if args:
        parser.print_help()
        parser.error(f'Please do not specify arguments without flags. '
                     f'Got: {args}.\n')

    if opt.bids_root is None:
        parser.print_help()
        parser.error('You must specify bids_root')
    # NOTE(review): with default=[] this can never trigger -- optparse yields
    # an empty list, not None, when no --ch_name is passed. Kept as-is to
    # avoid changing behaviour; confirm whether an emptiness check was meant.
    if opt.ch_names is None:
        parser.print_help()
        parser.error('You must specify some --ch_name parameters.')

    # A single empty --ch_name ('') collapses to an empty channel selection.
    ch_names = [] if opt.ch_names == [''] else opt.ch_names

    bids_path = BIDSPath(subject=opt.subject, session=opt.session,
                         task=opt.task, acquisition=opt.acquisition,
                         run=opt.run, processing=opt.processing,
                         recording=opt.recording, datatype=opt.datatype,
                         suffix=opt.suffix, extension=opt.extension,
                         root=opt.bids_root)

    bids_paths = bids_path.match()
    # Only keep data we can actually read & write.
    allowed_extensions = list(reader.keys())
    bids_paths = [p for p in bids_paths
                  if p.extension in allowed_extensions]

    if not bids_paths:
        logger.info('No matching files found. Please consider using a less '
                    'restrictive set of entities to broaden the search.')
        return  # XXX should be return with an error code?

    # Fix: the trailing ellipsis in this user-facing message was mojibake
    # ('โฆ', a mis-encoded U+2026).
    logger.info(f'Marking channels {", ".join(ch_names)} as bad in '
                f'{len(bids_paths)} recording(s) …')
    for bids_path in bids_paths:
        logger.info(f'Processing: {bids_path.basename}')
        mark_bad_channels(ch_names=ch_names, descriptions=opt.descriptions,
                          bids_path=bids_path, overwrite=opt.overwrite,
                          verbose=opt.verbose)
if __name__ == '__main__':
    # Entry point when invoked as `mne_bids mark_bad_channels ...`.
    run()
| mne_bids/commands/mne_bids_mark_bad_channels.py | 4,940 | Run the mark_bad_channels command.
Mark channels in an existing BIDS dataset as "bad".
example usage:
$ mne_bids mark_bad_channels --ch_name="MEG 0112" --description="noisy" --ch_name="MEG 0131" --description="flat" --subject_id=01 --task=experiment --session=test --bids_root=bids_root --overwrite
Authors: Richard Hรถchenberger <richard.hoechenberger@gmail.com> License: BSD-3-Clause Only keep data we can actually read & write. XXX should be return with an error code? | 560 | en | 0.426959 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import os
from pymisp import ExpandedPyMISP
from settings import url, key, ssl, outputdir, filters, valid_attribute_distribution_levels
# The following settings are optional: fall back to safe defaults when a
# (possibly older) settings.py does not define them.
try:
    from settings import with_distribution
except ImportError:
    with_distribution = False
try:
    from settings import include_deleted
except ImportError:
    include_deleted = False
try:
    from settings import exclude_attribute_types
except ImportError:
    exclude_attribute_types = []
# Filled in by init() from settings.valid_attribute_distribution_levels.
valid_attribute_distributions = []
def init():
    """Return a configured PyMISP client after computing the allowed
    attribute distribution levels."""
    global valid_attribute_distributions
    # Default: accept every distribution level.
    levels = [0, 1, 2, 3, 4, 5]
    try:
        # If we have an old settings.py file then this variable won't exist.
        levels = [int(level) for level in valid_attribute_distribution_levels]
    except Exception:
        pass
    valid_attribute_distributions = levels
    return ExpandedPyMISP(url, key, ssl)
def saveEvent(event):
    """Dump a single feed event as <uuid>.json inside the output directory."""
    try:
        target = os.path.join(outputdir, '{}.json'.format(event['Event']['uuid']))
        with open(target, 'w') as f:
            json.dump(event, f, indent=2)
    except Exception as e:
        print(e)
        sys.exit('Could not create the event dump.')
def saveHashes(hashes):
    """Write hashes.csv, the quick-lookup table mapping attribute hashes to
    event UUIDs (one `hash,uuid` pair per line)."""
    try:
        with open(os.path.join(outputdir, 'hashes.csv'), 'w') as hash_file:
            for element in hashes:
                hash_file.write(f'{element[0]},{element[1]}\n')
    except Exception as e:
        print(e)
        sys.exit('Could not create the quick hash lookup file.')
def saveManifest(manifest):
    """Write the feed manifest (event metadata keyed by UUID) to manifest.json.

    Fix: use a context manager so the file handle is closed even when the
    serialization or the write raises; the original only closed the handle
    on the success path, leaking it on error.
    """
    try:
        with open(os.path.join(outputdir, 'manifest.json'), 'w') as manifest_file:
            manifest_file.write(json.dumps(manifest))
    except Exception as e:
        print(e)
        sys.exit('Could not create the manifest file.')
if __name__ == '__main__':
    misp = init()
    # Fetch the (minimal) index of events matching the configured filters.
    try:
        events = misp.search_index(minimal=True, **filters, pythonify=False)
    except Exception as e:
        print(e)
        sys.exit("Invalid response received from MISP.")
    if len(events) == 0:
        sys.exit("No events returned.")
    manifest = {}
    hashes = []
    total = len(events)
    for counter, event in enumerate(events, start=1):
        try:
            e = misp.get_event(event['uuid'], deleted=include_deleted, pythonify=True)
            if exclude_attribute_types:
                # Fix: the original popped from e.attributes while enumerating
                # the same list, which skips the element following every
                # removal (consecutive excluded attributes survived). Walking
                # the indices in reverse removes every excluded attribute.
                for i in reversed(range(len(e.attributes))):
                    if e.attributes[i].type in exclude_attribute_types:
                        e.attributes.pop(i)
            e_feed = e.to_feed(valid_distributions=valid_attribute_distributions, with_meta=True, with_distribution=with_distribution)
        except Exception as err:
            print(err, event['uuid'])
            continue
        if not e_feed:
            print(f'Invalid distribution {e.distribution}, skipping')
            continue
        # Collect the per-attribute hashes and the manifest entry carried in
        # the feed representation, then dump the event itself.
        hashes += [[h, e.uuid] for h in e_feed['Event'].pop('_hashes')]
        manifest.update(e_feed['Event'].pop('_manifest'))
        saveEvent(e_feed)
        print(f"Event {counter}/{total} exported.")
    saveManifest(manifest)
    print('Manifest saved.')
    saveHashes(hashes)
    print('Hashes saved. Feed creation completed.')
| examples/feed-generator/generate.py | 3,206 | !/usr/bin/env python3 -*- coding: utf-8 -*- If we have an old settings.py file then this variable won't exist | 109 | en | 0.793859 |
#!/usr/bin/env python3
from hopla.hoplalib.user.usermodels import HabiticaUser
class TestHabiticaUser:
def test_get_stats(self):
user_test_stat_values = {
"buffs": {
"str": 50, "int": 50, "per": 3206, "con": 50, "stealth": 0, "streaks": False,
"snowball": False, "spookySparkles": False, "shinySeed": False, "seafoam": False
},
"training": {"int": 0, "per": 0, "str": 0, "con": 0},
"hp": 50, "mp": 65.7, "exp": 2501, "gp": 1072327.9, "lvl": 121,
"class": "wizard", "points": 0, "str": 0, "con": 0, "int": 12, "per": 88,
"toNextLevel": 5010, "maxHealth": 50, "maxMP": 304
}
user = HabiticaUser(user_dict={"stats": user_test_stat_values})
assert user.get_stats() == user_test_stat_values
def test_get_auth(self):
user_test_auth_values = {
"local": {"username": "hopla", "lowerCaseUsername": "hopla",
"email": "something+habitica@gmail.com"
},
"timestamps": {"created": "2022-03-22T24:23:38.119Z",
"loggedin": "2022-09-18T08:47:45.286Z",
"updated": "2022-09-18T14:20:55.530Z"
},
"facebook": {}, "google": {}, "apple": {}
}
user = HabiticaUser(user_dict={"auth": user_test_auth_values})
assert user.get_auth() == user_test_auth_values
def test_get_inventory(self):
inventory = {
"gear": {"equipped": {"back": "back_special_aetherCloak"},
"costume": {"armor": "armor_armoire_bluePartyDress", "body": "body_base_0"},
"owned": {"armor_special_fall2019Healer": True}},
"special": {"goodluck": 9000},
"lastDrop": {"count": 80, "date": "2021-10-12T15:45:30.384Z"},
"pets": {"Cactus-Golden": 0, "Unicorn-Red": -1, "Wolf-CottonCandyPink": 5},
"eggs": {"Dragon": 338, "Nudibranch": 3, "TRex": 0},
"hatchingPotions": {"Desert": 456, "MossyStone": 1},
"food": {"RottenMeat": 846},
"mounts": {"Fox-RoyalPurple": True, "Dragon-Skeleton": None, "Wolf-MossyStone": True},
"quests": {"trex_undead": 0},
"currentPet": "Egg-Base",
"currentMount": "Aether-Invisible"
}
user = HabiticaUser(user_dict={"items": inventory})
assert user.get_inventory() == inventory
def test_get_gp(self):
gp = 12.0
user = HabiticaUser(user_dict={"stats": {"gp": gp}})
assert user.get_gp() == gp
def test_get_mp(self):
mp = 112.0
user = HabiticaUser(user_dict={"stats": {"mp": mp}})
assert user.get_mp() == mp
def test_get_pets(self):
pets = {"Spider-Base": -1, "TRex-Skeleton": 5}
user = HabiticaUser(user_dict={"items": {"pets": pets}})
assert user.get_pets() == pets
def test_get_mounts(self):
mounts = {"Spider-Base": None, "TRex-Skeleton": True}
user = HabiticaUser(user_dict={"items": {"mounts": mounts}})
assert user.get_mounts() == mounts
def test_get_food(self):
food = {"CottonCandyBlue": 10, "Fish": 830}
user = HabiticaUser(user_dict={"items": {"food": food}})
assert user.get_food() == food
def test_get_hatch_potions(self):
hatch_potions = {"Base": 10, "SolarSystem": 1009}
user = HabiticaUser(user_dict={"items": {"hatchingPotions": hatch_potions}})
assert user.get_hatch_potions() == hatch_potions
def test_get_eggs(self):
eggs = {"Fox": 1001, "Nudibranch": 9}
user = HabiticaUser(user_dict={"items": {"eggs": eggs}})
assert user.get_eggs() == eggs
| src/tests/hoplalib/user/test_usermodels.py | 3,780 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myshop.settings')
app = Celery('myshop')
# Read broker/task configuration from the Django settings object.
app.config_from_object('django.conf:settings')
# Discover tasks in every installed Django app; the lambda defers the
# INSTALLED_APPS lookup until Django settings are fully loaded.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| myshop/celery.py | 343 | set the default Django settings module for the 'celery' program. | 64 | en | 0.157211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ๅธๅฎๆจ่็ : ่ฟไฝฃ10%
https://www.binancezh.pro/cn/register?ref=AIR1GC70
ๅธๅฎๅ็บฆๆจ่็ : ่ฟไฝฃ10%
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the inviation code: 51bitquant
็ฝๆ ผไบคๆ: ้ๅๅธๅ็้ซๆณขๅจ็็ๅ็ง๏ผ้ๅ็ฐ่ดง๏ผ ๅฆๆไบคๆๅ็บฆ๏ผ้่ฆๆณจๆ้ฒๆญขๆ็ซฏ่กๆ
็ไปใ
ๆๅกๅจ่ดญไนฐๅฐๅ: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
from gateway import BinanceSpotHttp, OrderStatus, OrderType, OrderSide
from utils import config
from utils import utility, round_to
from enum import Enum
import logging
from datetime import datetime
class BinanceTrader(object):
    """Grid trader over the Binance spot REST API.

    Maintains two local books (``buy_orders`` / ``sell_orders``) of open limit
    orders.  Each call to :meth:`grid_trader` polls order status and, for every
    fill, places the opposite-side order one grid step (``config.gap_percent``)
    away, keeping the grid alive.
    """

    def __init__(self):
        """Create the HTTP client and empty local order books.

        Credentials and optional proxy settings are read from ``config``.
        """
        self.http_client = BinanceSpotHttp(api_key=config.api_key, secret=config.api_secret, proxy_host=config.proxy_host, proxy_port=config.proxy_port)
        self.buy_orders = []  # open buy orders (dicts as returned by the API)
        self.sell_orders = []  # open sell orders

    def get_bid_ask_price(self):
        """Return ``(best_bid, best_ask)`` for ``config.symbol``.

        Returns (0, 0) when the ticker request fails or returns nothing.
        """
        ticker = self.http_client.get_ticker(config.symbol)
        bid_price = 0
        ask_price = 0
        if ticker:
            bid_price = float(ticker.get('bidPrice', 0))
            ask_price = float(ticker.get('askPrice', 0))
        return bid_price, ask_price

    def grid_trader(self):
        """Execute one iteration of the grid-trading core logic.

        For each tracked order: drop it if canceled; on a fill, place the
        mirrored order ``gap_percent`` away on the opposite side.  Finally,
        seed the book when a side is empty and trim it when it exceeds
        ``config.max_orders``.
        """
        bid_price, ask_price = self.get_bid_ask_price()
        print(f"bid_price: {bid_price}, ask_price: {ask_price}")

        # Round the configured order size to the exchange's lot step.
        quantity = round_to(float(config.quantity), float(config.min_qty))

        self.buy_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first
        self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first
        print(f"buy orders: {self.buy_orders}")
        print("------------------------------")
        print(f"sell orders: {self.sell_orders}")

        buy_delete_orders = []  # buy orders to drop from the local book
        sell_delete_orders = []  # sell orders to drop from the local book

        # Buy-side logic: check fill status of every tracked buy order.
        for buy_order in self.buy_orders:
            check_order = self.http_client.get_order(buy_order.get('symbol', config.symbol),client_order_id=buy_order.get('clientOrderId'))
            if check_order:
                if check_order.get('status') == OrderStatus.CANCELED.value:
                    buy_delete_orders.append(buy_order)
                    print(f"buy order status was canceled: {check_order.get('status')}")
                elif check_order.get('status') == OrderStatus.FILLED.value:
                    # Buy order filled: place a sell one grid step above it.
                    logging.info(f"ไนฐๅๆไบคๆถ้ด: {datetime.now()}, ไปทๆ ผ: {check_order.get('price')}, ๆฐ้: {check_order.get('origQty')}")
                    sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
                    if 0 < sell_price < ask_price:
                        # Never quote below the current ask.
                        sell_price = round_to(ask_price, float(config.min_price))
                    new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity, price=sell_price)
                    if new_sell_order:
                        buy_delete_orders.append(buy_order)
                        self.sell_orders.append(new_sell_order)
                    # Also replace the filled buy one grid step below it.
                    # NOTE(review): config.min_price is not wrapped in float()
                    # here, unlike every other round_to call — confirm round_to
                    # tolerates a string step or this is a latent bug.
                    buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)),
                                         config.min_price)
                    if buy_price > bid_price > 0:
                        buy_price = round_to(bid_price, float(config.min_price))
                    new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
                    if new_buy_order:
                        # NOTE(review): appends to self.buy_orders while it is
                        # being iterated; the new order will also be polled in
                        # this same pass — confirm that is intended.
                        self.buy_orders.append(new_buy_order)
                elif check_order.get('status') == OrderStatus.NEW.value:
                    print("buy order status is: New")
                else:
                    print(f"buy order status is not above options: {check_order.get('status')}")

        # Remove filled/canceled buy orders from the local book.
        for delete_order in buy_delete_orders:
            self.buy_orders.remove(delete_order)

        # Sell-side logic: check fill status of every tracked sell order.
        for sell_order in self.sell_orders:
            check_order = self.http_client.get_order(sell_order.get('symbol', config.symbol),
                                                     client_order_id=sell_order.get('clientOrderId'))
            if check_order:
                if check_order.get('status') == OrderStatus.CANCELED.value:
                    sell_delete_orders.append(sell_order)
                    print(f"sell order status was canceled: {check_order.get('status')}")
                elif check_order.get('status') == OrderStatus.FILLED.value:
                    logging.info(
                        f"ๅๅๆไบคๆถ้ด: {datetime.now()}, ไปทๆ ผ: {check_order.get('price')}, ๆฐ้: {check_order.get('origQty')}")
                    # Sell order filled: place a buy one grid step below it.
                    buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)), float(config.min_price))
                    if buy_price > bid_price > 0:
                        buy_price = round_to(bid_price, float(config.min_price))
                    new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
                                                                order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
                    if new_buy_order:
                        sell_delete_orders.append(sell_order)
                        self.buy_orders.append(new_buy_order)
                    # Also replace the filled sell one grid step above it.
                    sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
                    if 0 < sell_price < ask_price:
                        # Never quote below the current ask.
                        sell_price = round_to(ask_price, float(config.min_price))
                    new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
                                                                 order_type=OrderType.LIMIT, quantity=quantity,
                                                                 price=sell_price)
                    if new_sell_order:
                        self.sell_orders.append(new_sell_order)
                elif check_order.get('status') == OrderStatus.NEW.value:
                    print("sell order status is: New")
                else:
                    print(f"sell order status is not in above options: {check_order.get('status')}")

        # Remove filled/canceled sell orders from the local book.
        for delete_order in sell_delete_orders:
            self.sell_orders.remove(delete_order)

        # No open buy orders: seed the buy side one grid step below the bid.
        if len(self.buy_orders) <= 0:
            if bid_price > 0:
                price = round_to(bid_price * (1 - float(config.gap_percent)), float(config.min_price))
                buy_order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity,price=price)
                if buy_order:
                    self.buy_orders.append(buy_order)
        elif len(self.buy_orders) > int(config.max_orders):
            # Too many buy orders: cancel the one furthest from the market
            # (lowest price).
            self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False)  # lowest price first
            delete_order = self.buy_orders[0]
            order = self.http_client.cancel_order(delete_order.get('symbol'), client_order_id=delete_order.get('clientOrderId'))
            if order:
                self.buy_orders.remove(delete_order)

        # No open sell orders: seed the sell side one grid step above the ask.
        if len(self.sell_orders) <= 0:
            if ask_price > 0:
                price = round_to(ask_price * (1 + float(config.gap_percent)), float(config.min_price))
                order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity,price=price)
                if order:
                    self.sell_orders.append(order)
        elif len(self.sell_orders) > int(config.max_orders):
            # Too many sell orders: cancel the one furthest from the market
            # (highest price).
            # NOTE(review): this sort compares x['price'] as a string, unlike
            # the float(...) keys used elsewhere — confirm prices sort
            # correctly lexicographically or this is a latent bug.
            self.sell_orders.sort(key=lambda x: x['price'], reverse=True)  # highest price first
            delete_order = self.sell_orders[0]
            order = self.http_client.cancel_order(delete_order.get('symbol'),
                                                  client_order_id=delete_order.get('clientOrderId'))
            if order:
                self.sell_orders.remove(delete_order)
| trader/binance_trader.py | 9,326 | :param api_key:
:param secret:
:param trade_type: ไบคๆ็็ฑปๅ๏ผ only support future and spot.
ๆง่กๆ ธๅฟ้ป่พ๏ผ็ฝๆ ผไบคๆ็้ป่พ.
:return:
ๅธๅฎๆจ่็ : ่ฟไฝฃ10%
https://www.binancezh.pro/cn/register?ref=AIR1GC70
ๅธๅฎๅ็บฆๆจ่็ : ่ฟไฝฃ10%
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the inviation code: 51bitquant
็ฝๆ ผไบคๆ: ้ๅๅธๅ็้ซๆณขๅจ็็ๅ็ง๏ผ้ๅ็ฐ่ดง๏ผ ๅฆๆไบคๆๅ็บฆ๏ผ้่ฆๆณจๆ้ฒๆญขๆ็ซฏ่กๆ
็ไปใ
ๆๅกๅจ่ดญไนฐๅฐๅ: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
!/usr/bin/env python -*- coding: utf-8 -*- ไนฐๅ. ๅๅ. ๆ้ซไปทๅฐๆไฝไปท. ๆ้ซไปทๅฐๆไฝไปท. ้่ฆๅ ้คไนฐๅ ้่ฆๅ ้ค็ๅๅ ไนฐๅ้ป่พ,ๆฃๆฅๆไบค็ๆ
ๅต. ไนฐๅๆไบค๏ผๆๅๅ. ้ฒๆญขไปทๆ ผ ่ฟๆๆ่
ๆ็ป็่ฎขๅๅ ้คๆ. ๅๅ้ป่พ, ๆฃๆฅๅๅๆไบคๆ
ๅต. ๅๅๆไบค๏ผๅ
ไธไนฐๅ. ้ฒๆญขไปทๆ ผ ่ฟๆๆ่
ๆ็ป็่ฎขๅๅ ้คๆ. ๆฒกๆไนฐๅ็ๆถๅ. ๆๅคๅ
่ฎธ็ๆๅๆฐ้. ่ฎขๅๆฐ้ๆฏ่พๅค็ๆถๅ. ๆไฝไปทๅฐๆ้ซไปท ๆฒกๆๅๅ็ๆถๅ. ๆๅคๅ
่ฎธ็ๆๅๆฐ้. ่ฎขๅๆฐ้ๆฏ่พๅค็ๆถๅ. ๆ้ซไปทๅฐๆไฝไปท | 814 | zh | 0.472292 |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
  """Log tailer that can restrict output to whitelisted line prefixes.

  The ``filter`` attribute is assigned externally; when falsy, all text is
  printed verbatim.
  """

  def _PrintLogLine(self, text):
    """Print *text*, keeping only lines that start with a filter prefix."""
    if not self.filter:
      self.out.Print(text)
      return
    prefixes = tuple(self.filter)
    for line in text.splitlines():
      if line.startswith(prefixes):
        self.out.Print(line)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
  """Subclass of CloudBuildClient that allows filtering."""

  def StreamWithFilter(self, build_ref, backoff, output_filter=None):
    """Stream the logs for a build using whitelist filter.

    Args:
      build_ref: Build reference, The build whose logs shall be streamed.
      backoff: A function that takes the current elapsed time
        and returns the next sleep length. Both are in seconds.
      output_filter: List of strings, The output will only be shown if the line
        starts with one of the strings in the list.

    Raises:
      NoLogsBucketException: If the build does not specify a logsBucket.

    Returns:
      Build message, The completed or terminated build as read for the final
      poll.
    """
    build = self.GetBuild(build_ref)
    log_tailer = FilteredLogTailer.FromBuild(build)
    # None disables filtering — the tailer then prints everything.
    log_tailer.filter = output_filter

    statuses = self.messages.Build.StatusValueValuesEnum
    working_statuses = [
        statuses.QUEUED,
        statuses.WORKING,
    ]

    # Poll-and-sleep loop with caller-supplied backoff; re-fetches the build
    # each round so the status check reflects the latest state.
    seconds_between_poll = backoff(0)
    seconds_elapsed = 0
    while build.status in working_statuses:
      log_tailer.Poll()
      time.sleep(seconds_between_poll)
      build = self.GetBuild(build_ref)
      seconds_elapsed += seconds_between_poll
      seconds_between_poll = backoff(seconds_elapsed)

    # Poll the logs one final time to ensure we have everything. We know this
    # final poll will get the full log contents because GCS is strongly
    # consistent and Container Builder waits for logs to finish pushing before
    # marking the build complete.
    log_tailer.Poll(is_last=True)

    return build
class FailedBuildException(exceptions.Error):
  """Raised when a Cloud Build finishes in any non-success state."""

  def __init__(self, build):
    # Keep the message format stable: callers and tests match on it.
    message = 'build {id} completed with status "{status}"'.format(
        id=build.id, status=build.status)
    super(FailedBuildException, self).__init__(message)
class SubnetException(exceptions.Error):
  """Exception for subnet related errors (e.g. region cannot be inferred)."""
class ImageOperation(object):
  """Names of the image operations supported by the Daisy builders."""

  IMPORT = 'import'  # bring an external image into GCE
  EXPORT = 'export'  # write a GCE image out to GCS
def AddCommonDaisyArgs(parser, add_log_location=True):
  """Common arguments for Daisy builds.

  Registers --log-location (optional), --timeout and --async on *parser*.

  Args:
    parser: argparse parser to add the flags to.
    add_log_location: bool, whether to register the --log-location flag.
  """
  if add_log_location:
    parser.add_argument(
        '--log-location',
        help='Directory in Cloud Storage to hold build logs. If not '
        'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
        'is created and used.',
    )
  # Duration strings like "2h" are parsed to seconds by arg_parsers.Duration.
  parser.add_argument(
      '--timeout',
      type=arg_parsers.Duration(),
      default='2h',
      help="""\
          Maximum time a build can last before it fails as "TIMEOUT".
          For example, specifying `2h` fails the process after 2 hours.
          See $ gcloud topic datetimes for information about duration formats.
          """)
  # Adds the standard --async flag (surfaces as args.async_).
  base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
  """Extra common arguments for Daisy builds.

  Registers the hidden --docker-image-tag flag used to pin the
  compute-image-tools builder image version.

  Args:
    parser: argparse parser to add the flag to.
  """
  parser.add_argument(
      '--docker-image-tag',
      default=_DEFAULT_BUILDER_VERSION,
      hidden=True,
      help="""\
          Specify which docker image tag (of tools from compute-image-tools)
          should be used for this command. By default it's "release", while
          "latest" is supported as well. There may be more versions supported in
          the future.
          """
  )
def _CheckIamPermissions(project_id):
  """Check for needed IAM permissions and prompt to add if missing.

  Interactive: prompts (and aborts on refusal) before enabling APIs or
  granting roles to the Cloud Build service account.

  Args:
    project_id: A string with the name of the project.
  """
  project = projects_api.Get(project_id)
  # If the user's project doesn't have cloudbuild enabled yet, then the service
  # account won't even exist. If so, then ask to enable it before continuing.
  # Also prompt them to enable Stackdriver Logging if they haven't yet.
  expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
  for service_name in expected_services:
    if not services_api.IsServiceEnabled(project.projectId, service_name):
      # TODO(b/112757283): Split this out into a separate library.
      prompt_message = (
          'The "{0}" service is not enabled for this project. '
          'It is required for this operation.\n').format(service_name)
      # throw_if_unattended/cancel_on_no: abort rather than proceed without
      # the required service.
      console_io.PromptContinue(
          prompt_message,
          'Would you like to enable this service?',
          throw_if_unattended=True,
          cancel_on_no=True)
      services_api.EnableService(project.projectId, service_name)

  # Now that we're sure the service account exists, actually check permissions.
  service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
      project.projectNumber)
  expected_permissions = {'roles/compute.admin': service_account}
  for role in SERVICE_ACCOUNT_ROLES:
    expected_permissions[role] = service_account

  # Remove from the expected set every role the service account already has;
  # whatever remains must be granted.
  permissions = projects_api.GetIamPolicy(project_id)
  for binding in permissions.bindings:
    if expected_permissions.get(binding.role) in binding.members:
      del expected_permissions[binding.role]

  if expected_permissions:
    ep_table = [
        '{0} {1}'.format(role, account)
        for role, account in expected_permissions.items()
    ]
    prompt_message = (
        'The following IAM permissions are needed for this operation:\n'
        '[{0}]\n'.format('\n'.join(ep_table)))
    console_io.PromptContinue(
        message=prompt_message,
        prompt_string='Would you like to add the permissions',
        throw_if_unattended=True,
        cancel_on_no=True)

    for role, account in expected_permissions.items():
      log.info('Adding [{0}] to [{1}]'.format(account, role))
      projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
  """Create a build in cloud build.

  Args:
    build_config: A cloud build Build message.
    client: The cloud build api client.
    messages: The cloud build api messages module.

  Returns:
    Tuple containing a cloud build build object and the resource reference
    for that build.
  """
  log.debug('submitting build: {0}'.format(repr(build_config)))
  op = client.projects_builds.Create(
      messages.CloudbuildProjectsBuildsCreateRequest(
          build=build_config, projectId=properties.VALUES.core.project.Get()))
  # The Build resource is embedded in the long-running operation's metadata;
  # round-trip through JSON to decode it into a typed message.
  json = encoding.MessageToJson(op.metadata)
  build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

  build_ref = resources.REGISTRY.Create(
      collection='cloudbuild.projects.builds',
      projectId=build.projectId,
      id=build.id)

  log.CreatedResource(build_ref)
  if build.logUrl:
    log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
  else:
    log.status.Print('Logs are available in the Cloud Console.')

  return build, build_ref
def GetDaisyBucketName(bucket_location=None):
  """Determine the scratch bucket name Daisy should use.

  Args:
    bucket_location: str, optional location suffix for the bucket.

  Returns:
    str, sanitized bucket name for daisy.
  """
  project_id = properties.VALUES.core.project.GetOrFail()
  # ':' and '.' appear in domain-scoped project ids but are not valid in
  # bucket names.
  sanitized_project = project_id.replace(':', '-').replace('.', '-')
  name = '{0}-daisy-bkt'.format(sanitized_project)
  if bucket_location:
    name = '{0}-{1}'.format(name, bucket_location).lower()
  # TODO (b/117668144): Make Daisy scratch bucket ACLs same as
  # source/destination bucket
  return _GetSafeBucketName(name)
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
  """Infer the region to use for the subnet arg from global properties/args.

  Returns:
    str, region

  Raises:
    SubnetException: if region couldn't be inferred.
  """
  # A configured zone takes precedence; derive its region.
  zone = properties.VALUES.compute.zone.Get()
  if zone:
    return utils.ZoneNameToRegionName(zone)
  region = properties.VALUES.compute.region.Get()
  if region:
    return region
  raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
  """Extract network/subnet out of CLI args and append them for the importer.

  Args:
    args: argparse namespace that may carry ``subnet`` and ``network``.
    builder_args: list of str, args for builder (mutated in place).
  """
  # Subnet first, then network — order matters to keep parity with the
  # builder's historical argument ordering.
  for flag_name in ('subnet', 'network'):
    flag_value = getattr(args, flag_name)
    if flag_value:
      AppendArg(builder_args, flag_name, flag_value.lower())
def RunImageImport(args, import_args, tags, output_filter,
                   docker_image_tag=_DEFAULT_BUILDER_VERSION):
  """Run the gce_vm_image_import builder on Google Cloud Builder.

  Args:
    args: argparse namespace of the invoking command.
    import_args: list of key-value pairs forwarded to the importer.
    tags: list of strings added as tags on the Argo build.
    output_filter: list of prefixes; only matching log lines are shown.
    docker_image_tag: builder image tag to run.

  Returns:
    A build object that either streams the output or is displayed as a
    link to the build.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  return RunImageCloudBuild(
      args, _IMAGE_IMPORT_BUILDER.format(docker_image_tag), import_args, tags,
      output_filter)
def RunImageExport(args, export_args, tags, output_filter,
                   docker_image_tag=_DEFAULT_BUILDER_VERSION):
  """Run the gce_vm_image_export builder on Google Cloud Builder.

  Args:
    args: argparse namespace of the invoking command.
    export_args: list of key-value pairs forwarded to the exporter.
    tags: list of strings added as tags on the Argo build.
    output_filter: list of prefixes; only matching log lines are shown.
    docker_image_tag: builder image tag to run.

  Returns:
    A build object that either streams the output or is displayed as a
    link to the build.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  return RunImageCloudBuild(
      args, _IMAGE_EXPORT_BUILDER.format(docker_image_tag), export_args, tags,
      output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
  """Run an image-related build on Google Cloud Builder.

  Verifies IAM permissions first, then submits the build tagged with
  'gce-daisy' plus the caller's tags.

  Args:
    args: argparse namespace of the invoking command.
    builder: path to the builder image.
    builder_args: list of key-value pairs forwarded to the builder.
    tags: list of strings added as tags on the Argo build.
    output_filter: list of prefixes; only matching log lines are shown.

  Returns:
    A build object that either streams the output or is displayed as a
    link to the build.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  _CheckIamPermissions(
      projects_util.ParseProject(properties.VALUES.core.project.GetOrFail()))
  return _RunCloudBuild(args, builder, builder_args, ['gce-daisy'] + tags,
                        output_filter, args.log_location)
def GetDaisyTimeout(args):
  """Return the timeout (in seconds) to hand to Daisy.

  Daisy gets a slightly shorter deadline than gcloud itself — 2% of the
  configured timeout, capped at 5 minutes (300s) — so it can fail cleanly
  before gcloud gives up.
  """
  margin = min(int(args.timeout * 0.02), 300)
  return args.timeout - margin
def _RunCloudBuild(args,
                   builder,
                   build_args,
                   build_tags=None,
                   output_filter=None,
                   log_location=None,
                   backoff=lambda elapsed: 1):
  """Run a build with a specific builder on Google Cloud Builder.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.
    builder: path to builder image
    build_args: args to be sent to builder
    build_tags: tags to be attached to the build
    output_filter: A list of strings indicating what lines from the log should
      be output. Only lines that start with one of the strings in output_filter
      will be displayed.
    log_location: GCS path to directory where logs will be stored.
    backoff: A function that takes the current elapsed time and returns
      the next sleep length. Both are in seconds.

  Returns:
    A build object that either streams the output or is displayed as a
    link to the build.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  client = cloudbuild_util.GetClientInstance()
  messages = cloudbuild_util.GetMessagesModule()

  # Create the build request: a single step running the given builder image.
  build_config = messages.Build(
      steps=[
          messages.BuildStep(
              name=builder,
              args=build_args,
          ),
      ],
      tags=build_tags,
      timeout='{0}s'.format(args.timeout),
  )
  if log_location:
    # Bug fix: parse the log_location parameter itself. Previously this
    # parsed args.log_location, silently ignoring an explicitly passed
    # log_location that differed from the CLI flag. Existing callers pass
    # args.log_location here, so their behavior is unchanged.
    gcs_log_dir = resources.REGISTRY.Parse(
        log_location, collection='storage.objects')

    build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
                                                     gcs_log_dir.object))

  # Start the build.
  build, build_ref = _CreateCloudBuild(build_config, client, messages)

  # If the command is run --async, we just print out a reference to the build.
  if args.async_:
    return build

  mash_handler = execution.MashHandler(
      execution.GetCancelBuildHandler(client, messages, build_ref))

  # Otherwise, logs are streamed from GCS; Ctrl-C cancels the build.
  with execution_utils.CtrlCSection(mash_handler):
    build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
        build_ref, backoff, output_filter=output_filter)

  if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
    log.status.Print(
        'Your build timed out. Use the [--timeout=DURATION] flag to change '
        'the timeout threshold.')

  if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
    raise FailedBuildException(build)

  return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
                      no_guest_environment, can_ip_forward, deletion_protection,
                      description, labels, machine_type, network, network_tier,
                      subnet, private_network_ip, no_restart_on_failure, os,
                      tags, zone, project, output_filter,
                      compute_release_track):
  """Run a OVF import build on Google Cloud Builder.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.
    compute_client: Google Compute Engine client.
    instance_name: Name of the instance to be imported.
    source_uri: A GCS path to OVA or OVF package.
    no_guest_environment: If set to True, Google Guest Environment won't be
      installed on the boot disk of the VM.
    can_ip_forward: If set to True, allows the instances to send and receive
      packets with non-matching destination or source IP addresses.
    deletion_protection: Enables deletion protection for the instance.
    description: Specifies a textual description of the instances.
    labels: List of label KEY=VALUE pairs to add to the instance.
    machine_type: Specifies the machine type used for the instances.
    network: Specifies the network that the instances will be part of.
    network_tier: Specifies the network tier of the interface. NETWORK_TIER
      must be one of: PREMIUM, STANDARD.
    subnet: Specifies the subnet that the instances will be part of.
    private_network_ip: Specifies the RFC1918 IP to assign to the instance.
    no_restart_on_failure: The instances will NOT be restarted if they are
      terminated by Compute Engine.
    os: Specifies the OS of the boot disk being imported.
    tags: A list of strings for adding tags to the Argo build.
    zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults
      to wherever the Argo runner happens to be.
    project: The Google Cloud Platform project name to use for OVF import.
    output_filter: A list of strings indicating what lines from the log should
      be output. Only lines that start with one of the strings in
      output_filter will be displayed.
    compute_release_track: release track to be used for Compute API calls. One
      of - "alpha", "beta" or ""

  Returns:
    A build object that either streams the output or is displayed as a
    link to the build.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  project_id = projects_util.ParseProject(
      properties.VALUES.core.project.GetOrFail())
  _CheckIamPermissions(project_id)

  # Consistency fix: reuse the shared helper instead of re-implementing the
  # same "shave 2%, capped at 300s" computation inline, so image import/export
  # and OVF import can never drift apart.
  ovf_import_timeout = GetDaisyTimeout(args)

  ovf_importer_args = []
  AppendArg(ovf_importer_args, 'instance-names', instance_name)
  AppendArg(ovf_importer_args, 'client-id', 'gcloud')
  AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
  AppendBoolArg(ovf_importer_args, 'no-guest-environment',
                no_guest_environment)
  AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
  AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
  AppendArg(ovf_importer_args, 'description', description)
  if labels:
    AppendArg(ovf_importer_args, 'labels',
              ','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
  AppendArg(ovf_importer_args, 'machine-type', machine_type)
  AppendArg(ovf_importer_args, 'network', network)
  AppendArg(ovf_importer_args, 'network-tier', network_tier)
  AppendArg(ovf_importer_args, 'subnet', subnet)
  AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
  AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
                no_restart_on_failure)
  AppendArg(ovf_importer_args, 'os', os)
  if tags:
    AppendArg(ovf_importer_args, 'tags', ','.join(tags))
  AppendArg(ovf_importer_args, 'zone', zone)
  AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
  AppendArg(ovf_importer_args, 'project', project)
  _AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
  if compute_release_track:
    AppendArg(ovf_importer_args, 'release-track', compute_release_track)

  build_tags = ['gce-ovf-import']

  def _Backoff(elapsed):
    # Poll quickly (2s) for the first 30s, then settle at 15s intervals.
    return 2 if elapsed < 30 else 15

  return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
                        ovf_importer_args, build_tags, output_filter,
                        backoff=_Backoff)
def _AppendNodeAffinityLabelArgs(
    ovf_importer_args, args, compute_client_messages):
  """Forward every scheduling node-affinity from CLI args to the importer."""
  affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
      args, compute_client_messages)
  for affinity in affinities:
    AppendArg(ovf_importer_args, 'node-affinity-label',
              _BuildOvfImporterNodeAffinityFlagValue(affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
  """Serialize a node affinity message as ``key,operator,value1,value2,...``."""
  parts = [node_affinity.key, six.text_type(node_affinity.operator)]
  parts.extend(node_affinity.values)
  return ','.join(parts)
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
  """Append ``format_pattern`` filled with (name, arg) to *args*.

  Falsy ``arg`` values (None, '', 0, False) are skipped entirely.
  """
  if not arg:
    return
  args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
  """Append the valueless flag ``-name`` to *args* when *arg* is truthy."""
  if arg:
    args.append('-{0}'.format(name))
def MakeGcsUri(uri):
  """Normalize *uri* (a gs:// or https:// GCS reference) into a gs:// URI."""
  ref = resources.REGISTRY.Parse(uri)
  return 'gs://{0}/{1}'.format(ref.bucket, ref.object)
def MakeGcsObjectOrPathUri(uri):
  """Create a Google Cloud Storage URI for an object or a path.

  Args:
    uri: a string to a Google Cloud Storage object or a path. Can be a gs://
      or an https:// variant.

  Returns:
    Google Cloud Storage URI for an object or a path.

  Raises:
    storage_util.InvalidObjectNameError: if the path names only a bucket.
  """
  ref = resources.REGISTRY.Parse(uri)
  # A bucket-only reference parses without an 'object' attribute.
  if not hasattr(ref, 'object'):
    raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
  return 'gs://{0}/{1}'.format(ref.bucket, ref.object)
| mac/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/daisy_utils.py | 23,491 | Subclass of CloudBuildClient that allows filtering.
Exception for builds that did not succeed.
Subclass of LogTailer that allows for filtering.
Enum representing image operation.
Exception for subnet related errors.
Common arguments for Daisy builds.
Extra common arguments for Daisy builds.
Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
Override PrintLogLine method to use self.filter.
Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
Utilities for running Daisy builds on Google Container Builder.
-*- coding: utf-8 -*- Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Poll the logs one final time to ensure we have everything. We know this final poll will get the full log contents because GCS is strongly consistent and Container Builder waits for logs to finish pushing before marking the build complete. If the user's project doesn't have cloudbuild enabled yet, then the service account won't even exist. If so, then ask to enable it before continuing. Also prompt them to enable Stackdriver Logging if they haven't yet. TODO(b/112757283): Split this out into a separate library. Now that we're sure the service account exists, actually check permissions. TODO (b/117668144): Make Daisy scratch bucket ACLs same as source/destination bucket Rules are from https://cloud.google.com/storage/docs/naming. Bucket name can't contain "google". Bucket name can't start with "goog". Workaround for b/128691621 Make Daisy time out before gcloud by shaving off 2% from the timeout time, up to a max of 5m (300s). Create the build request. Start the build. If the command is run --async, we just print out a reference to the build. Otherwise, logs are streamed from GCS. Make OVF import time-out before gcloud by shaving off 2% from the timeout time, up to a max of 5m (300s). | 8,854 | en | 0.831401 |
# ----------------------------------------------------------------------
# Distributed Lock
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
import datetime
import time
import random
from logging import getLogger
# Third-party modules
import pymongo
from pymongo.collection import Collection
from bson import ObjectId
# NOC modules
from noc.core.mongo.connection import get_db
from noc.core.perf import metrics
from .base import BaseLock, DEFAULT_TTL
DEFAULT_LOCK_WAIT = 1.0
DEFAULT_LOCK_WAIT_JITTER = 0.1

logger = getLogger(__name__)


class DistributedLock(BaseLock):
    """
    Distributed locking primitive.

    Allows exclusive access to all requested items within category
    between the group of processes.  Mutual exclusion is enforced by a
    unique MongoDB index on the ``items`` field: a concurrent insert
    that shares any item raises ``DuplicateKeyError`` and the caller
    retries until the conflicting lock is released or expires.

    Example
    -------
    ```
    lock = DistributedLock("test", "test:12")
    with lock.acquire(["obj1", "obj2"]):
        ...
    ```
    """

    def __init__(self, category: str, owner: str, ttl: Optional[float] = None):
        """
        :param category: Lock category name
        :param owner: Lock owner id
        :param ttl: Default lock ttl in seconds
        """
        super().__init__(category, owner, ttl=ttl)
        self.collection = self.get_collection()
        # Drop any stale locks left over from a previous incarnation of this owner
        self.release_all()

    def release_all(self):
        """
        Release all locks held by owner
        """
        self.collection.delete_many({"owner": self.owner})

    def get_collection_name(self) -> str:
        """
        Get name of the lock collection
        """
        return f"locks.{self.category}"

    def get_collection(self) -> Collection:
        """
        Ensure the collection exists and is indexed properly
        """
        coll = get_db()[self.get_collection_name()]
        # Unique index provides the mutual exclusion on individual items
        coll.create_index([("items", pymongo.ASCENDING)], unique=True)
        # TTL index: MongoDB reaps lock documents once `expires` has passed
        coll.create_index([("expires", pymongo.ASCENDING)], expireAfterSeconds=0)
        return coll

    def acquire_by_items(self, items: List[str], ttl: Optional[float] = None) -> str:
        """
        Acquire lock by list of items.

        Blocks, retrying with a jittered delay, until the insert succeeds.

        :param items: Items to lock exclusively
        :param ttl: Lock ttl in seconds (falls back to instance/default ttl)
        :return: Lock id to pass to `release_by_lock_id`
        """
        lock_id = ObjectId()
        ttl = ttl or self.ttl or DEFAULT_TTL
        metrics[f"lock_{self.category}_requests"] += 1
        logger.debug(
            "[%s|%s] Acquiring lock for %s (%s seconds)",
            self.category,
            self.owner,
            ", ".join(items),
            ttl,
        )
        while True:
            try:
                self.collection.insert_one(
                    {
                        "_id": lock_id,
                        "items": items,
                        "owner": self.owner,
                        # BUGFIX: the field must be named `expires` to match the
                        # TTL index created in `get_collection` (it was `expire`,
                        # so abandoned locks were never reaped).  Use an aware UTC
                        # datetime: the MongoDB TTL monitor compares against UTC.
                        "expires": datetime.datetime.now(datetime.timezone.utc)
                        + datetime.timedelta(seconds=ttl),
                    }
                )
                return str(lock_id)
            except pymongo.errors.DuplicateKeyError:
                # At least one requested item is already locked -- wait and retry
                metrics[f"lock_{self.category}_misses"] += 1
                jitter = random.random() * DEFAULT_LOCK_WAIT_JITTER * DEFAULT_LOCK_WAIT
                timeout = DEFAULT_LOCK_WAIT + jitter
                logger.debug(
                    "[%s|%s] Cannot get lock. Waiting %s seconds",
                    self.category,
                    self.owner,
                    timeout,
                )
                time.sleep(timeout)

    def release_by_lock_id(self, lock_id: str):
        """
        Release lock by id
        """
        self.collection.delete_one({"_id": ObjectId(lock_id)})
| core/lock/distributed.py | 3,629 | Distributed locking primitive.
Allows exclusive access to all requested items within category
between the group of processes.
Example
-------
```
lock = DistributedLock("test", "test:12")
with lock.acquire(["obj1", "obj2"]):
...
```
:param category: Lock category name
:param owner: Lock owner id
:param ttl: Default lock ttl in seconds
Acquire lock by list of items
Ensure the collection exists and is indexed properly
Get name of the lock collection
Release all locks held by owner
Release lock by id
---------------------------------------------------------------------- Distributed Lock ---------------------------------------------------------------------- Copyright (C) 2007-2021 The NOC Project See LICENSE for details ---------------------------------------------------------------------- Python modules Third-party modules NOC modules | 853 | en | 0.643845 |
# Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/
from .object_storage.gcs import GCSProvider
from argparse import ArgumentParser
from tempfile import TemporaryDirectory
import codecs
import datetime
import dateutil
import gzip
import json
import kafka
import logging
import os
import re
class KafkaRestore:
    """Restore the messages of a Kafka topic from object-storage backups."""

    def __init__(self, *, config):
        """
        :param config: dict with `object_storage`, `kafka` and optional `since` keys
        """
        self.log = logging.getLogger(self.__class__.__name__)
        self.config = config

        object_storage_config = self.config.get("object_storage", {})
        object_storage_type = object_storage_config.get("type")
        if object_storage_type == "gcs":
            self.object_storage = GCSProvider(config=object_storage_config)
        else:
            raise ValueError(f"Unknown object storage type: {object_storage_type}")

        kafka_config = self.config.get("kafka", {})
        # Use SSL only when the full CA/cert/key triple is configured
        if ("ssl_cafile" in kafka_config and
                "ssl_access_certificate_file" in kafka_config and
                "ssl_access_key_file" in kafka_config):
            self.kafka_producer = kafka.KafkaProducer(
                bootstrap_servers=kafka_config["kafka_url"],
                security_protocol="SSL",
                # BUGFIX: the guard above checks `ssl_cafile`, but the value was
                # read from the never-checked `ssl_ca_file` key -> KeyError
                ssl_cafile=kafka_config["ssl_cafile"],
                ssl_certfile=kafka_config["ssl_access_certificate_file"],
                ssl_keyfile=kafka_config["ssl_access_key_file"],
            )
        else:
            self.kafka_producer = kafka.KafkaProducer(
                bootstrap_servers=kafka_config["kafka_url"],
            )

    def list_topic_data_files(self, *, topic):
        """Return {partition: [backup object records sorted by begin offset]}.

        Object names are expected to look like `<topic>-<partition>-<offset>[.gz]`.
        """
        topic_re = re.compile(
            (
                r"(?P<topic>" + re.escape(topic) + r")"
                r"-(?P<partition>[0-9]+)"
                r"-(?P<offset>[0-9]+)"
                r"(?P<suffix>[.a-z]*)"
            )
        )
        topic_partition_files = {}
        for item in self.object_storage.list_items():
            matches = topic_re.match(item.name)
            if matches:
                partition = int(matches.group("partition"))
                if partition not in topic_partition_files:
                    topic_partition_files[partition] = []
                begin_offset = matches.group("offset")
                record = {
                    "begin_offset": int(begin_offset),
                    "last_modified": item.last_modified,
                    "object_name": item.name,
                }
                if matches.group("suffix") == ".gz":
                    record["compression"] = "gzip"
                topic_partition_files[partition].append(record)

        for partition in topic_partition_files:
            topic_partition_files[partition] = sorted(
                topic_partition_files[partition], key=lambda x: x["begin_offset"]
            )

        return topic_partition_files

    def parse_record(self, record_line):
        """Parse one backup CSV line into (key, value, offset, timestamp).

        Fields are: base64 key, base64 value, offset, optional timestamp;
        empty key/value/timestamp fields map to None.
        """
        fields = record_line.split(",")
        if fields[0]:
            key = codecs.decode(codecs.encode(fields[0], "ascii"), "base64")
        else:
            key = None
        if fields[1]:
            value = codecs.decode(codecs.encode(fields[1], "ascii"), "base64")
        else:
            value = None
        offset = int(fields[2])
        if fields[3]:
            timestamp = int(fields[3])
        else:
            timestamp = None
        return key, value, offset, timestamp

    def restore(self, *, topic):
        """Re-produce all backed-up messages of `topic` into the cluster.

        Objects are processed round-robin across partitions in ascending
        offset order; objects older than the optional `since` timestamp
        are skipped.
        """
        topic_partition_files = self.list_topic_data_files(topic=topic)
        partition_offset_records = {}
        since = self.config.get("since")

        with TemporaryDirectory() as working_directory:
            while True:
                progress = False
                for partition in topic_partition_files:
                    if topic_partition_files[partition]:
                        # Consume the pending object with the lowest begin offset
                        object_record = topic_partition_files[partition].pop(0)
                        progress = True
                        object_name = object_record["object_name"]
                        if since is not None and since > object_record["last_modified"]:
                            self.log.info("Skipping object %r due to timestamp", object_name)
                            continue
                        local_name = f"{working_directory}/{topic}-{partition}"
                        self.object_storage.get_contents_to_file(object_name, local_name)
                        if object_record.get("compression") == "gzip":
                            fh = gzip.open(local_name, "rt")
                        else:
                            fh = open(local_name, "r")
                        nrecords = 0
                        # `with` guarantees the handle is closed even if parsing fails
                        with fh:
                            for line in fh:
                                key, value, offset, timestamp = self.parse_record(line.strip())
                                future_record = self.kafka_producer.send(
                                    topic,
                                    partition=partition,
                                    key=key,
                                    value=value,
                                    timestamp_ms=timestamp,
                                )
                                nrecords += 1
                                partition_offset_records[partition] = {
                                    "last_original_offset": offset,
                                    "last_produced_record": future_record,
                                }
                        self.log.info("Restored %d messages from object %r", nrecords, object_name)
                        os.unlink(local_name)

                if not progress:
                    # Nothing left in any partition: flush pending sends and stop
                    self.kafka_producer.flush()
                    break

        for partition in sorted(partition_offset_records):
            self.log.info(
                "Partition %d original offset %d new offset %d",
                partition,
                partition_offset_records[partition]["last_original_offset"],
                partition_offset_records[partition]["last_produced_record"].get().offset,
            )
def main():
    """CLI entry point: restore one topic from the configured object storage."""
    # BUGFIX: `import dateutil` alone does not make `dateutil.parser`
    # available; the submodule must be imported explicitly before
    # dateutil.parser.parse() can be called.
    import dateutil.parser

    logging.basicConfig(level=logging.INFO, format="%(name)-20s  %(levelname)-8s  %(message)s")
    parser = ArgumentParser()
    parser.add_argument("-c", "--config", required=True, help="Path to config file")
    parser.add_argument("-t", "--topic", required=True, help="Topic name")
    parser.add_argument("--since", help="Skip objects that are older than given timestamp")
    args = parser.parse_args()
    with open(args.config) as fh:
        restore_config = json.load(fh)
    if args.since:
        dt = dateutil.parser.parse(args.since)
        if dt.tzinfo is None:
            # assume UTC if no timezone is present
            dt = dt.replace(tzinfo=datetime.timezone.utc)
        restore_config["since"] = dt
    kafka_restore = KafkaRestore(config=restore_config)
    kafka_restore.restore(topic=args.topic)


if __name__ == "__main__":
    main()
| kafka_restore/__main__.py | 6,968 | Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/ assume UTC if no timezone is present | 99 | en | 0.755911 |
# Generated by Django 4.0 on 2022-01-10 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering three ``fblog`` fields."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('funblog', '0001_initial'),
    ]

    operations = [
        # DOC: set automatically once, when the row is first created.
        migrations.AlterField(
            model_name='fblog',
            name='DOC',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Date of creating'),
        ),
        # DOU: refreshed automatically on every save.
        migrations.AlterField(
            model_name='fblog',
            name='DOU',
            field=models.DateTimeField(auto_now=True, verbose_name='Date of updating'),
        ),
        # comment: free text capped at 128 characters.
        migrations.AlterField(
            model_name='fblog',
            name='comment',
            field=models.TextField(max_length=128, verbose_name='Comment'),
        ),
    ]
| fun/funblog/migrations/0002_alter_fblog_doc_alter_fblog_dou_alter_fblog_comment.py | 774 | Generated by Django 4.0 on 2022-01-10 10:35 | 43 | en | 0.729144 |
# Python 2/3 compatibility: on Python 3 `xrange` no longer exists, so alias
# it to `range`.  Catch only NameError -- a bare `except:` would also hide
# unrelated failures such as KeyboardInterrupt.
try:
    xrange
except NameError:
    xrange = range
def totalvalue(comb, limit=400):
    """Totalise a particular combination of items.

    :param comb: iterable of (name, weight, value) triples
    :param limit: maximum total weight allowed (default 400, the knapsack
        capacity used throughout this script)
    :return: ``(total_value, -total_weight)`` when the combination fits
        within ``limit``, else ``(0, 0)``.  Negating the weight makes the
        tuple sort "highest value first, then lightest" under max().
    """
    totwt = totval = 0
    for _item, wt, val in comb:
        totwt += wt
        totval += val
    return (totval, -totwt) if totwt <= limit else (0, 0)
# Candidate knapsack items as a flat tuple of (name, weight, value) triples.
items = (
    ("map", 9, 150), ("compass", 13, 35), ("water", 153, 200), ("sandwich", 50, 160),
    ("glucose", 15, 60), ("tin", 68, 45), ("banana", 27, 60), ("apple", 39, 40),
    ("cheese", 23, 30), ("beer", 52, 10), ("suntan cream", 11, 70), ("camera", 32, 30),
    ("t-shirt", 24, 15), ("trousers", 48, 10), ("umbrella", 73, 40),
    ("waterproof trousers", 42, 70), ("waterproof overclothes", 43, 75),
    ("note-case", 22, 80), ("sunglasses", 7, 20), ("towel", 18, 12),
    ("socks", 4, 50), ("book", 30, 10),
)
def knapsack01_dp(items, limit):
    """Solve the 0/1 knapsack problem by dynamic programming.

    :param items: sequence of (name, weight, value) triples
    :param limit: knapsack weight capacity
    :return: list of the chosen item triples (in reverse item order)
    """
    n = len(items)
    # dp[row][cap] = best value using the first `row` items within weight `cap`
    dp = [[0] * (limit + 1) for _ in range(n + 1)]
    for row, (_name, weight, value) in enumerate(items, start=1):
        prev, cur = dp[row - 1], dp[row]
        for cap in range(1, limit + 1):
            if weight > cap:
                cur[cap] = prev[cap]
            else:
                cur[cap] = max(prev[cap], prev[cap - weight] + value)
    # Backtrack: an item was taken wherever its row improved on the row above.
    chosen = []
    cap = limit
    for row in range(n, 0, -1):
        if dp[row][cap] != dp[row - 1][cap]:
            chosen.append(items[row - 1])
            cap -= items[row - 1][1]
    return chosen
# Solve for the hard-coded 400-unit weight limit and report the chosen items.
bagged = knapsack01_dp(items, 400)
print(("Bagged the following items\n " +
       '\n '.join(sorted(item for item,_,_ in bagged))))
# totalvalue returns (value, -weight), hence the negation when printing weight.
val, wt = totalvalue(bagged)
print(("for a total value of %i and a total weight of %i" % (val, -wt)))
| lang/Python/knapsack-problem-0-1-2.py | 1,702 | Totalise a particular combination of items | 42 | en | 0.549833 |
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
# Circuit breaker for skills requesting external services
#
from .config import config
from circuitbreaker import CircuitBreaker
from requests.exceptions import RequestException
class SkillCircuitBreaker(CircuitBreaker):
    """ Circuit breaker's defaults from skill config """

    # Failure count threshold, read from the `circuit_breakers` config
    # section (fallback: 5).
    FAILURE_THRESHOLD = config.getint('circuit_breakers', 'threshold', fallback=5)
    # Recovery timeout in seconds, read from the same section (fallback: 30).
    RECOVERY_TIMEOUT = config.getint('circuit_breakers', 'timeout', fallback=30)
    # Only `requests`-level errors count as failures for this breaker.
    EXPECTED_EXCEPTION = RequestException


# Default circuit breaker will be used if no custom breaker supplied
DEFAULT_CIRCUIT_BREAKER = SkillCircuitBreaker()
| skill_sdk/circuit_breaker.py | 788 | Circuit breaker's defaults from skill config
voice-skill-sdk (C) 2020, Deutsche Telekom AG This file is distributed under the terms of the MIT license. For details see the file LICENSE in the top directory. Circuit breaker for skills requesting external services Default circuit breaker will be used if no custom breaker supplied | 332 | en | 0.699027 |
from django.apps import AppConfig
class ActivityFeedConfig(AppConfig):
    """App config for activity_feed."""

    # Full dotted path Django uses to register the application.
    name = 'datahub.activity_feed'
| datahub/activity_feed/apps.py | 149 | App config for activity_feed. | 29 | en | 0.615776 |
# Build a tuple holding product names followed by their prices, then
# print the price list in tabular form.
lista = ('Lápis', 1.5, 'Borracha', 2.5, 'Caderno', 10.8,
         'Estojo', 20, 'Mochila', 100.5)
print('\033[31m--'*20)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('--'*20, '\033[m')
# Names occupy the even indexes, prices the following odd indexes.
for produto, preco in zip(lista[::2], lista[1::2]):
    print(f'{produto:.<30}R${preco:>5.2f}')
print('\033[31m--\033[m'*20)
''' Formatação:
print(f'{"LISTAGEM DE PREÇOS":^40}')
centralizado = {elemento:^quantidade}
à direita = {:<quantidade}  > preenche com espaço
à direita = {:.<quantidade} > preenche com ponto
à esquerda = {:>quantidade} > preenche com espaço
à esquerda = {:->quantidade} > preenche com -
'''
| PacoteDownload/ex076.py | 711 | crie um tupla com o nome dos produtos, seguidos do preรงo.mostre uma listagem de preรงos, de forma tabular. | 105 | pt | 0.99895 |
#!/usr/bin/env python
import os
import json
import pprint as pp
from time import time
import torch
import torch.optim as optim
from tensorboard_logger import Logger as TbLogger
from nets.critic_network import CriticNetwork
from options import get_options
from train import train_epoch, validate, get_inner_model
from reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline
from nets.attention_model import AttentionModel
from nets.pointer_network import PointerNetwork, CriticNetworkLSTM
from utils import torch_load_cpu, load_problem
import pickle
# for hyperparameter tuning using wanb
# https://docs.wandb.ai/sweeps/quickstart
import torch.nn.functional as F
import torchvision.datasets as datasets
import torch.nn as nn
import wandb
from torchvision import datasets, transforms
def run(opts):
    """Train (or evaluate) an attention/pointer routing model, with selected
    hyperparameters tunable through a wandb sweep, checkpointing the model at
    each requested wall-clock hour in ``opts.save_hrs``.

    :param opts: argparse namespace produced by ``options.get_options``
    """
    # Wall-clock reference for the hour-based checkpointing below
    start_time = time()
    train_run = []
    opts.save_hrs.sort()
    run_name = opts.run_name

    # Pretty print the run args
    pp.pprint(vars(opts))

    # Set the random seed
    torch.manual_seed(opts.seed)

    # Optionally configure tensorboard
    tb_logger = None
    if not opts.no_tensorboard:
        tb_logger = TbLogger(os.path.join(opts.log_dir, "{}_{}".format(opts.problem, opts.graph_size), opts.run_name))

    os.makedirs(opts.save_dir)
    # Save arguments so exact configuration can always be found
    with open(os.path.join(opts.save_dir, "args.json"), 'w') as f:
        json.dump(vars(opts), f, indent=True)

    # Set the device
    opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")

    # Figure out what's the problem
    problem = load_problem(opts.problem)

    # Load data from load_path
    load_data = {}
    assert opts.load_path is None or opts.resume is None, "Only one of load path and resume can be given"
    load_path = opts.load_path if opts.load_path is not None else opts.resume
    if load_path is not None:
        print(' [*] Loading data from {}'.format(load_path))
        load_data = torch_load_cpu(load_path)

    # Hyperparameter search (wandb sweep, https://docs.wandb.ai/sweeps/quickstart):
    # the defaults below are the user-specified CLI values; a sweep agent
    # overrides them via wandb.config.
    config_defaults = {
        'batch_size': opts.batch_size,
        'lr_model': opts.lr_model,
        'lr_critic': opts.lr_critic,
        'lr_decay': opts.lr_decay,
    }

    # Candidate parameter space kept for reference when launching a sweep:
    """sweep_config = {
        'parameters': {
            'batch_size': {
                'values': [256, 128, 64, 32]
            },
            'lr_model': {
                'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5]
            },
            'lr_critic': {
                'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5]
            },
            'lr_decay': {
                'lr_decay': [0.9, 0.95, 1.0, 1.05, 1.1, 1.15]
            },
        }
    }"""
    # initialize the sweep
    # sweep_id = wandb.sweep(sweep_config, project="Pytorch-sweeps")

    # Initialize a new wandb run
    wandb.init(config=config_defaults)

    # `config` holds the (possibly sweep-overridden) hyperparameters; any
    # tuned parameter must be read from `config`, not from `opts`.
    config = wandb.config

    # Initialize model
    model_class = {
        'attention': AttentionModel,
        'pointer': PointerNetwork
    }.get(opts.model, None)
    assert model_class is not None, "Unknown model: {}".format(model_class)
    model = model_class(
        opts.embedding_dim,
        opts.hidden_dim,
        problem,
        n_encode_layers=opts.n_encode_layers,
        mask_inner=True,
        mask_logits=True,
        normalization=opts.normalization,
        tanh_clipping=opts.tanh_clipping,
        checkpoint_encoder=opts.checkpoint_encoder,
        shrink_size=opts.shrink_size
    ).to(opts.device)

    if opts.use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # Overwrite model parameters by parameters to load
    model_ = get_inner_model(model)
    model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})

    # Initialize baseline
    if opts.baseline == 'exponential':
        baseline = ExponentialBaseline(opts.exp_beta)
    elif opts.baseline == 'critic' or opts.baseline == 'critic_lstm':
        assert problem.NAME == 'tsp', "Critic only supported for TSP"
        baseline = CriticBaseline(
            (
                CriticNetworkLSTM(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.tanh_clipping
                )
                if opts.baseline == 'critic_lstm'
                else
                CriticNetwork(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.normalization
                )
            ).to(opts.device)
        )
    elif opts.baseline == 'rollout':
        baseline = RolloutBaseline(model, problem, opts)
    else:
        assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
        baseline = NoBaseline()

    if opts.bl_warmup_epochs > 0:
        baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)

    # Load baseline from data, make sure script is called with same type of baseline
    if 'baseline' in load_data:
        baseline.load_state_dict(load_data['baseline'])

    # Initialize optimizer (learning rates come from the wandb config)
    optimizer = optim.Adam(
        [{'params': model.parameters(), 'lr': config.lr_model}]
        + (
            [{'params': baseline.get_learnable_parameters(), 'lr': config.lr_critic}]
            if len(baseline.get_learnable_parameters()) > 0
            else []
        )
    )

    # Load optimizer state and move any tensors to the target device
    if 'optimizer' in load_data:
        optimizer.load_state_dict(load_data['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(opts.device)

    # Initialize learning rate scheduler, decay by lr_decay once per epoch!
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: config.lr_decay ** epoch)

    # Start the actual training loop
    val_dataset = problem.make_dataset(
        size=opts.graph_size, num_samples=opts.val_size, filename=opts.val_dataset, distribution=opts.data_distribution)

    if opts.resume:
        epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])

        # Set the random states
        torch.set_rng_state(load_data['rng_state'])
        if opts.use_cuda:
            torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
        # Dumping of state was done before epoch callback, so do that now (model is loaded)
        baseline.epoch_callback(model, epoch_resume)
        print("Resuming after {}".format(epoch_resume))
        opts.epoch_start = epoch_resume + 1

    # NOTE(review): purpose of this placeholder save is unclear -- confirm it
    # is still needed before removing.
    torch.save(model, os.path.join('.', 'empty.pt'))

    if opts.eval_only:
        validate(model, val_dataset, opts)
    else:
        for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
            avg_time = train_epoch(
                model,
                optimizer,
                baseline,
                lr_scheduler,
                epoch,
                val_dataset,
                problem,
                tb_logger,
                opts,
                start_time,
                config
            )
            train_run.append(avg_time)
            # BUGFIX: iterate over a snapshot of save_hrs -- removing from the
            # list while iterating it made the loop skip the element following
            # each removal, so back-to-back save deadlines could be missed.
            for hr in list(opts.save_hrs):
                if (time() - start_time) > hr * 3600:
                    opts.save_hrs.remove(hr)
                    print('Saving model and state...')
                    hr_time = int(round((time() - start_time) / 3600))
                    with open('../models/att/hist_{}_{}hr.pickle'.format(run_name, hr_time), 'wb') as handle:
                        pickle.dump(train_run, handle, protocol=pickle.HIGHEST_PROTOCOL)
                    torch.save(
                        {
                            'model': get_inner_model(model).state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'rng_state': torch.get_rng_state(),
                            'cuda_rng_state': torch.cuda.get_rng_state_all(),
                            'baseline': baseline.state_dict()
                        },
                        os.path.join('../models/att', '{}_{}hr-model-att-only.pt'.format(run_name, hr_time))
                    )
                    torch.save(model, os.path.join('../models/att', '{}_{}hr-model.pt'.format(run_name, hr_time)))


if __name__ == "__main__":
    run(get_options())
| hyper_attention/run.py | 8,902 | !/usr/bin/env python for hyperparameter tuning using wanb https://docs.wandb.ai/sweeps/quickstart start time Pretty print the run args Set the random seed Optionally configure tensorboard Save arguments so exact configuration can always be found Set the device Figure out what's the problem Load data from load_path hyperparameter search default (user specified) config determine the parameter space initialize the sweep sweep_id = wandb.sweep(sweep_config, project="Pytorch-sweeps") Initialize a new wandb run Config is a variable that holds and saves hyperparameters and inputs ??? any code for setting up hyperparameters interested should use config.parameter to set instead of opt.parameter including functions in other files-> pass config to other functions Initialize model Overwrite model parameters by parameters to load Initialize baseline Load baseline from data, make sure script is called with same type of baseline Initialize optimizer Load optimizer state if isinstance(v, torch.Tensor): Initialize learning rate scheduler, decay by lr_decay once per epoch! Start the actual training loop Set the random states Dumping of state was done before epoch callback, so do that now (model is loaded) | 1,206 | en | 0.649407 |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import pytest
from models_library.basic_types import LogLevel
from simcore_service_director_v2.core.settings import (
AppSettings,
BootModeEnum,
DynamicSidecarProxySettings,
DynamicSidecarSettings,
RegistrySettings,
)
def test_settings_with_project_env_devel(project_env_devel_environment):
    # Settings are built straight from the (fixture-patched) environment.
    app_settings = AppSettings.create_from_envs()
    print("captured settings: \n", app_settings.json(indent=2))

    assert app_settings.SC_BOOT_MODE == BootModeEnum.DEBUG
    assert app_settings.LOG_LEVEL == LogLevel.DEBUG
    assert app_settings.POSTGRES.dsn == "postgresql://test:test@localhost:5432/test"
def test_settings_with_env_devel(mock_env_devel_environment):
    # Smoke test: the devel environment must produce a valid settings object.
    app_settings = AppSettings.create_from_envs()
    print("captured settings: \n", app_settings.json(indent=2))
    assert app_settings
@pytest.mark.parametrize(
    "image",
    [
        "local/dynamic-sidecar:development",
        "local/dynamic-sidecar:production",
        "itisfoundation/dynamic-sidecar:merge-github-testbuild-latest",
        "itisfoundation/dynamic-sidecar:1.0.0",
        "local/dynamic-sidecar:0.0.1",
        "dynamic-sidecar:production",
        "/dynamic-sidecar:latest",
        "/local/dynamic-sidecar:latest",
    ],
)
def test_dynamic_sidecar_settings(image: str) -> None:
    # Only the mandatory fields; everything else falls back to defaults.
    required_kwargs = {
        "DYNAMIC_SIDECAR_IMAGE": image,
        "SIMCORE_SERVICES_NETWORK_NAME": "test",
        "TRAEFIK_SIMCORE_ZONE": "",
        "SWARM_STACK_NAME": "",
        "DYNAMIC_SIDECAR_PROXY_SETTINGS": DynamicSidecarProxySettings(),
        "REGISTRY": RegistrySettings(
            REGISTRY_URL="http://te.st",
            REGISTRY_AUTH=True,
            REGISTRY_USER="test",
            REGISTRY_PW="test",
            REGISTRY_SSL=False,
        ),
    }

    settings = DynamicSidecarSettings(**required_kwargs)

    # Expected: any leading slash in the image name gets stripped.
    assert settings.DYNAMIC_SIDECAR_IMAGE == image.lstrip("/")
| services/director-v2/tests/unit/test_core_settings.py | 1,985 | pylint:disable=unused-variable pylint:disable=unused-argument pylint:disable=redefined-outer-name loads from environ | 116 | en | 0.263885 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class MonitoringSettingsOperations(object):
"""MonitoringSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Generated constructor: stash the pipeline client, configuration and
        # the (de)serializers shared by every operation in this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        service_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.MonitoringSettingResource"
        """Get the Monitoring Setting and its properties.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MonitoringSettingResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        # Map well-known HTTP failure codes to richer azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Run the GET request through the ARM pipeline (synchronous, non-streaming).
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)

        # `cls` lets callers post-process the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
def _update_put_initial(
    self,
    resource_group_name,  # type: str
    service_name,  # type: str
    monitoring_setting_resource,  # type: "_models.MonitoringSettingResource"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.MonitoringSettingResource"
    """Issue the initial PUT request of the update-monitoring-setting LRO.

    Called by ``begin_update_put``; not intended for direct use.

    :param resource_group_name: The name of the resource group that contains the resource.
    :param service_name: The name of the Service resource.
    :param monitoring_setting_resource: Parameters for the update operation.
    :return: The deserialized MonitoringSettingResource (or ``cls(...)`` result).
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_put_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 and 202 were deserialized with identical statements in the original;
    # a single unconditional deserialization covers both accepted codes.
    deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
def begin_update_put(
    self,
    resource_group_name,  # type: str
    service_name,  # type: str
    monitoring_setting_resource,  # type: "_models.MonitoringSettingResource"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.MonitoringSettingResource"]
    """Update the Monitoring Setting (long-running PUT operation).

    :param resource_group_name: The name of the resource group that contains the resource. You can
     obtain this value from the Azure Resource Manager API or the portal.
    :type resource_group_name: str
    :param service_name: The name of the Service resource.
    :type service_name: str
    :param monitoring_setting_resource: Parameters for the update operation.
    :type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial PUT. cls=lambda returns the raw
        # pipeline response so deserialization happens in the poller callback
        # below, not in the initial call.
        raw_result = self._update_put_initial(
            resource_group_name=resource_group_name,
            service_name=service_name,
            monitoring_setting_resource=monitoring_setting_resource,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-construction kwargs before the remainder is handed to the
    # polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Callback the poller invokes on the final response.
        deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
    }
    # polling=True -> default ARM poller (final state via azure-async-operation);
    # polling=False -> no polling; anything else is a caller-supplied strategy.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
def _update_patch_initial(
    self,
    resource_group_name,  # type: str
    service_name,  # type: str
    monitoring_setting_resource,  # type: "_models.MonitoringSettingResource"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.MonitoringSettingResource"
    """Issue the initial PATCH request of the update-monitoring-setting LRO.

    Called by ``begin_update_patch``; not intended for direct use.

    :param resource_group_name: The name of the resource group that contains the resource.
    :param service_name: The name of the Service resource.
    :param monitoring_setting_resource: Parameters for the update operation.
    :return: The deserialized MonitoringSettingResource (or ``cls(...)`` result).
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_patch_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 and 202 were deserialized with identical statements in the original;
    # a single unconditional deserialization covers both accepted codes.
    deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
def begin_update_patch(
    self,
    resource_group_name,  # type: str
    service_name,  # type: str
    monitoring_setting_resource,  # type: "_models.MonitoringSettingResource"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.MonitoringSettingResource"]
    """Update the Monitoring Setting (long-running PATCH operation).

    :param resource_group_name: The name of the resource group that contains the resource. You can
     obtain this value from the Azure Resource Manager API or the portal.
    :type resource_group_name: str
    :param service_name: The name of the Service resource.
    :type service_name: str
    :param monitoring_setting_resource: Parameters for the update operation.
    :type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial PATCH. cls=lambda returns the raw
        # pipeline response so deserialization happens in the poller callback
        # below, not in the initial call.
        raw_result = self._update_patch_initial(
            resource_group_name=resource_group_name,
            service_name=service_name,
            monitoring_setting_resource=monitoring_setting_resource,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-construction kwargs before the remainder is handed to the
    # polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Callback the poller invokes on the final response.
        deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
    }
    # polling=True -> default ARM poller (final state via azure-async-operation);
    # polling=False -> no polling; anything else is a caller-supplied strategy.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
| sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_11_01_preview/operations/_monitoring_settings_operations.py | 19,130 | MonitoringSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
:raises ~azure.core.exceptions.HttpResponseError:
Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
:raises ~azure.core.exceptions.HttpResponseError:
Get the Monitoring Setting and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MonitoringSettingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:raises: ~azure.core.exceptions.HttpResponseError
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports type: str type: str type: Any type: (...) -> "_models.MonitoringSettingResource" type: ClsType["_models.MonitoringSettingResource"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: "_models.MonitoringSettingResource" type: Any type: (...) -> "_models.MonitoringSettingResource" type: ClsType["_models.MonitoringSettingResource"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: str type: str type: "_models.MonitoringSettingResource" type: Any type: (...) -> LROPoller["_models.MonitoringSettingResource"] type: Union[bool, PollingMethod] type: ClsType["_models.MonitoringSettingResource"] type: Optional[str] type: ignore type: str type: str type: "_models.MonitoringSettingResource" type: Any type: (...) -> "_models.MonitoringSettingResource" type: ClsType["_models.MonitoringSettingResource"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: str type: str type: "_models.MonitoringSettingResource" type: Any type: (...) -> LROPoller["_models.MonitoringSettingResource"] type: Union[bool, PollingMethod] type: ClsType["_models.MonitoringSettingResource"] type: Optional[str] type: ignore | 5,645 | en | 0.525905 |
# coding: utf-8
# In[ ]:
def choice():
    """Show the menu, read a selection and dispatch to the matching action."""
    print("1-create,2-update,3-read,4-delete")
    try:
        selection = int(input("\nEnter your choice:"))
    except ValueError:
        # Non-numeric entry: complain and re-prompt.
        print("Enter integer choice:....")
        choice()
    else:
        # Options 1-3 map directly; any other number falls through to delete,
        # matching the menu's final option.
        actions = {1: create, 2: update, 3: read}
        actions.get(selection, delete)()
def create():
    """Register a new record by prompting for id, name, college and branch.

    Appends one entry to each of the parallel global lists (lid, lname,
    lcollege, lbranch) and then returns to the menu via choice().
    """
    try:
        id = int(input("\nEnter your id:"))
    except ValueError:
        # Bug fix: the original read the retry value with an unguarded
        # int(input(...)), so entering non-numeric text twice in a row crashed
        # with an uncaught ValueError. Treat a bad retry entry like any other
        # invalid id and prompt again.
        try:
            f = int(input("Enter a valid integer number or press 0 to exit:"))
        except ValueError:
            create()
            return
        if f == 0:
            choice()
        else:
            create()
    else:
        name = str(input("Enter your name:"))
        college = str(input("Enter the college name:"))
        branch = str(input("Enter the branch:"))
        print("\n")
        lid.append(id)
        lname.append(name)
        lcollege.append(college)
        lbranch.append(branch)
        choice()
def update():
    """Overwrite the name/college/branch stored for an already-registered id."""
    try:
        record_id = int(input("Enter your id:"))
    except ValueError:
        print("\nEnter valid integer id.......")
        update()
        return
    if record_id in lid:
        # All four lists are parallel, so one index addresses the whole record.
        pos = lid.index(record_id)
        lname[pos] = str(input("Enter the name"))
        lcollege[pos] = str(input("Enter the college name:"))
        lbranch[pos] = str(input("Enter the branch:"))
    else:
        print("id didnot match........")
        print("please register yourself....")
    choice()
def read():
    """Dump every column of the database, gated on a registered id."""
    try:
        requested = int(input("\nTo access database enter id:"))
    except ValueError:
        print("Enter integer id.....")
        read()
        return
    if requested in lid:
        # Registered caller: print the four parallel columns.
        for tag, column in (("ID:-", lid), ("NAMES:-", lname),
                            ("COLLEGE:-", lcollege), ("BRANCH:-", lbranch)):
            print(tag, column)
    elif lid == dummy:
        # dummy never receives entries, so this comparison tests "no records".
        print("\nno records......")
    else:
        print("\nRegister inorder to access database.....")
    choice()
def delete():
    """Remove one record by id, remembering the id in delid for later checks."""
    if lid == dummy:
        # dummy never receives entries, so equality means the database is empty.
        print("No records found to delete.....")
    else:
        try:
            victim = int(input("\nEnter your id:"))
        except ValueError:
            print("\nEnter the valid integer id.....")
            delete()
        else:
            if victim in lid:
                delid.append(victim)
                pos = lid.index(victim)
                # Remove the same slot from every parallel list to keep them
                # aligned.
                for column in (lid, lname, lcollege, lbranch):
                    del column[pos]
                print("\ndetails of your id has been deleted sucessfully......")
            elif victim in delid:
                print("\nDetails of this id has been deleted......")
            else:
                print("\nregister the id... ")
    choice()
# Parallel global lists: index i across lid/lname/lcollege/lbranch describes
# one record.
lid=[]
lname=[]
lcollege=[]
lbranch=[]
dummy=[]  # stays empty forever; comparing a list to it tests for "no records"
delid=[] #list of deleted id
choice()  # enter the interactive menu loop
| database.py | 2,870 | coding: utf-8 In[ ]:creating lists list of deleted id | 56 | en | 0.926506 |
#!/usr/bin/python3
import socket
import re
import time
# Solver for the pwnable.kr "coin1" service: for each round the server gives N
# coins and C allowed weighings; we binary-search for the odd coin.
pattern = re.compile('N=\d+\sC=\d+')
s = socket.socket()
s.connect(('localhost', 9007))
s.recv(1024) # what received is just a introduction, we do not need it.
time.sleep(4)
while True:
    # Each round begins with a line announcing "N=.. C=..".
    received = s.recv(1024).decode('ascii')
    print(received, end='')
    received = received.replace('\n', '')
    matches = re.findall(pattern, received)
    if len(matches) == 0:
        # No round header in the reply: the game is over.
        break
    match = matches[0].split(' ')
    N = int(match[0].replace('N=', ''))
    C = int(match[1].replace('C=', ''))
    # Binary search over the candidate index range [start, end).
    start = 0
    end = N
    for i in range(0, C):
        if end - start == 1:
            # Single candidate left: weigh just that coin.
            print(start)
            s.send(str(start).encode('ascii') + b'\n')
        else:
            # Weigh the lower half of the remaining range.
            sd = ' '.join(str(j) for j in range(start, (end + start) // 2))
            print(sd)
            s.send(sd.encode('ascii') + b'\n')
        result = s.recv(1024).decode('ascii')
        print(result, end = '')
        if result.startswith('Correct'):
            break
        try:
            result = int(result.replace('\n', ''))
        except:
            # Unexpected non-numeric reply: give up.
            exit(-1)
        # The *10 check implies each ordinary coin weighs 10: if the weighed
        # half sums to count*10 it is all ordinary, so the odd coin is in the
        # upper half; otherwise it is in the weighed (lower) half.
        if result == ((end + start) // 2 - start) * 10:
            start = (end + start) // 2
        else:
            end = (end + start) // 2
    # Submit the surviving candidate index as the answer.
    s.send(str(start).encode('ascii') + b'\n')
    print(s.recv(1024).decode('ascii'), end = '')
| pwnable.kr/Toddler's Bottle/coin1/coin1.py | 1,371 | !/usr/bin/python3 what received is just a introduction, we do not need it. | 74 | en | 0.981509 |
import os
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from skimage import io
from tensorflow import keras
class BimodalDenoiseDataGen(keras.utils.Sequence):
    '''
    Generate train/validation/test samples for our multimodal
    denoise network. Inputs are static images, spectrograms and
    corresponding noisy labels. Outputs are noisy labels. In
    order to decorrelate training samples, we randomly shuffle
    movie sequences, sequentially fetch sel_movies movie clips
    from that sequence, then randomly select sel_frames frames
    from each movie clip.
    '''
    def __init__(self,
                 label_file,
                 length_file,
                 sample_rate,
                 video_root,
                 audio_root,
                 video_shape,
                 audio_shape,
                 video_preproc,
                 audio_preproc,
                 sel_movies,
                 sel_frames,
                 n_classes,
                 affective_type,
                 ret_label_X=True,
                 ret_label_y=True):
        """Index the label/length tables and prepare batch bookkeeping.

        :param label_file: table (pandas ``read_table``, i.e. tab-separated by
            default) with columns ``name``/``valenceClass``/``arousalClass``.
        :param length_file: table with columns ``name``/``length``; length is
            divided by ``sample_rate`` to get the number of usable frames.
        :param affective_type: ``"val"`` selects valenceClass, anything else
            selects arousalClass.
        :param ret_label_X: if True, append the noisy labels to the inputs.
        :param ret_label_y: if True, return the noisy labels as targets.
        """
        self.__parse_label_file(label_file, affective_type)
        self.__parse_length_file(length_file, sample_rate)
        self.file_list = list(self.label_dict.keys())
        self.video_root = video_root
        self.audio_root = audio_root
        self.video_preproc = video_preproc
        self.audio_preproc = audio_preproc
        self.sel_movies = sel_movies
        self.sel_frames = sel_frames
        self._video_shape = video_shape
        self._audio_shape = audio_shape
        self._n_classes = n_classes
        # One batch = sel_frames frames from each of sel_movies movies.
        self._batch_size = self.sel_movies*self.sel_frames
        self.ret_label_X = ret_label_X
        self.ret_label_y = ret_label_y
        self.on_epoch_end()
    def on_epoch_end(self):
        """Reshuffle the movie order between epochs (Sequence callback)."""
        np.random.shuffle(self.file_list)
    def __parse_label_file(self, label_file, affective_type):
        # Build name -> affective class id (valence or arousal column).
        label_table = pd.read_table(label_file)
        self.label_dict = dict(
            zip(
                label_table["name"],
                label_table["valenceClass"] if affective_type == "val"
                else label_table["arousalClass"]
            ))
    def __parse_length_file(self, length_file, sample_rate):
        # Build name -> number of extractable frames (raw length // sample_rate).
        length_table = pd.read_table(length_file)
        self.length_dict = dict(
            zip(
                length_table["name"],
                [l//sample_rate for l in length_table["length"]]
            ))
    def __len__(self):
        """Number of batches per epoch (movies // movies-per-batch)."""
        num = len(self.label_dict)
        return num // self.sel_movies
    def __getitem__(self, i):
        """Return the i-th batch as (inputs, targets)."""
        batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
        X, y = self._data_generator(batch_file_list)
        return X, y
    def _data_generator(self, batch_file_list):
        """Load frames and spectrograms for the given movies into one batch.

        Returns ``([videos, audios] (+ labels if ret_label_X),
        [labels] if ret_label_y else [])``.
        """
        videos = np.zeros((self._batch_size, *self.video_shape), dtype=np.float32)
        audios = np.zeros((self._batch_size, *self.audio_shape), dtype=np.float32)
        labels = []
        for i, filename in enumerate(batch_file_list):
            length = self.length_dict[filename]
            # NOTE(review): np.random.choice samples with replacement here, so
            # the same frame can be picked twice in one clip -- confirm intended.
            frame_idx = np.random.choice(length, self.sel_frames)
            for j, idx in enumerate(frame_idx):
                videos[i*self.sel_frames+j] = io.imread(
                    Path(self.video_root)/"{}_{}.jpg".format(filename, idx)
                )
                # [..., None] appends a trailing channel axis to the stored array.
                audios[i*self.sel_frames+j] = np.load(
                    Path(self.audio_root)/"{}_{}.npy".format(filename, idx)
                )[..., None]
            labels += [self.label_dict[filename]]*self.sel_frames
        if self.video_preproc:
            videos = self.video_preproc(videos)
        if self.audio_preproc:
            audios = self.audio_preproc(audios)
        labels = keras.utils.to_categorical(labels, self._n_classes)
        X = [videos, audios]
        y = []
        if self.ret_label_X:
            X += [labels]
        if self.ret_label_y:
            y += [labels]
        return X, y
    @property
    def batch_size(self):
        # Frames per batch (sel_movies * sel_frames).
        return self._batch_size
    @property
    def video_shape(self):
        # Per-frame image shape used to allocate the video batch array.
        return self._video_shape
    @property
    def audio_shape(self):
        # Per-frame spectrogram shape used to allocate the audio batch array.
        return self._audio_shape
    @property
    def n_classes(self):
        # Number of affective classes for one-hot encoding.
        return self._n_classes
class BimodalClassifierDataGen(BimodalDenoiseDataGen):
    """Batch generator for the bimodal classifier.

    Reuses the denoise generator's sampling, but yields
    ``([videos, audios], labels)`` pairs. A denoise model must be supplied in
    training mode, although the current implementation passes the raw (noisy)
    labels straight through without consulting it.
    """
    def __init__(self, training, denoise_model=None, **kwargs):
        super(BimodalClassifierDataGen, self).__init__(**kwargs)
        self.training = training
        if self.training:
            assert denoise_model is not None, \
                "must specify denoise model in training mode!"
        self.denoise_model = denoise_model
    def __getitem__(self, batch_idx):
        lo = batch_idx * self.sel_movies
        hi = lo + self.sel_movies
        inputs, _ = self._data_generator(self.file_list[lo:hi])
        # NOTE(review): denoise_model is not used here; the targets are the
        # noisy labels that _data_generator appended to the inputs (requires
        # ret_label_X=True).
        targets = inputs[-1]
        return [inputs[0], inputs[1]], targets
class DenoiseDataGen(keras.utils.Sequence):
    """Single-modality counterpart of BimodalDenoiseDataGen.

    Uses the same movie/frame sampling scheme, but ``modality`` selects whether
    a batch holds video frames ("visual") or spectrograms ("aural").
    """
    def __init__(self,
                 label_file,
                 length_file,
                 sample_rate,
                 video_root,
                 audio_root,
                 video_shape,
                 audio_shape,
                 video_preproc,
                 audio_preproc,
                 sel_movies,
                 sel_frames,
                 n_classes,
                 affective_type,
                 modality,
                 ret_label_X=True,
                 ret_label_y=True):
        """Index the label/length tables; ``modality`` must be "visual" or "aural"."""
        self.__parse_label_file(label_file, affective_type)
        self.__parse_length_file(length_file, sample_rate)
        self.file_list = list(self.label_dict.keys())
        self.video_root = video_root
        self.audio_root = audio_root
        self.video_preproc = video_preproc
        self.audio_preproc = audio_preproc
        self.sel_movies = sel_movies
        self.sel_frames = sel_frames
        self._video_shape = video_shape
        self._audio_shape = audio_shape
        self._n_classes = n_classes
        # One batch = sel_frames frames from each of sel_movies movies.
        self._batch_size = self.sel_movies*self.sel_frames
        self.ret_label_X = ret_label_X
        self.ret_label_y = ret_label_y
        self.modality = modality
        assert modality in ["visual", "aural"]
        self.on_epoch_end()
    def on_epoch_end(self):
        """Reshuffle the movie order between epochs (Sequence callback)."""
        np.random.shuffle(self.file_list)
    def __parse_label_file(self, label_file, affective_type):
        # Build name -> affective class id (valence or arousal column).
        label_table = pd.read_table(label_file)
        self.label_dict = dict(
            zip(
                label_table["name"],
                label_table["valenceClass"] if affective_type == "val"
                else label_table["arousalClass"]
            ))
    def __parse_length_file(self, length_file, sample_rate):
        # Build name -> number of extractable frames (raw length // sample_rate).
        length_table = pd.read_table(length_file)
        self.length_dict = dict(
            zip(
                length_table["name"],
                [l//sample_rate for l in length_table["length"]]
            ))
    def __len__(self):
        """Number of batches per epoch (movies // movies-per-batch)."""
        num = len(self.label_dict)
        return num // self.sel_movies
    def __getitem__(self, i):
        """Return the i-th batch as (inputs, targets)."""
        batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
        X, y = self._data_generator(batch_file_list)
        return X, y
    def _data_generator(self, batch_file_list):
        """Assemble one single-modality batch from the given movies.

        Only the array matching ``self.modality`` is filled and returned; the
        other stays all-zero and is discarded.
        """
        videos = np.zeros((self._batch_size, *self.video_shape), dtype=np.float32)
        audios = np.zeros((self._batch_size, *self.audio_shape), dtype=np.float32)
        labels = []
        for i, filename in enumerate(batch_file_list):
            length = self.length_dict[filename]
            # NOTE(review): np.random.choice samples with replacement here --
            # confirm intended.
            frame_idx = np.random.choice(length, self.sel_frames)
            if self.modality == "visual":
                for j, idx in enumerate(frame_idx):
                    videos[i*self.sel_frames+j] = io.imread(
                        Path(self.video_root)/"{}_{}.jpg".format(filename, idx)
                    )
                labels += [self.label_dict[filename]]*self.sel_frames
            elif self.modality == "aural":
                for j, idx in enumerate(frame_idx):
                    # [..., None] appends a trailing channel axis.
                    audios[i*self.sel_frames+j] = np.load(
                        Path(self.audio_root)/"{}_{}.npy".format(filename, idx)
                    )[..., None]
                labels += [self.label_dict[filename]]*self.sel_frames
        if self.video_preproc and self.modality == "visual":
            videos = self.video_preproc(videos)
        if self.audio_preproc and self.modality == "aural":
            audios = self.audio_preproc(audios)
        labels = keras.utils.to_categorical(labels, self._n_classes)
        X = [videos] if self.modality == "visual" else [audios]
        y = []
        if self.ret_label_X:
            X += [labels]
        if self.ret_label_y:
            y += [labels]
        return X, y
    @property
    def batch_size(self):
        # Frames per batch (sel_movies * sel_frames).
        return self._batch_size
    @property
    def video_shape(self):
        # Per-frame image shape used to allocate the video batch array.
        return self._video_shape
    @property
    def audio_shape(self):
        # Per-frame spectrogram shape used to allocate the audio batch array.
        return self._audio_shape
    @property
    def n_classes(self):
        # Number of affective classes for one-hot encoding.
        return self._n_classes
class ClassifierDataGen(DenoiseDataGen):
    """Single-modality classifier batch generator (visual or aural).

    Yields ``(modality_batch, labels)`` pairs. A denoise model must be supplied
    in training mode, although the current implementation passes the raw
    (noisy) labels straight through without consulting it.
    """
    def __init__(self, training, denoise_model=None, **kwargs):
        super(ClassifierDataGen, self).__init__(**kwargs)
        self.training = training
        if self.training:
            assert denoise_model is not None, \
                "must specify denoise model in training mode!"
        self.denoise_model = denoise_model
    def __getitem__(self, batch_idx):
        lo = batch_idx * self.sel_movies
        inputs, _ = self._data_generator(self.file_list[lo:lo + self.sel_movies])
        # NOTE(review): denoise_model is not used here; the targets are the
        # noisy labels that _data_generator appended to the inputs (requires
        # ret_label_X=True).
        targets = inputs[-1]
        return inputs[0], targets
denoise network. Inputs are static images, spectrograms and
corresponding noisy labels. Outputs are noisy labels. In
order to decorrelate training samples, we randonly shuffle
movie sequences, sequentially fetch sel_movie movie clips
from that sequence, then randomly select sel_frames frames
from each moive clip.
if self.training == True: y = self.denoise_model.predict(X)else:if self.training == True: y = self.denoise_model.predict(X)else: | 512 | en | 0.716424 |
# -*- coding: utf-8 -*-
"""layers.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fCQ_zLCcWNzgE99LK9B2cWrql8J3HgBO
"""
# Author : Vedant Shah
# E-mail : vedantshah2012@gmail.com
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class gcn_layer(nn.Module):
    """Single graph-convolution layer (spectral rule, Kipf & Welling style).

    Transforms node features X into D^-1/2 (A + I) D^-1/2 X W, where A is the
    adjacency matrix and D the degree matrix of A + I.
    """
    def __init__(self, ip_size, op_size):
        super(gcn_layer, self).__init__()
        self.ip_size = ip_size  # number of features for each node in the input
        self.op_size = op_size  # number of features for each node in the output
        self.weights = Parameter(
            torch.rand(
                self.ip_size, self.op_size, dtype=torch.float32, requires_grad=True
            )
        )
    def compute(self, admat, features):
        """Forward propagation through the layer according to the spectral rule.

        :param admat: (n, n) adjacency matrix; self-loops are added internally.
        :param features: (n, ip_size) node-feature matrix.
        :return: (n, op_size) convolved node features.
        """
        # Bug fix: Tensor.size is a method -- admat.size(0), not admat.size[0].
        n = admat.size(0)
        # Count each node's contribution to itself by adding self-loops.
        a_hat = admat + torch.eye(n, dtype=admat.dtype, device=admat.device)
        # Bug fix: the original raised the full diagonal matrix to -0.5
        # elementwise, turning every off-diagonal 0 into inf. Take the power on
        # the degree vector instead, then embed it as a diagonal matrix.
        d_inv_sqrt = torch.diag(a_hat.sum(1) ** (-0.5))
        # Symmetric normalisation per the spectral rule (matrix products; the
        # original used elementwise * and torch.dot, which only accepts 1-D
        # tensors).
        a_hat = d_inv_sqrt @ a_hat @ d_inv_sqrt
        self.out = a_hat @ features @ self.weights
        return self.out
| gcn/layers.py | 1,467 | Forward Propagation through the layer according to the spectral rule
layers.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fCQ_zLCcWNzgE99LK9B2cWrql8J3HgBO
-*- coding: utf-8 -*- Author : Vedant Shah E-mail : vedantshah2012@gmail.com number of features for each node in the input number of features for each node in the output Counting the contribution of each node to itself Normalising according to the spectral rule Forward propagate trhough the layer | 531 | en | 0.842585 |
import os
import portalocker
from deep_architect.contrib.communicators.communicator import Communicator
from deep_architect.contrib.communicators.file_utils import (consume_file,
read_file,
write_file)
class FileCommunicator(Communicator):
    """Communicator that coordinates a master and workers through files in a
    shared directory.

    Rank 0 is the master; ranks 1..num_procs-1 are workers. Architectures are
    handed to workers through a single queue file, and each worker reports
    results through its own ``worker_results_<rank>`` file.
    """
    def __init__(self,
                 num_procs,
                 dirname='file_comm',
                 worker_queue_file='worker_queue',
                 worker_results_prefix='worker_results_'):
        # Make the directory where communication files are created.
        # (Replaces a blanket try/except OSError: pass -- exist_ok only
        # suppresses the already-exists case, so real errors surface.)
        os.makedirs(dirname, exist_ok=True)
        # Claim a rank for this process: the 'init' file holds the next free
        # rank and is guarded by an exclusive lock so concurrently starting
        # processes receive distinct ranks.
        lock = portalocker.Lock(os.path.join(dirname, 'init'),
                                mode='a+',
                                flags=portalocker.LOCK_EX)
        lock.acquire()
        fh = lock.fh
        fh.seek(0)
        curnum = fh.read()
        # Bug fix: 'len(curnum) is 0' compared an int by identity (works only
        # via CPython small-int caching and emits a SyntaxWarning); use ==.
        # An empty file means no rank has been claimed yet.
        if len(curnum) == 0:
            rank = 0
        else:
            rank = int(curnum)
        if rank >= num_procs:
            raise ValueError('Number of processes > the number of workers')
        fh.seek(0)
        fh.truncate(0)
        fh.write(str(rank + 1))
        lock.release()
        super(FileCommunicator, self).__init__(num_procs - 1, rank)
        self.worker_queue_file = os.path.join(dirname, worker_queue_file)
        self.worker_results_prefix = os.path.join(dirname,
                                                  worker_results_prefix)
        self.done = False
    def _publish_results_to_master(self, results, evaluation_id,
                                   searcher_eval_token):
        """Write this worker's evaluation results where the master polls."""
        write_file(self.worker_results_prefix + str(self.rank),
                   (results, evaluation_id, searcher_eval_token))
    def _receive_architecture_in_worker(self):
        """Poll the queue file until an architecture (or kill signal) arrives."""
        while not self.done:
            file_data = consume_file(self.worker_queue_file)
            # Keep looping until there is something in the queue file.
            if file_data is None:
                continue
            # If the kill signal is given, acknowledge and return None;
            # otherwise hand the architecture back to the caller.
            vs, evaluation_id, searcher_eval_token, kill = file_data
            if kill:
                write_file(self.worker_results_prefix + str(self.rank), 'done')
                self.done = True
                return None
            return vs, evaluation_id, searcher_eval_token
        return None
    def _is_ready_to_publish_architecture(self):
        """The queue is free when no unconsumed architecture is pending."""
        file_data = read_file(self.worker_queue_file)
        return file_data is None
    def _publish_architecture_to_worker(self, vs, current_evaluation_id,
                                        searcher_eval_token):
        """Place one architecture on the shared queue for a worker to consume."""
        write_file(self.worker_queue_file,
                   (vs, current_evaluation_id, searcher_eval_token, False))
    def _receive_results_in_master(self, src):
        """Consume results from worker ``src`` (worker ranks are offset by 1)."""
        result = consume_file(self.worker_results_prefix + str(src + 1))
        if result == 'done':
            self.finished += 1
            return None
        return result
    def _kill_worker(self):
        """Send the sentinel tuple whose last element tells a worker to stop."""
        write_file(self.worker_queue_file, (0, 0, 0, True))
| deep_architect/contrib/communicators/file_communicator.py | 3,239 | make directory where communication files are created claim a rank for the process continue looping until there is something in the queue file if kill signal is given, return None, otherwise return contents of file | 213 | en | 0.861692 |
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
    """
    TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
    optimized for 🤗 Transformers.

    Args:
        model (:class:`~transformers.TFPreTrainedModel`):
            The model to train, evaluate or use for predictions.
        args (:class:`~transformers.TFTrainingArguments`):
            The arguments to tweak training.
        train_dataset (:class:`~tf.data.Dataset`, `optional`):
            The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
            ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
            calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
            using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
            ``model(features, **labels)``.
        eval_dataset (:class:`~tf.data.Dataset`, `optional`):
            The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
            ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
            calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
            using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
            ``model(features, **labels)``.
        compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
            The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
        tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
            Object to write to TensorBoard.
        optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. The optimizer default to an instance of
            :class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
            :class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
            :class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
            an instance of :class:`~transformers.WarmUp`.
        kwargs:
            Deprecated keyword arguments.
    """

    def __init__(
        self,
        model: TFPreTrainedModel,
        args: TFTrainingArguments,
        train_dataset: Optional[tf.data.Dataset] = None,
        eval_dataset: Optional[tf.data.Dataset] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        tb_writer: Optional[tf.summary.SummaryWriter] = None,
        optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
            None,
            None,
        ),
        **kwargs,
    ):
        assert parse(tf.__version__).release >= (2, 2, 0), (
            "You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
            % tf.__version__
        )

        self.model = model
        self.args = args
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        # Accumulates per-step gradients when gradient_accumulation_steps > 1.
        self.gradient_accumulator = GradientAccumulator()
        # Bookkeeping used for logging: current optimizer step and fractional epoch.
        self.global_step = 0
        self.epoch_logging = 0

        # Backward compatibility: `prediction_loss_only` used to be a kwarg.
        if "prediction_loss_only" in kwargs:
            warnings.warn(
                "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
                FutureWarning,
            )
            self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."

        if tb_writer is not None:
            self.tb_writer = tb_writer
        else:
            self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)

        # Optional experiment-tracker integrations; only hint when disabled/missing.
        if is_wandb_available():
            self.setup_wandb()
        elif os.environ.get("WANDB_DISABLED") != "true":
            logger.info(
                "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
                "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
            )

        if is_comet_available():
            self.setup_comet()
        elif os.environ.get("COMET_MODE") != "DISABLED":
            logger.info(
                "To use comet_ml logging, run `pip/conda install comet_ml` "
                "see https://www.comet.ml/docs/python-sdk/huggingface/"
            )

        set_seed(self.args.seed)

    def get_train_tfdataset(self) -> tf.data.Dataset:
        """
        Returns the training :class:`~tf.data.Dataset`.

        Subclass and override this method if you want to inject some custom behavior.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")

        self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
        self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()

        # Cardinality is negative when unknown/infinite; the loop needs a real count.
        if self.num_train_examples < 0:
            raise ValueError("The training dataset must have an asserted cardinality")

        # repeat() makes the dataset infinite; train() stops it by step counting.
        ds = (
            self.train_dataset.repeat()
            .shuffle(self.num_train_examples, seed=self.args.seed)
            .batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
            .prefetch(tf.data.experimental.AUTOTUNE)
        )

        return self.args.strategy.experimental_distribute_dataset(ds)

    def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Tuple[tf.data.Dataset, int, int]:
        """
        Returns the evaluation :class:`~tf.data.Dataset` along with the number of
        evaluation steps and the number of examples.

        Args:
            eval_dataset (:class:`~tf.data.Dataset`, `optional`):
                If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
                labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
                is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
                ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
                loss is instead calculated by calling ``model(features, **labels)``.

        Subclass and override this method if you want to inject some custom behavior.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")

        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()

        if num_examples < 0:
            # NOTE(review): the message says "training" but this checks the eval dataset.
            raise ValueError("The training dataset must have an asserted cardinality")

        approx = math.floor if self.args.dataloader_drop_last else math.ceil
        steps = approx(num_examples / self.args.eval_batch_size)
        ds = (
            eval_dataset.repeat()
            .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
            .prefetch(tf.data.experimental.AUTOTUNE)
        )

        return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples

    def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> Tuple[tf.data.Dataset, int, int]:
        """
        Returns a test :class:`~tf.data.Dataset` along with the number of prediction
        steps and the number of examples.

        Args:
            test_dataset (:class:`~tf.data.Dataset`):
                The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
                a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
                calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
                as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
                by calling ``model(features, **labels)``.

        Subclass and override this method if you want to inject some custom behavior.
        """
        num_examples = tf.data.experimental.cardinality(test_dataset).numpy()

        if num_examples < 0:
            # NOTE(review): the message says "training" but this checks the test dataset.
            raise ValueError("The training dataset must have an asserted cardinality")

        approx = math.floor if self.args.dataloader_drop_last else math.ceil
        steps = approx(num_examples / self.args.eval_batch_size)
        ds = (
            test_dataset.repeat()
            .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
            .prefetch(tf.data.experimental.AUTOTUNE)
        )

        return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
        """
        # NOTE(review): if only one of optimizer/lr_scheduler was supplied via
        # `optimizers`, this condition keeps the partial pair untouched -- confirm intended.
        if not self.optimizer and not self.lr_scheduler:
            self.optimizer, self.lr_scheduler = create_optimizer(
                self.args.learning_rate,
                num_training_steps,
                self.args.warmup_steps,
                adam_beta1=self.args.adam_beta1,
                adam_beta2=self.args.adam_beta2,
                adam_epsilon=self.args.adam_epsilon,
                weight_decay_rate=self.args.weight_decay,
                power=self.args.poly_power,
            )

    def setup_wandb(self):
        """
        Setup the optional Weights & Biases (`wandb`) integration.

        One can subclass and override this method to customize the setup if needed. Find more information
        `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:

        Environment:
            WANDB_PROJECT:
                (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
            WANDB_DISABLED:
                (Optional): boolean - defaults to false, set to "true" to disable wandb entirely
        """
        # Backward compatibility with subclasses that defined `_setup_wandb`.
        if hasattr(self, "_setup_wandb"):
            warnings.warn(
                "The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
                FutureWarning,
            )
            return self._setup_wandb()

        logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
        combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
        wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)

    def setup_comet(self):
        """
        Setup the optional Comet.ml integration.

        Environment:
            COMET_MODE:
                (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
            COMET_PROJECT_NAME:
                (Optional): str - Comet.ml project name for experiments
            COMET_OFFLINE_DIRECTORY:
                (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"

        For a number of configurable items in the environment,
        see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
        """
        comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
        args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
        experiment = None
        if comet_mode == "ONLINE":
            experiment = comet_ml.Experiment(**args)
            logger.info("Automatic Comet.ml online logging enabled")
        elif comet_mode == "OFFLINE":
            args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
            experiment = comet_ml.OfflineExperiment(**args)
            logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
        if experiment is not None:
            experiment._set_model_graph(self.model, framework="transformers")
            experiment._log_parameters(self.args, prefix="args/", framework="transformers")
            experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")

    def prediction_loop(
        self,
        dataset: tf.data.Dataset,
        steps: int,
        num_examples: int,
        description: str,
        prediction_loss_only: Optional[bool] = None,
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
        :func:`~transformers.TFTrainer.predict`.

        Works both with or without labels.
        """
        # Backward compatibility with subclasses that defined `_prediction_loop`.
        if hasattr(self, "_prediction_loop"):
            warnings.warn(
                "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
                FutureWarning,
            )
            return self._prediction_loop(
                dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
            )

        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )

        logger.info("***** Running %s *****", description)
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Batch size = %d", self.args.eval_batch_size)

        # Accumulated across batches; stay None when prediction_loss_only is set.
        label_ids: Optional[np.ndarray] = None
        preds: Optional[np.ndarray] = None
        self.eval_loss = tf.keras.metrics.Sum()

        # Reset the past mems state at the beginning of the evaluation if necessary.
        if self.args.past_index >= 0:
            self._past = None

        for step, batch in enumerate(dataset):
            logits = self.distributed_prediction_steps(batch)
            _, labels = batch

            if not prediction_loss_only:
                if isinstance(logits, tuple):
                    logits = logits[0]

                if isinstance(labels, tuple):
                    labels = labels[0]

                # With multiple replicas, logits/labels are PerReplica values;
                # concatenate each replica's chunk into the numpy accumulators.
                if self.args.n_replicas > 1:
                    for val in logits.values:
                        if preds is None:
                            preds = val.numpy()
                        else:
                            preds = np.append(preds, val.numpy(), axis=0)

                    for val in labels.values:
                        if label_ids is None:
                            label_ids = val.numpy()
                        else:
                            label_ids = np.append(label_ids, val.numpy(), axis=0)
                else:
                    if preds is None:
                        preds = logits.numpy()
                    else:
                        preds = np.append(preds, logits.numpy(), axis=0)

                    if label_ids is None:
                        label_ids = labels.numpy()
                    else:
                        label_ids = np.append(label_ids, labels.numpy(), axis=0)

            # The dataset is infinite (repeat()), so stop manually.
            # NOTE(review): the break fires after processing batch index `steps`,
            # i.e. steps + 1 batches are consumed -- verify this is intended.
            if step == steps:
                break

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}

        metrics["eval_loss"] = self.eval_loss.result().numpy() / steps

        # Prefix every metric with "eval_" so downstream logging is uniform.
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)

    def log(self, logs: Dict[str, float]) -> None:
        """
        Log :obj:`logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (:obj:`Dict[str, float]`):
                The values to log.
        """
        # Backward compatibility with subclasses that defined `_log`.
        if hasattr(self, "_log"):
            warnings.warn(
                "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
                FutureWarning,
            )
            return self._log(logs)
        logs["epoch"] = self.epoch_logging

        # Fan the values out to TensorBoard, W&B and Comet.ml when available.
        if self.tb_writer:
            with self.tb_writer.as_default():
                for k, v in logs.items():
                    tf.summary.scalar(k, v, step=self.global_step)
            self.tb_writer.flush()

        if is_wandb_available():
            wandb.log(logs, step=self.global_step)

        if is_comet_available():
            experiment = comet_ml.config.get_global_experiment()
            if experiment is not None:
                experiment._log_metrics(
                    logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
                )

        output = {**logs, **{"step": self.global_step}}

        logger.info(output)

    def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
        """
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are
        task-dependent (pass it to the init :obj:`compute_metrics` argument).

        Args:
            eval_dataset (:class:`~tf.data.Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
                ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
                If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
                labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
                multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
        """
        eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)

        output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
        logs = {**output.metrics}
        logs["epoch"] = self.epoch_logging

        self.log(logs)

        return output.metrics

    def prediction_step(
        self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
    ) -> tf.Tensor:
        """
        Compute the prediction on features and update the loss with labels.

        Subclass and override to inject some custom behavior.
        """
        per_example_loss, logits = self.run_model(features, labels, False)
        # Normalize by the number of real (non-padding) instances in the global batch.
        scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)

        self.eval_loss.update_state(scaled_loss)

        return logits

    @tf.function
    def distributed_prediction_steps(self, batch):
        # Runs one prediction step on every replica under the distribution strategy.
        nb_instances_in_batch = self._compute_nb_instances(batch)
        inputs = self._get_step_inputs(batch, nb_instances_in_batch)

        logits = self.args.strategy.run(self.prediction_step, inputs)

        return logits

    def train(self) -> None:
        """
        Train method to train the model.
        """
        train_ds = self.get_train_tfdataset()

        if self.args.debug:
            tf.summary.trace_on(graph=True, profiler=True)

        self.gradient_accumulator.reset()

        num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size

        # In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
        # the dataset is repeated before being batched.
        # It has the effect only when TPU is used which requires explicit tensor shape in order to make
        # the gradient accumulation implementation work.
        approx = math.floor if self.args.dataloader_drop_last else math.ceil
        num_update_steps_per_epoch = approx(num_update_steps_per_epoch)

        # At least one update for each epoch.
        num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
        self.steps_per_epoch = num_update_steps_per_epoch

        # max_steps takes precedence over num_train_epochs when positive.
        if self.args.max_steps > 0:
            t_total = self.args.max_steps
            epochs = (self.args.max_steps // self.steps_per_epoch) + int(
                self.args.max_steps % self.steps_per_epoch > 0
            )
        else:
            t_total = self.steps_per_epoch * self.args.num_train_epochs
            epochs = self.args.num_train_epochs

        # Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
        epochs = float(epochs)

        with self.args.strategy.scope():
            self.create_optimizer_and_scheduler(num_training_steps=t_total)
            folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
            ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
            self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)

            iterations = self.optimizer.iterations
            epochs_trained = 0
            steps_trained_in_current_epoch = 0
            # Resume from the latest checkpoint when one exists, restoring the
            # global step and how far into the current epoch we were.
            if self.model.ckpt_manager.latest_checkpoint:
                logger.info(
                    "Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
                )
                ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()

                self.global_step = iterations.numpy()

                epochs_trained = self.global_step // self.steps_per_epoch
                steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch

                logger.info("  Continuing training from checkpoint, will skip to saved global_step")
                logger.info("  Continuing training from epoch %d", epochs_trained)
                logger.info("  Continuing training from global step %d", self.global_step)
                logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

            tf.summary.experimental.set_step(self.global_step)

            with self.tb_writer.as_default():
                tf.summary.text("args", self.args.to_json_string())

            self.tb_writer.flush()

            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", self.num_train_examples)
            # TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
            logger.info("  Num Epochs = %d", epochs)
            logger.info("  Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
            logger.info(
                "  Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
            )
            logger.info("  Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
            logger.info("  Steps per epoch = %d", self.steps_per_epoch)
            logger.info("  Total optimization steps = %d", t_total)

            self.train_loss = tf.keras.metrics.Sum()
            start_time = datetime.datetime.now()

            for epoch_iter in range(epochs_trained, int(epochs)):
                # Reset the past mems state at the beginning of each epoch if necessary.
                if self.args.past_index >= 0:
                    self._past = None

                for step, batch in enumerate(train_ds):

                    # Skip past any already trained steps if resuming training
                    if steps_trained_in_current_epoch > 0:
                        steps_trained_in_current_epoch -= 1
                        continue

                    self.distributed_training_steps(batch)

                    self.global_step = iterations.numpy()
                    self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch

                    training_loss = self.train_loss.result() / (step + 1)

                    if self.args.debug:
                        logs = {}
                        logs["loss"] = training_loss.numpy()
                        logs["epoch"] = self.epoch_logging

                        self.log(logs)

                    if self.global_step == 1 and self.args.debug:
                        with self.tb_writer.as_default():
                            tf.summary.trace_export(
                                name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
                            )

                    if (
                        self.args.eval_steps > 0
                        and self.args.evaluate_during_training
                        and self.global_step % self.args.eval_steps == 0
                    ):
                        self.evaluate()

                    if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
                        self.global_step == 1 and self.args.logging_first_step
                    ):
                        logs = {}
                        logs["loss"] = training_loss.numpy()
                        logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
                        logs["epoch"] = self.epoch_logging

                        self.log(logs)

                    if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                        ckpt_save_path = self.model.ckpt_manager.save()

                        logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))

                    if self.args.max_steps > 0 and self.global_step >= t_total:
                        break

                    # End of epoch: the repeated dataset never exhausts, so break manually.
                    if self.global_step % self.steps_per_epoch == 0:
                        break

                self.train_loss.reset_states()

                if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
                    break

            end_time = datetime.datetime.now()

            logger.info("Training took: {}".format(str(end_time - start_time)))

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

    def training_step(self, features, labels, nb_instances_in_global_batch):
        """
        Perform a training step on features and labels.

        Subclass and override to inject some custom behavior.
        """
        per_example_loss, _ = self.run_model(features, labels, True)
        scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
        gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
        # Variables not reached by the loss get zero gradients instead of None.
        gradients = [
            g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
        ]

        if self.args.gradient_accumulation_steps > 1:
            self.gradient_accumulator(gradients)

        self.train_loss.update_state(scaled_loss)

        # Gradients are returned only when they are applied immediately (no
        # accumulation); otherwise they live in self.gradient_accumulator and
        # this method implicitly returns None.
        if self.args.gradient_accumulation_steps == 1:
            return gradients

    def apply_gradients(self, features, labels, nb_instances_in_global_batch):
        if self.args.gradient_accumulation_steps == 1:
            gradients = self.training_step(features, labels, nb_instances_in_global_batch)

            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
        else:
            # Gradient accumulation: run `gradient_accumulation_steps` micro-steps,
            # each on the leading `train_batch_size // n_replicas` slice, rotating
            # the consumed slice to the back so every slice is seen exactly once
            # before the accumulated (clipped) gradients are applied.
            for _ in tf.range(self.args.gradient_accumulation_steps):
                reduced_features = {
                    k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
                }
                reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]

                self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)

                features = {
                    k: tf.concat(
                        [ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
                        axis=0,
                    )
                    for k, ft in features.items()
                }

                labels = tf.concat(
                    [labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
                )

            gradients = self.gradient_accumulator.gradients
            gradients = [
                (tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
            ]

            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
            self.gradient_accumulator.reset()

    @tf.function
    def distributed_training_steps(self, batch):
        # Runs one (possibly accumulated) optimization step on every replica.
        with self.args.strategy.scope():
            nb_instances_in_batch = self._compute_nb_instances(batch)
            inputs = self._get_step_inputs(batch, nb_instances_in_batch)

            self.args.strategy.run(self.apply_gradients, inputs)

    @staticmethod
    def _compute_nb_instances(batch):
        # Count the real training instances in the global batch; labels equal
        # to -100 are the ignore/padding marker and are excluded.
        labels = batch[-1]
        if isinstance(labels, PerReplica):
            labels = tf.concat(labels.values, axis=0)

        nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))

        return nb_instances

    @staticmethod
    def _get_step_inputs(batch, nb_instances):
        features, labels = batch

        if isinstance(labels, PerReplica):
            # need to make a `PerReplica` objects for ``nb_instances``
            nb_instances = PerReplica([nb_instances] * len(labels.values))

        step_inputs = (features, labels, nb_instances)

        return step_inputs

    def run_model(self, features, labels, training):
        """
        Computes the loss of the given features and labels pair.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            features (:obj:`tf.Tensor`): A batch of input features.
            labels (:obj:`tf.Tensor`): A batch of labels.
            training (:obj:`bool`): Whether or not to run the model in training mode.

        Returns:
            A tuple of two :obj:`tf.Tensor`: The loss and logits.
        """
        # Backward compatibility with subclasses that defined `_run_model`.
        if hasattr(self, "_run_model"):
            warnings.warn(
                "The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
                FutureWarning,
            )
            return self._run_model(features, labels, training)

        # Feed cached mems back in for models that expose a past state (e.g. XLNet-style).
        if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
            features["mems"] = self._past

        if isinstance(labels, (dict)):
            outputs = self.model(features, training=training, **labels)[:2]
        else:
            outputs = self.model(features, labels=labels, training=training)[:2]

        loss, logits = outputs[:2]

        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]

        return loss, logits

    def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels.
        In that case, this method will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:class:`~tf.data.Dataset`):
                Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
                ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
                the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
                a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
                calculated by calling ``model(features, **labels)``.

        Returns:
            `NamedTuple`:
            predictions (:obj:`np.ndarray`):
                The predictions on :obj:`test_dataset`.
            label_ids (:obj:`np.ndarray`, `optional`):
                The labels (if the dataset contained some).
            metrics (:obj:`Dict[str, float]`, `optional`):
                The potential dictionary of metrics (if the dataset contained labels).
        """
        test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)

        return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")

    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir

        logger.info("Saving model in {}".format(output_dir))

        if not isinstance(self.model, TFPreTrainedModel):
            raise ValueError("Trainer.model appears to not be a PreTrainedModel")

        self.model.save_pretrained(output_dir)
| src/transformers/trainer_tf.py | 34,717 | TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer default to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Setup the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
Setup the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
Train method to train the model.
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
Tensorflow trainer class.
Reset the past mems state at the beginning of the evaluation if necessary. Clean the state at the end of training In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because the dataset is repeated before being batched. It has the effect only when TPU is used which requires explicit tensor shape in order to make the gradient accumulation implementation work. At least one update for each epoch. Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always. TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ? Reset the past mems state at the beginning of each epoch if necessary. Skip past any already trained steps if resuming training Clean the state at the end of training need to make a `PerReplica` objects for ``nb_instances`` | 9,110 | en | 0.788276 |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
from cfnlint.helpers import RESOURCE_SPECS
class AllowedValue(CloudFormationLintRule):
    """Check if properties have a valid value"""
    id = 'E3030'
    shortdesc = 'Check if properties have a valid value'
    description = 'Check if properties have a valid value in case of an enumator'
    source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'
    tags = ['resources', 'property', 'allowed value']

    def initialize(self, cfn):
        """Initialize the rule"""
        # Register every resource/property type from the region's spec so this
        # rule is consulted for all of them.
        region_specs = RESOURCE_SPECS.get(cfn.regions[0])
        self.resource_property_types.extend(region_specs.get('ResourceTypes'))
        self.resource_sub_property_types.extend(region_specs.get('PropertyTypes'))

    def check_value(self, value, path, property_name, **kwargs):
        """Check Value"""
        allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
        if not allowed_value_specs:
            return []
        # Always compare the allowed value as a string, strict typing is not of concern for this rule
        if str(value) in allowed_value_specs:
            return []
        message = 'You must specify a valid value for {0} ({1}).\nValid values are {2}'
        return [RuleMatch(path, message.format(property_name, value, allowed_value_specs))]

    def check(self, cfn, properties, value_specs, property_specs, path):
        """Check itself"""
        matches = []
        for props, props_path in properties.items_safe(path[:]):
            for prop in props:
                if prop not in value_specs:
                    continue
                value = value_specs.get(prop).get('Value', {})
                if not value:
                    continue
                value_type = value.get('ValueType', '')
                property_type = property_specs.get('Properties').get(prop).get('Type')
                matches.extend(
                    cfn.check_value(
                        props, prop, props_path,
                        check_value=self.check_value,
                        value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
                        cfn=cfn, property_type=property_type, property_name=prop
                    )
                )
        return matches

    def match_resource_sub_properties(self, properties, property_type, path, cfn):
        """Match for sub properties"""
        property_types = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes')
        specs = property_types.get(property_type, {}).get('Properties', {})
        return list(self.check(cfn, properties, specs, property_types.get(property_type), path))

    def match_resource_properties(self, properties, resource_type, path, cfn):
        """Check CloudFormation Properties"""
        resource_types = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes')
        specs = resource_types.get(resource_type, {}).get('Properties', {})
        return list(self.check(cfn, properties, specs, resource_types.get(resource_type), path))
| src/cfnlint/rules/resources/properties/AllowedValue.py | 4,477 | Check if properties have a valid value
Check itself
Check Value
Initialize the rule
Check CloudFormation Properties
Match for sub properties
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Always compare the allowed value as a string, strict typing is not of concern for this rule | 1,165 | en | 0.868102 |
#!/usr/bin/env python3
"""Entry point invoked as a Radarr/Sonarr/Lidarr custom-script hook.

Reads the triggering application and event from environment variables
(wrapped by ``utils.env``) and tells Kodi to update/clean its library.
"""
import os
import sys

from utils import config, logger, env
from librarian.librarian import Librarian

log = logger.get_log('KodiLibrarian')
kodi = Librarian(config.hosts, update_while_playing=config.update_while_playing)

if env.event == 'download':
    if env.calledBy == 'radarr':
        log.info('Radarr has downloaded "{}" {}. Initiating update process.'.format(env.movieTitle, env.moviePath))
        kodi.updateMovie(env.movieTitle, env.movieDirectory, env.moviePath)
        if config.clean_after_update:
            kodi.cleanLibrary('movies')
    elif env.calledBy == 'sonarr':
        log.info('Sonarr has downloaded "{}" {}. Initiating update process.'.format(env.showTitle, env.episodePath))
        kodi.updateTVShow(env.episodePath, env.showDirectory)
        if config.clean_after_update:
            kodi.cleanLibrary('tvshows')
    elif env.calledBy == 'lidarr':
        log.info('Lidarr not supported yet!! Aborting.')
elif env.event == 'test':
    log.debug('Called with test environment from {}'.format(env.calledBy))
    sys.exit(0)
else:
    log.critical('Could not find any recognizable environment variables. Aborting.')
    # Exit non-zero so the calling application surfaces the misconfiguration
    # instead of treating the hook run as successful (previously exited 0).
    sys.exit(1)
| KodiLibrarian.py | 1,175 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
import json
import platform
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from morango.models import InstanceIDModel
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
import kolibri
from .. import error_constants
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.serializers import PublicChannelSerializer
class InfoViewSet(viewsets.ViewSet):
    """
    An equivalent endpoint in studio which allows kolibri devices to know
    if this device can serve content.
    Spec doc: https://docs.google.com/document/d/1XKXQe25sf9Tht6uIXvqb3T40KeY3BLkkexcV08wvR9M/edit#
    """

    def list(self, request):
        """Returns metadata information about the device"""
        # Morango tracks a stable identity for this installation.
        instance_model = InstanceIDModel.get_or_create_current_instance()[0]
        return Response(
            {
                "application": "kolibri",
                "kolibri_version": kolibri.__version__,
                "instance_id": instance_model.id,
                "device_name": instance_model.hostname,
                "operating_system": platform.system(),
            }
        )
def _get_channel_list(version, params, identifier=None):
if version == "v1":
return _get_channel_list_v1(params, identifier=identifier)
else:
raise LookupError()
def _get_channel_list_v1(params, identifier=None):
    """Return available channels filtered by optional keyword/language params."""
    keyword = params.get("keyword", "").strip()
    language_id = params.get("language", "").strip()

    if identifier:
        channels = ChannelMetadata.objects.filter(pk=identifier)
    else:
        channels = ChannelMetadata.objects.all()

    if keyword:
        channels = channels.filter(
            Q(name__icontains=keyword) | Q(description__icontains=keyword)
        )

    if language_id:
        # A channel matches a language either on its root node or on any node
        # (or file) inside its tree.
        matching_tree_ids = (
            ContentNode.objects.prefetch_related("files")
            .filter(
                Q(lang__id__icontains=language_id)
                | Q(files__lang__id__icontains=language_id)
            )
            .values_list("tree_id", flat=True)
        )
        channels = channels.filter(
            Q(root__lang__id__icontains=language_id)
            | Q(root__tree_id__in=matching_tree_ids)
        )

    return channels.filter(root__available=True).distinct()
@api_view(["GET"])
def get_public_channel_list(request, version):
""" Endpoint: /public/<version>/channels/?=<query params> """
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
@api_view(["GET"])
def get_public_channel_lookup(request, version, identifier):
""" Endpoint: /public/<version>/channels/lookup/<identifier> """
try:
channel_list = _get_channel_list(
version,
request.query_params,
identifier=identifier.strip().replace("-", ""),
)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
if not channel_list.exists():
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
| kolibri/core/public/api.py | 3,900 | An equivalent endpoint in studio which allows kolibri devices to know
if this device can serve content.
Spec doc: https://docs.google.com/document/d/1XKXQe25sf9Tht6uIXvqb3T40KeY3BLkkexcV08wvR9M/edit#
Endpoint: /public/<version>/channels/?=<query params>
Endpoint: /public/<version>/channels/lookup/<identifier>
Returns metadata information about the device | 358 | en | 0.632436 |
def power(x, y, serialId):
    """Fuel-cell power level of grid cell (x, y) for the given serial number."""
    rack_id = x + 10
    level = (rack_id * y + serialId) * rack_id
    # Keep only the hundreds digit, then shift down by 5.
    hundreds = (level % 1000) // 100
    return hundreds - 5
if __name__ == '__main__':
    serialId = 1788
    # serialId = 42
    # serialId = 18

    # Summed-area table: sat[(x, y)] is the sum of power levels over the
    # rectangle (1,1)..(x,y).  Built with the standard O(n^2) recurrence
    #   sat[x,y] = power(x,y) + sat[x-1,y] + sat[x,y-1] - sat[x-1,y-1]
    # instead of re-summing a partial row and column per cell (O(n^3)).
    sat = {}
    for i in range(0, 301):
        sat[(0, i)] = 0
        sat[(i, 0)] = 0
    for i in range(1, 301):  # row (y)
        for j in range(1, 301):  # col (x)
            sat[(j, i)] = (
                power(j, i, serialId)
                + sat[(j - 1, i)]
                + sat[(j, i - 1)]
                - sat[(j - 1, i - 1)]
            )

    largest_v = -1000000000
    largest_cord = None
    largest_s = 0
    # For every square size k and top-left corner (j, i), the square's total
    # power is a constant-time four-corner lookup in the summed-area table.
    for k in range(1, 301):
        for i in range(1, 301 - k + 1):
            for j in range(1, 301 - k + 1):
                v = (
                    sat[(j + k - 1, i + k - 1)]
                    + sat[(j - 1, i - 1)]
                    - sat[(j + k - 1, i - 1)]
                    - sat[(j - 1, i + k - 1)]
                )
                if v > largest_v:
                    largest_v = v
                    largest_cord = (j, i)
                    largest_s = k
    print(largest_cord, largest_v, largest_s)
"""Tests for 2d flow around a cylinder with a conforming mesh and rans3p"""
from builtins import range
from builtins import object
from proteus.iproteus import *
from proteus import Comm
from proteus import Context
import tables
import importlib
comm = Comm.get()
Profiling.logLevel = 7
Profiling.verbose = False
import numpy as np
class Test_HotStart_rans3p(object):
    """Hot-start regression tests for the RANS3P solver.

    Each test runs the problem once to write a solution archive, then runs it
    again hot-started from that archive and compares against a stored
    reference solution (P1 and P2 velocity spaces).
    """

    @classmethod
    def setup_class(cls):
        # Make this test directory importable so the relative proteus modules resolve.
        cls._scriptdir = os.path.dirname(os.path.abspath(__file__))
        sys.path.insert(0,cls._scriptdir)

    @classmethod
    def teardown_class(cls):
        sys.path.remove(cls._scriptdir)
        pass

    def setup_method(self, method):
        """Initialize the test problem. """
        self.aux_names = []

    def teardown_method(self, method):
        pass

    def test_hotstart_p1(self):
        # First call writes solution_p1.h5; second call hot-starts from it at
        # t=0.1 and checks the result against the archived comparison file.
        self.compare_name = "T01P1_hotstart"
        self.example_setting("T=0.1 vspaceOrder=1 onlySaveFinalSolution=True",h5_filename="solution_p1")
        self.example_setting("T=0.1 vspaceOrder=1 onlySaveFinalSolution=True isHotStart=True", h5_filename="solution_p1", check_result=True, isHotstart=True,hotstart_t=0.1)

    def test_hotstart_p2(self):
        # Same as test_hotstart_p1 but with quadratic (P2) velocity space.
        self.compare_name = "T01P2_hotstart"
        self.example_setting("T=0.1 vspaceOrder=2 onlySaveFinalSolution=True",h5_filename="solution_p2")
        self.example_setting("T=0.1 vspaceOrder=2 onlySaveFinalSolution=True isHotStart=True", h5_filename="solution_p2", check_result=True, isHotstart=True,hotstart_t=0.1)

    def example_setting(self, pre_setting, h5_filename, check_result=False, isHotstart=False, hotstart_t=0.0):
        """Run one simulation.

        :param pre_setting: space-separated proteus context options string.
        :param h5_filename: basename of the HDF5 archive to write/read.
        :param check_result: when True, compare ``u_t2`` against the archived file.
        :param isHotstart: when True, resume from ``h5_filename`` at ``hotstart_t``.
        :param hotstart_t: simulation time to resume from.
        """
        Context.contextOptionsString = pre_setting
        from . import NS_hotstart_so as my_so
        # Re-import so module-level state picks up the new context options.
        reload(my_so)
        # defined in iproteus
        opts.profile = False
        opts.gatherArchive = True
        opts.hotStart = isHotstart
        opts.hotStartTime = hotstart_t
        pList=[]
        nList=[]
        sList=[]
        for (pModule,nModule) in my_so.pnList:
            pList.append(
                importlib.import_module("."+pModule,
                                        "proteus.tests.HotStart_3P"))
            nList.append(
                importlib.import_module("."+nModule,
                                        "proteus.tests.HotStart_3P"))
            if pList[-1].name == None:
                pList[-1].name = pModule
            # Reload so cached p/n modules rebuild against the current context.
            reload(pList[-1]) # Serious error
            reload(nList[-1])
        if my_so.sList == []:
            for i in range(len(my_so.pnList)):
                s = default_s
                sList.append(s)
        else:
            sList = my_so.sList
        my_so.name = h5_filename#"_hotstart_"+self.compare_name #save data with different filename
        # NUMERICAL SOLUTION #
        ns = proteus.NumericalSolution.NS_base(my_so,
                                               pList,
                                               nList,
                                               sList,
                                               opts)
        self.aux_names.append(ns.modelList[0].name)
        ns.calculateSolution(my_so.name)
        if check_result:
            # COMPARE VS SAVED FILES #
            expected_path = 'comparison_files/' + self.compare_name + '.h5'
            with tables.open_file(os.path.join(self._scriptdir, expected_path)) as expected, \
                 tables.open_file( my_so.name + '.h5') as actual:
                assert np.allclose(expected.root.u_t2,
                                   actual.root.u_t2,
                                   atol=1e-10)
| proteus/tests/HotStart_3P/test_HotStart_rans3p.py | 3,596 | Initialize the test problem.
Tests for 2d flow around a cylinder with a conforming mesh and rans3p
defined in iproteus Serious error"_hotstart_"+self.compare_name save data with different filename NUMERICAL SOLUTION COMPARE VS SAVED FILES | 242 | en | 0.714572 |
import json
import hashlib
import os
import pickle
import re
import shutil
class Block:
    """One node of the blockchain: an id, a payload, its own hash and the hash
    of the preceding block.  ``checker`` flags whether the block is valid."""

    def __init__(self, numberBlock, data, previousHash, idHash):
        self._idBlock = numberBlock
        self._data = data
        self._previousHash = previousHash
        self._idHash = idHash
        self._checker = True

    def getIdBlock(self):
        return self._idBlock

    def getData(self):
        return self._data

    def getPreviousHash(self):
        return self._previousHash

    def getIdHash(self):
        return self._idHash

    def getChecker(self):
        return self._checker

    def setData(self, data):
        self._data = data

    def setIdHash(self, idHash):
        self._idHash = idHash

    def setChecker(self, boolInfo):
        self._checker = boolInfo

    def getBlock(self):
        """Return the block as [id, data, previous hash, own hash]."""
        return [self._idBlock, self._data, self._previousHash, self._idHash]

    def getInfoGraph(self):
        """Graphviz node label; uses escaped two-character \\n line breaks."""
        fields = (
            "Bloque: " + str(self._idBlock),
            "Data: " + str(self._data),
            "Hash Bloque: " + str(self._idHash),
            "Hash Ant.: " + str(self._previousHash),
        )
        return "\\n".join(fields)

    def verifyBlock(self, hashAnteriorBA):
        """True when the stored previous-hash equals the one given."""
        return hashAnteriorBA == self._previousHash
class Blockchain:
    """Ordered chain of :class:`Block` objects, persisted as JSON and rendered
    with Graphviz.

    NOTE(review): all paths are built with Windows separators, so persistence
    and rendering only work on Windows as written.
    """

    def __init__(self):
        self.idChain = 1          # id assigned to the next inserted block
        self.previous = 0         # hash of the last inserted block (0 before any insert)
        self.blocks_list = []     # in-memory chain of Block objects
        self.firstHash = ""       # hash of the very first block, for later verification
        self.checkerChain = True  # set False by graphBlockchain when a link is broken

    def generate_hash(self, data):
        """Return the SHA-256 hex digest of the pickled ``data``.

        NOTE(review): a hex digest always matches the pattern, so the loop runs
        exactly once; kept as-is for compatibility.
        """
        pattern = r'[0-9a-zA-Z]+'
        objectStr = pickle.dumps(data)
        while True:
            id_hash = hashlib.sha256(objectStr).hexdigest()
            if re.match(pattern, id_hash):
                return id_hash

    def verifyFirstBlock(self, hashActual):
        """True when ``hashActual`` matches the recorded first-block hash."""
        if self.firstHash == hashActual:
            return True
        return False

    def insertBlock(self, tupla, nameJson):
        """Append a new block holding ``tupla`` and rewrite the whole JSON file."""
        id_hash = self.generate_hash(tupla)
        newBlock = Block(self.idChain, tupla, self.previous, id_hash)
        self.blocks_list.append(newBlock)
        file = self.load_json(nameJson)
        file.write(json.dumps([j.getBlock() for j in self.blocks_list]))
        file.close()
        # only for the first block: remember its hash for later verification
        if self.idChain == 1:
            self.firstHash = id_hash
        self.idChain += 1
        self.previous = id_hash

    def graphBlockchain(self, nombreImagen):
        """Write <name>.dot and render <name>.png via the Graphviz ``dot`` CLI."""
        graph = 'digraph G{\n'
        graph += 'rankdir=LR;\n'
        graph += "node[shape = \"box\"]\n"
        graph += self.__graficar()
        graph += '}'
        direccion = self.pathImageGraph()
        file = open(f"{direccion}\\{nombreImagen}.dot", "w")
        file.write(graph)
        file.close()
        os.system(f'dot -Tpng {direccion}\\{nombreImagen}.dot -o {direccion}\\{nombreImagen}.png')

    def __graficar(self):
        """Build the DOT statements: green nodes while the chain verifies, red
        for every node from the first broken link onward."""
        graph = ""
        bandera = True
        for i in range(len(self.blocks_list)):
            info = self.blocks_list[i].getInfoGraph()
            nodo = 'node' + str(self.blocks_list[i].getIdBlock())
            color = "green"
            # If it is not the first block, verify against the previous block's hash
            if not (i == 0):
                hashAnterior = self.blocks_list[i-1].getIdHash()
                brokeChain = self.blocks_list[i].verifyBlock(str(hashAnterior))
            # If it is the first, verify its own hash (first block's previous is always 0)
            else:
                hashActual = self.blocks_list[i].getIdHash()
                brokeChain = self.verifyFirstBlock(hashActual)
            if not brokeChain:
                self.checkerChain = False
                bandera = False
            if bandera is False:
                color = "red"
            # If it is not the last block, also draw the pointer to the next node
            if not (i == (len(self.blocks_list) - 1)):
                nextId = self.blocks_list[i + 1].getIdBlock()
                nextNodo = 'node' + str(nextId)
                graph += nodo + f'[label="{info}", color="{color}", penwidth=3]\n'
                graph += nodo + '->' + nextNodo + '\n'
            # The last block gets no next pointer
            else:
                graph += nodo + f'[label="{info}", color="{color}", penwidth=3]\n'
            # If it is not the first block, draw the back pointer
            if not (i == 0):
                nodoAnterior = "node" + str(self.blocks_list[i-1].getIdBlock())
                if color == "green":
                    graph += nodo + '->' + nodoAnterior + "\n"
                    graph += nodoAnterior + f"[color={color}]"
        return graph

    def updateBlock(self, oldTuple, newTuple, nameJson):
        """Replace ``oldTuple`` with ``newTuple`` in the JSON file and the
        in-memory list, recomputing the block hash.

        NOTE(review): following blocks' previous-hash pointers are not updated,
        so the chain is left broken after an update (tamper-evidence demo).
        """
        # Read the currently persisted chain and compute the replacement hash
        file = open(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json", "r")
        JSblock_list = json.loads(file.read())
        file.close()
        newHash = self.generate_hash(newTuple)
        # Walk the JSON copy and update matching entries
        for blockJS in JSblock_list:
            if oldTuple == blockJS[1]:
                blockJS[1] = newTuple
                blockJS[3] = newHash
        # Walk the in-memory blocks and update them as well
        for block in self.blocks_list:
            if oldTuple == block.getData():
                block.setData(newTuple)
                block.setIdHash(newHash)
        file = open(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json", "w+")
        file.write(json.dumps(JSblock_list))
        file.close()

    # ------------------------------------------------------- FILES ----------------------------------------------------
    def load_json(self, nombre):
        """Open DataJsonBC/<name>.json for writing, creating the folder if needed."""
        if os.path.isdir(os.getcwd() + "\\DataJsonBC"):
            file = open(os.getcwd() + "\\DataJsonBC\\" + nombre + ".json", "+w")
            return file
        os.makedirs(os.getcwd() + "\\DataJsonBC")
        file = open(os.getcwd() + "\\DataJsonBC\\" + nombre + ".json", "+w")
        return file

    def pathImageGraph(self):
        """Return the ImageBlockChain folder path, creating it if missing."""
        if not os.path.isdir(os.getcwd() + "\\ImageBlockChain"):
            os.makedirs(os.getcwd() + "\\ImageBlockChain")
        direccion = os.getcwd() + "\\ImageBlockChain"
        return direccion

    def removeFilesBlock(self, nameJson):
        """Delete the persisted JSON file for ``nameJson`` if it exists."""
        if os.path.isdir(os.getcwd() + "\\DataJsonBC"):
            if os.path.isfile(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json"):
                os.remove(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json")
                # os.remove(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".dot")
| storage/fase2/team13/Blockchain.py | 6,499 | self._checker = False only for the first If is not the first, verify the previous hash If is the first, verify the actual hash, because the first always has previous in 0 If is not the last to put the next pointer If is the Last not put the next pointer If is not the First to the Back pointer Cambiando valores de la lista y generando nuevo hash Recorriendo y actualizando JSON recorriendo y actualizando Block list ------------------------------------------------------- FILES ---------------------------------------------------- os.remove(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".dot") | 593 | en | 0.445422 |
from d2lbook2 import notebook
from d2lbook2 import rst
import unittest
import nbconvert
# Sample markdown document exercising the d2l-book extensions the rst
# converter must handle: labels/refs, tabbed code cells, eval_rst blocks,
# nested fences, toc directives, images with attributes, and inline math.
_markdown_src = r'''
# Test
:label:`test`
first para
python is good
another para
This is :eqref:`sec_1`
```python2
1+2+3
```
python3 is better
- here
- haha
```{.input .python}
1+2+3
```
```{.input .python}
#@tab python2
1+2+3
```
```bash
````
aa
````
```
## Section 2
:label:`sec_2`
```eval_rst
.. only:: html
Table of Contents
-----------------
```
```toc
:numbered:
:maxdepth: 2
install
user/index
develop/index
```

:width:`400px`
$x=1$, :numref:`sec_2`
'''
class TestRst(unittest.TestCase):
    # TODO(mli) add some asserts

    def test_convert_notebook(self):
        """Inline math followed by :numref: must survive rst conversion intact."""
        nb = notebook.read_markdown(_markdown_src)
        body, _ = rst.convert_notebook(nb, {})
        math_lines = [
            line for line in body.split('\n') if line.startswith(':math:`x=1`')
        ]
        for line in math_lines:
            self.assertEqual(line, ':math:`x=1`, :numref:`sec_2`')
| d2lbook2/rst_test.py | 1,003 | TODO(mli) add some asserts | 26 | pt | 0.157909 |
from .default import DefaultAttackEval
from ..classifier import Classifier
from ..attacker import Attacker
import json
from tqdm import tqdm
class InvokeLimitException(Exception):
    """Raised internally when the wrapped classifier exceeds its query budget."""
    pass
class InvokeLimitClassifierWrapper(Classifier):
    """Wrap a classifier, count its queries, and raise
    :class:`InvokeLimitException` once the per-instance budget is exhausted
    (only while enforcement is enabled via :meth:`test`)."""

    def __init__(self, clsf, invoke_limit):
        self.__invoke_limit = invoke_limit
        self.__clsf = clsf
        self.__brk = False
        self.__invoke = 0

    def clear(self):
        """Reset the query counter (called before each attacked instance)."""
        self.__invoke = 0

    def test(self, limit=True):
        """Enable (default) or disable enforcement of the invoke limit."""
        self.__brk = limit

    def get_invoke(self):
        """Return the number of queries made since the last :meth:`clear`."""
        return self.__invoke

    def get_pred(self, input_, data):
        if self.__brk and self.__invoke >= self.__invoke_limit:
            raise InvokeLimitException()
        self.__invoke += len(input_)
        return self.__clsf.get_pred(input_, data)

    def get_prob(self, input_, data):
        if self.__brk and self.__invoke >= self.__invoke_limit:
            raise InvokeLimitException()
        self.__invoke += len(input_)
        return self.__clsf.get_prob(input_, data)

    def get_grad(self, input_, labels, data):
        # Consistency fix: use >= like get_pred/get_prob. The original strict >
        # allowed gradient queries to start one batch past the budget.
        if self.__brk and self.__invoke >= self.__invoke_limit:
            raise InvokeLimitException()
        self.__invoke += len(input_)
        return self.__clsf.get_grad(input_, labels, data)
class InvokeLimitAttackerWrapper(Attacker):
    """Run the wrapped attacker under the classifier's query budget, recording
    whether the budget was exceeded."""

    def __init__(self, attacker, clsf):
        self.__attacker = attacker
        self.__clsf = clsf
        self.__exceed = False

    def __call__(self, *args, **kwargs):
        # Arm the limit and reset counters for this instance.
        self.__clsf.test()
        self.__clsf.clear()
        self.__exceed = False
        ret = None
        try:
            ret = self.__attacker(*args, **kwargs)
        except InvokeLimitException:
            # Budget exhausted: report a failed attack (None result).
            self.__exceed = True
        self.__clsf.test(limit=False)
        return ret

    def exceed(self):
        """True when the most recent call hit the invoke limit."""
        return self.__exceed
class InvokeLimitedAttackEval(DefaultAttackEval):
    """
    Evaluate attackers and classifiers with invoke limitation.
    """

    def __init__(self, attacker, classifier, invoke_limit=100,
                 average_invoke=False, **kwargs):
        """
        :param Attacker attacker: The attacker you use.
        :param Classifier classifier: The classifier you want to attack.
        :param int invoke_limit: Limitation of invoke for each instance.
        :param bool average_invoke: If true, returns "Avg. Victim Model Queries".
        :param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.
        """
        super().__init__(attacker, classifier, **kwargs)
        # wrap classifier, attacker after super().__init__
        self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
        self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
        # keep a private version: the name-mangled aliases guarantee the
        # bookkeeping below keeps seeing the wrappers even if a subclass
        # reassigns self.attacker / self.classifier
        self.__attacker = self.attacker
        self.__classifier = self.classifier
        self.__average_invoke = average_invoke

    def measure(self, sentA, sentB):
        # Extend the base measurement with query-budget bookkeeping.
        info = super().measure(sentA, sentB)
        if self.__attacker.exceed():
            info["Query Exceeded"] = True
        else:
            info["Query Exceeded"] = False
        # only records succeed attacks
        if info["Succeed"] and self.__average_invoke:
            info["Queries"] = self.__classifier.get_invoke()
        return info

    def update(self, info):
        # Accumulate total queries (successful attacks only) and the number of
        # instances whose budget was exceeded.
        info = super().update(info)
        if "Queries" in info:
            if "invoke" not in self.__result:
                self.__result["invoke"] = 0
            self.__result["invoke"] += info["Queries"]
        if info["Query Exceeded"]:
            if "out_of_invoke" not in self.__result:
                self.__result["out_of_invoke"] = 0
            self.__result["out_of_invoke"] += 1
        return info

    def clear(self):
        super().clear()
        self.__result = {}

    def get_result(self):
        ret = super().get_result()
        if self.__average_invoke and "invoke" in self.__result:
            # average over successful attacks only (Queries is recorded only
            # for succeeded instances in measure())
            ret["Avg. Victim Model Queries"] = self.__result["invoke"] / ret["Successful Instances"]
        return ret
| OpenAttack/attack_evals/invoke_limit_eval.py | 4,148 | Evaluate attackers and classifiers with invoke limitation.
:param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: Limitation of invoke for each instance.
:param bool average_invoke: If true, returns "Avg. Victim Model Queries".
:param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.
wrap classifier, attacker after super().__init__ keep a private version only records succeed attacks | 493 | en | 0.470667 |
"""
Common utilities for the library
"""
import shutil
import sys
import os
import logging
from aws_lambda_builders.architecture import X86_64, ARM64
LOG = logging.getLogger(__name__)
def copytree(source, destination, ignore=None, include=None):
"""
Similar to shutil.copytree except that it removes the limitation that the destination directory should
be present.
:type source: str
:param source:
Path to the source folder to copy
:type destination: str
:param destination:
Path to destination folder
:type ignore: function
:param ignore:
A function that returns a set of file names to ignore, given a list of available file names. Similar to the
``ignore`` property of ``shutils.copytree`` method
:type include: Callable[[str], bool]
:param include:
A function that will decide whether a file should be copied or skipped it. It accepts file name as parameter
and return True or False. Returning True will continue copy operation, returning False will skip copy operation
for that file
"""
if not os.path.exists(source):
LOG.warning("Skipping copy operation since source %s does not exist", source)
return
if not os.path.exists(destination):
LOG.debug("Creating target folders at %s", destination)
os.makedirs(destination)
try:
# Let's try to copy the directory metadata from source to destination
LOG.debug("Copying directory metadata from source (%s) to destination (%s)", source, destination)
shutil.copystat(source, destination)
except OSError as ex:
# Can't copy file access times in Windows
LOG.debug("Unable to copy file access times from %s to %s", source, destination, exc_info=ex)
names = os.listdir(source)
if ignore is not None:
ignored_names = ignore(source, names)
else:
ignored_names = set()
for name in names:
# Skip ignored names
if name in ignored_names:
LOG.debug("File (%s) is in ignored set, skipping it", name)
continue
new_source = os.path.join(source, name)
new_destination = os.path.join(destination, name)
if include and not os.path.isdir(new_source) and not include(name):
LOG.debug("File (%s) doesn't satisfy the include rule, skipping it", name)
continue
if os.path.isdir(new_source):
copytree(new_source, new_destination, ignore=ignore, include=include)
else:
LOG.debug("Copying source file (%s) to destination (%s)", new_source, new_destination)
shutil.copy2(new_source, new_destination)
# NOTE: The below function is copied from Python source code and modified
# slightly to return a list of paths that match a given command
# instead of returning just the first match
# The function "which" at aws_lambda_builders/utils.py was copied from https://github.com/python/cpython/blob/3.7/Lib/shutil.py
# SPDX-License-Identifier: Python-2.0
# Copyright 2019 by the Python Software Foundation
def which(cmd, mode=os.F_OK | os.X_OK, executable_search_paths=None): # pragma: no cover
"""Given a command, mode, and executable search paths list, return the paths which
conforms to the given mode on the PATH with the prepended additional search paths,
or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults
to the result of os.environ.get("PATH")
Note: This function was backported from the Python 3 source code.
:type cmd: str
:param cmd:
Executable to be looked up in PATH.
:type mode: str
:param mode:
Modes of access for the executable.
:type executable_search_paths: list
:param executable_search_paths:
List of paths to look for `cmd` in preference order.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if executable_search_paths:
path = executable_search_paths + path
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
paths = []
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
paths.append(name)
return paths
def get_goarch(architecture):
"""
Parameters
----------
architecture : str
name of the type of architecture
Returns
-------
str
returns a valid GO Architecture value
"""
return "arm64" if architecture == ARM64 else "amd64"
| aws_lambda_builders/utils.py | 6,232 | Similar to shutil.copytree except that it removes the limitation that the destination directory should
be present.
:type source: str
:param source:
Path to the source folder to copy
:type destination: str
:param destination:
Path to destination folder
:type ignore: function
:param ignore:
A function that returns a set of file names to ignore, given a list of available file names. Similar to the
``ignore`` property of ``shutils.copytree`` method
:type include: Callable[[str], bool]
:param include:
A function that will decide whether a file should be copied or skipped it. It accepts file name as parameter
and return True or False. Returning True will continue copy operation, returning False will skip copy operation
for that file
Parameters
----------
architecture : str
name of the type of architecture
Returns
-------
str
returns a valid GO Architecture value
Given a command, mode, and executable search paths list, return the paths which
conforms to the given mode on the PATH with the prepended additional search paths,
or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults
to the result of os.environ.get("PATH")
Note: This function was backported from the Python 3 source code.
:type cmd: str
:param cmd:
Executable to be looked up in PATH.
:type mode: str
:param mode:
Modes of access for the executable.
:type executable_search_paths: list
:param executable_search_paths:
List of paths to look for `cmd` in preference order.
Common utilities for the library
Let's try to copy the directory metadata from source to destination Can't copy file access times in Windows Skip ignored names NOTE: The below function is copied from Python source code and modified slightly to return a list of paths that match a given command instead of returning just the first match The function "which" at aws_lambda_builders/utils.py was copied from https://github.com/python/cpython/blob/3.7/Lib/shutil.py SPDX-License-Identifier: Python-2.0 Copyright 2019 by the Python Software Foundation pragma: no cover Check that a given file can be accessed with the correct mode. Additionally check that `file` is not a directory, as on Windows directories pass the os.access check. If we're given a path with a directory part, look it up directly rather than referring to PATH directories. This includes checking relative to the current directory, e.g. ./script The current directory takes precedence on Windows. PATHEXT is necessary to check on Windows. See if the given file matches any of the expected path extensions. This will allow us to short circuit when given "python.exe". If it does match, only test that one, otherwise we have to try others. On other platforms you don't have things like PATHEXT to tell you what file suffixes are executable, so just pass on cmd as-is. | 2,882 | en | 0.827777 |
# -*- coding: utf-8 -*-
# # How long does a Computron take?
#
# - [build model of computron\-to\-wallclock relationship · Issue \#3459 · Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)
# ## Preface: Python Data Tools
#
# See also [shell.nix](shell.nix).
# +
import pandas as pd
import numpy as np
import sqlalchemy as sqla
import matplotlib.cm as cm
import dask
import dask.dataframe as dd
import dask.bag as db
dict(pandas=pd.__version__,
numpy=np.__version__,
sqlalchemy=sqla.__version__,
dask=dask.__version__)
# -
# ### Notebook / Scripting Authority
#
# As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context.
TOP = __name__ == '__main__'
# Logging is a bit of an exception to OCap discipline, as is stderr.
# +
import logging
from sys import stderr
logging.basicConfig(level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
if TOP:
log.info('notebook start')
# -
# ### Dask Parallel Scheduler UI
# +
from dask.distributed import Client, LocalCluster
if TOP:
cluster = LocalCluster(n_workers=8)
client = Client(cluster)
TOP and client
# -
# ## Result Store
# +
db4_uri = 'sqlite:///slog4.db'
if TOP:
db4 = sqla.create_engine(db4_uri)
# -
# ## SLog files
#
# [rclone support for Google drive](https://rclone.org/drive/)
#
# > This contains 564GB of data from 117 participants, spread across 172 slogfiles ...
#
# ```
# [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/
# Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s
# Checks: 5 / 5, 100%
# Transferred: 182 / 182, 100%
# Elapsed time: 13m16.0s
# ```
#
# +
import importlib
import slogdata
importlib.reload(slogdata)
from slogdata import SlogAccess, CLI, show_times
if TOP:
def _dir(path):
import pathlib
return pathlib.Path(path)
def _cli(bin):
from subprocess import run, Popen
return CLI(bin, run, Popen, debug=True)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
TOP and show_times(_sa4.get_records('pathrocknetwork/chain-15.pathrocknetwork.slog.gz', 7721, 2))
# -
_bySize = _sa4.files_by_size()
_bySize
_bySize[_bySize.parent == 'KingSuper']
TOP and _bySize[::5].set_index('name')[['st_size']].plot.barh(
title='slogfile sizes (sample)',
figsize=(10, 8));
# ### random access with `gztool`
#
# [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021.
#
#
# ```
# ~/projects/gztool/gztool -C -e */*.slog.gz
# ...
# ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'.
# ...
# Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used.
# Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ...
# Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'...
#
# 172 files processed
# 1 files processed with errors!
# ```
# +
# count lines on all slogfiles in parallel
# TODO: if it's already in the DB, don't compute it again.
if TOP:
_withLines = _bySize.assign(
lines=db.from_sequence(_bySize.values).map(
lambda v: _sa4.line_count(*v[1:3])).compute())
TOP and _withLines
# -
_withLines.to_sql('file_meta', db4, index=False, if_exists='replace')
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from file_meta limit 3'
_withLines = pd.read_sql_table('file_meta', db4)
# +
def file_chart(slogdf, sample=5, **plotkw):
    """Horizontal bar chart of slogfile size (in 64-byte units) and line count.

    Plots every `sample`-th file by name; extra keyword args pass through
    to pandas ``.plot.barh``.
    """
    chart = slogdf[['name', 'st_size', 'lines']].copy()
    chart['b64'] = chart.st_size / 64
    chart = chart.drop(columns=['st_size'])
    chart.set_index('name')[::sample].plot.barh(**plotkw)
TOP and file_chart(_withLines, title='slogfile sizes (sample)', figsize=(10, 8))
# -
# ## slogfile basics
pd.read_sql("""
select st_size, lines
from file_meta
order by st_size desc
""", db4).describe()
# ## Runs, Blocks, and Deliveries
#
# > split each slogfile into runs (each beginning with an import-kernel event)
# +
def partition_lines(lines, step=1000000):
    """Split a 1-based line range into partitions of at most `step` lines.

    Note: line numbers are **1-based**.

    :return: DataFrame with one row per partition: `start` (first line),
             `qty` (lines in this partition), `lines` (total, repeated).
    """
    records = []
    for start in range(1, lines + 1, step):
        remaining = lines + 1 - start
        records.append(dict(start=start, qty=min(remaining, step), lines=lines))
    return pd.DataFrame.from_records(records)
partition_lines(_withLines.lines.iloc[-1])
# +
#client.restart()
# +
# # !sqlite3 slog4.db 'drop table run'
# +
def provide_table(engine, table, todo, chunksize=None, index=True):
    """Memoize a DataFrame in a SQL table.

    If `table` already exists on `engine`, read and return it; otherwise
    call `todo()` to build the DataFrame, persist it to `table`, and
    return the freshly built frame.
    """
    exists = sqla.inspect(engine).has_table(table)
    if not exists:
        built = todo()
        built.to_sql(table, engine, index=index)
        return built
    return pd.read_sql_table(table, engine, chunksize=chunksize)
def runs_todo(withLines):
    """Extract all runs (import-kernel events) from all slogfiles, in parallel.

    Fans out one dask task per (file, line-partition) via the ambient
    `_sa4` SlogAccess, then merges in file metadata and derives each
    run's end time / end line from the start of the following run in
    the same file (last run ends at the file's last line).
    """
    runs = dd.from_delayed([
        dask.delayed(_sa4.provide_runs)(f.parent, f['name'], part.start, part.qty)
        for fid, f in withLines.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute().sort_values(['file_id', 'line'])
    withNames = pd.merge(runs, withLines[['file_id', 'parent', 'name', 'st_size', 'lines']],
                         on='file_id')
    # Compute end times
    byFile = withNames.groupby('file_id')
    # shift(-1) within each file: a run ends where the next run begins
    runs = pd.concat([
        withNames,
        byFile.apply(lambda g: pd.DataFrame(dict(time_end=g.time.shift(-1)))),
        byFile.apply(lambda g: pd.DataFrame(dict(line_end=g.line.shift(-1)))),
    ], axis=1)
    # the last run in each file has no successor; it ends at the file's last line
    runs.line_end = np.where(runs.line_end.isnull(), runs.lines, runs.line_end)
    return runs.sort_values(['st_size', 'file_id', 'line']).reset_index(drop=True)
_runs = provide_table(db4, 'run', lambda: runs_todo(_withLines))
# -
# !sqlite3 slog4.db '.schema run'
show_times(_runs, ['time', 'time_end'])[['st_size', 'line', 'line_end', 'parent', 'file_id', 'time', 'time_end']]
# ### runs per slogfile
df = _runs.groupby('file_id')[['line']].count()
df.describe()
# +
df = pd.read_sql("""
select file_id, count(*) runs, name, st_size, lines
from run r
-- join file_id s on s."index" = r.slogfile
group by file_id
order by 2
""", db4)
df.set_index('name')[['runs']][::5].plot.barh(
log=True,
title='slogfile runs (sample)',
figsize=(10, 8));
# -
# ## agorictest-16 genesis: `2021-07-01 19:00:00`
gen16 = show_times(pd.DataFrame(dict(blockHeight=64628, blockTime=[1625166000], ts=1625166000)), ['blockTime'])
gen16
# ## Block end start / finish events
# +
import importlib
import slogdata
from slogdata import SlogAccess
importlib.reload(slogdata)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
show_times(
_sa4.provide_blocks('ChainodeTech', 'agorictest-16_chain.slog.gz', 1, 1000000)
)
# -
# ## Separate runs by chain
# +
def first_block(sa, run,
                head=5000,
                ts=gen16.ts[0]):
    """Return the first block event(s) of a run, tagged with its chain.

    Scans at most `head` lines from the start of `run`; classifies the
    run as chain 16 when its first blockTime is at or after the
    agorictest-16 genesis `ts` (default bound from `gen16` at def time),
    else chain 15. If no block event is found, returns a one-row
    placeholder with blockHeight/blockTime of -1 and chain NaN.
    """
    log.info('1st block: %s/%s', run.parent, run['name'])
    qty = min(int(run.line_end) - run.line + 1, head)
    df = sa.get_blocks(f'{run.parent}/{run["name"]}', run.line, qty)[:2]
    if not len(df):
        return pd.DataFrame.from_records([dict(
            blockHeight=-1,
            blockTime=-1,
            run=run.name,
            chain=np.nan)], index=[run.name])
    df = df.assign(run=run.name,
                   chain=16 if df.blockTime[0] >= ts else 15)
    return df
show_times(first_block(_sa4, _runs.loc[0]))
# +
def run2chain(sa, runs):
    """Map each run to its first blockHeight / blockTime / chain.

    Applies `first_block` row-wise over `runs`; the result is indexed
    like `runs` so it can be concatenated back alongside it.
    """
    df = runs.apply(lambda run: first_block(sa, run).iloc[0][['blockHeight', 'blockTime', 'chain']],
                    axis=1)
    return df
_r2c = run2chain(_sa4, _runs)
_r2c
# -
_runchain = pd.concat([_runs.drop(columns=['index']), _r2c], axis=1)
_runchain.to_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3'
_runchain = pd.read_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
_runs['chain'] = _runchain.chain
_runs.groupby('chain')[['file_id', 'lines']].count()
# +
# # !sqlite3 slog4.db 'drop table blockval;'
# +
def blockval_todo(file_meta):
    """Extract block start/finish events from all slogfiles, in parallel.

    One dask task per (file, line-partition), using the ambient `_sa4`
    SlogAccess; results are concatenated into a single DataFrame.
    """
    return dd.from_delayed([
        dask.delayed(_sa4.provide_blocks)(f.parent, f['name'], part.start, part.qty)
        for fid, f in file_meta.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute()
_blockval = provide_table(db4, 'blockval', lambda: blockval_todo(_withLines), index=True)
show_times(_blockval)
# -
# !sqlite3 slog4.db '.schema blockval'
pd.read_sql("""
select file_id, max(blockHeight)
from blockval
where blockTime >= 1625166000
group by file_id
order by 2 desc
""", db4)
# ### Consensus Block-to-Block Time
# +
# db4.execute("""drop table if exists block""")
# -
db4.execute("""
create table block as
select distinct
case when blockTime >= 1625166000 then 16 else 15 end chain
, blockHeight, blockTime
from blockval
order by blockTime
""")
pd.read_sql("""
select * from block limit 10
""", db4)
# ### What is the range of blocks in `agorictest-16`?
pd.read_sql("""
select lo, n, lo + n - 1, hi from (
select min(blockHeight) lo, max(blockHeight) hi, count(distinct blockHeight) n
from block
where chain = 16
)
""", db4)
# +
blk16 = pd.read_sql("""
select blockHeight, blockTime
from block
where chain = 16
""", db4, index_col='blockHeight')
show_times(blk16).describe(datetime_is_numeric=True)
# -
b16time = pd.read_sql("""
select * from block
where chain = 16
""", db4, index_col='blockHeight')
b16time['delta'] = b16time.shift(-1).blockTime - b16time.blockTime
b16time[['delta']].describe()
b16time[b16time.index < 90527].delta.max()
b16time[b16time.delta == 120]
b16time[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
show_times(b16time, ['blockTime']).set_index('blockTime')[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
# histogram of block-to-block time delta for agorictest-16. (_Note the log scale on the y axis._)
b16time[['delta']].hist(bins=20, log=True);
df = show_times(b16time, ['blockTime'])
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].hist(bins=20, log=True);
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].describe()
# ### How many validators logged each block in agorictest-16?
df = pd.read_sql("""
select blockHeight, count(distinct file_id) qty
from blockval
where sign = -1
and blockTime >= 1625166000
group by blockHeight
""", db4)
df.head()
df.set_index('blockHeight').plot(title='agorictest-16 validator coverage by block', figsize=(9, 6));
# !sqlite3 slog4.db '.schema run'
# +
# db4.execute('drop table if exists blockrun16')
db4.execute("""
create table blockrun16 as
with b as (
select *
from blockval
where blockTime >= 1625166000
)
select file_id
, (select r."index"
from run r
where r.file_id = b.file_id and r.line <= b.line and b.line < r.line_end) run
, b.line, b.time
, b.sign
, blockHeight, blockTime
from b
""")
df = pd.read_sql("""
select * from blockrun16
""", db4)
df.tail()
# -
x = df.groupby('blockHeight')[['run']].count()
x.plot();
x['blockHeight'].sort_values('max').reset_index(drop=True).plot();
# ## Slow Blocks
df = show_times(b16time, ['blockTime'])
df[(df.blockTime <= '2021-07-02 19:00:00') &
(df.delta >= 30)]
# Which runs include block 72712, which took 31 sec?
b33 = pd.read_sql("""
select lo.file_id, lo.run, lo.line, hi.line - lo.line + 1 range, lo.blockHeight
from blockrun16 lo
join blockrun16 hi on hi.run = lo.run and hi.blockHeight = lo.blockHeight
where lo.blockHeight in (72712)
and lo.sign = -1
and hi.sign = 1
""", db4)
b33
# ## Correlating block start with block end
_blockrun16 = df = pd.read_sql_table('blockrun16', db4)
df.tail()
lo = df[df.sign == -1]
hi = df.shift(-1)
hi = hi[hi.sign == 1]
dur = hi.time - lo.time
# show_times(df, ['time', 'time_end'])
lo['dur'] = dur
lo['s_hi'] = hi.file_id
lo['l_hi'] = hi.line
lo['t_hi'] = hi.time
dur = lo[lo.file_id == lo.s_hi]
show_times(dur, ['time', 'blockTime'])
show_times(
dur.sort_values('dur').dropna().tail(),
['time', 'blockTime', 't_hi']
)
dur[dur.dur.abs() <= 120].plot.scatter(x='blockHeight', y='dur')
dur[['blockHeight', 'dur']].describe()
# ## Cranks in a Block
# +
def long_runs_including(runs, blockrun, blockHeight):
    """Runs that logged `blockHeight`, longest first.

    Selects the rows of `runs` whose index appears in `blockrun` at the
    given block height, adds a `length` column (line_end - line), and
    sorts by that length descending.
    """
    matching_ids = blockrun.loc[blockrun.blockHeight == blockHeight, 'run']
    with_length = runs.assign(length=runs.line_end - runs.line)
    subset = with_length[with_length.index.isin(matching_ids)]
    return subset.sort_values('length', ascending=False)
_long16 = long_runs_including(_runs, _blockrun16, 64628)
_long16.head()
# -
show_times(dur[dur.run == _long16.index[0]], ['time', 'blockTime', 't_hi'])
_blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
# +
def blockrun_records(blockHeight, run, slogAccess, blockrun,
                     target=None, include=None):
    """Fetch the slog records of one block within one run.

    :param blockHeight: block to extract
    :param run: row from the run table (parent, name, file_id; index = run id)
    :param slogAccess: SlogAccess used to read the slogfile
    :param blockrun: block start/end events per run
    :param target: optional record filter passed through to get_records
    :param include: optional record-type filter passed through to get_records
    :return: records DataFrame with a `file_id` column added
    """
    ref = f'{run.parent}/{run["name"]}'
    # start / end events for this (run, block) pair, in line order
    br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    block_start = br.iloc[0]  # assert sign == -1?
    block_end = br.iloc[1]
    length = block_end.line - block_start.line + 1
    # use the `ref` computed above (previously the same f-string was rebuilt inline)
    df = slogAccess.get_records(ref, int(block_start.line), int(length),
                                target=target, include=include)
    return df.assign(file_id=run.file_id)
def get_vats(slogAccess, ref, start, qty):
    """Return the create-vat records in `qty` lines of `ref` starting at `start`."""
    return slogAccess.get_records(
        ref, start, qty, target='create-vat', include=['create-vat'])
def vats_in_blockrun(blockHeight, run, slogAccess, blockrun):
    """Create-vat events inside one block of one run.

    Locates the block's start/end lines from `blockrun`, reads the
    create-vat records in that span, and tags them with the block height
    and the run's parent (validator) name.
    """
    events = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    start_ev = events.iloc[0]  # assert sign == -1?
    end_ev = events.iloc[1]
    span = end_ev.line - start_ev.line + 1
    slog_ref = f'{run.parent}/{run["name"]}'
    vats = get_vats(slogAccess, slog_ref, int(start_ev.line), int(span))
    return vats.assign(blockHeight=blockHeight, parent=run.parent)
# _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497)
vats_in_blockrun(_blockrun16.iloc[0].blockHeight, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# -
vats_in_blockrun(64629, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
no_deliveries = pd.DataFrame.from_records([
{'time': 1625198620.6265895,
'type': 'deliver-result',
'crankNum': 1291,
'vatID': 'v11',
'deliveryNum': 124,
'kd': object(),
'line': 1673077,
'dr': object(),
'syscalls': 2,
'method': 'inbound',
'compute': 119496.0, # missing compute is possible... from replay.
'dur': 0.1912224292755127,
}]).iloc[:0]
no_deliveries.dtypes
# +
import json
import itertools
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
def block_cranks(records):
    """Pair deliver / deliver-result slog records into per-delivery rows.

    Walks `records` in order, counting syscall-result events between a
    `deliver` and its `deliver-result`. Each completed pair yields one
    row carrying the kernel delivery (`kd`), method name (for 'message'
    deliveries), compute meter value (NaN when the result lacks one,
    e.g. from replay), syscall count, and wallclock duration.

    :param records: iterable of slog record dicts (must have 'type')
    :return: DataFrame of deliveries, or the empty `no_deliveries`
             template when none completed
    """
    deliveries = []
    syscalls = 0
    deliver = None
    for record in records:
        ty = record['type']
        if ty == 'deliver':
            deliver = record
            syscalls = 0
        elif ty == 'syscall-result':
            syscalls += 1
        elif ty == 'deliver-result':
            if not deliver:
                # result without a matching deliver: log and skip
                # (log.warn is a deprecated alias of log.warning)
                log.warning('no deliver? %s', record)
                continue
            dur = record['time'] - deliver['time']
            method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
            compute = record['dr'][2]['compute'] if isinstance(record['dr'][2], dict) else np.nan
            detail = dict(record,
                          syscalls=syscalls,
                          kd=deliver['kd'],
                          method=method,
                          compute=compute,
                          dur=dur)
            deliveries.append(detail)
    if deliveries:
        return pd.DataFrame.from_records(deliveries)
    else:
        return no_deliveries
def get_deliveries(slogAccess, ref, start, qty):
    """Extract per-delivery detail from one block's worth of slog lines.

    Reads deliver / deliver-result / syscall-result records from `ref`
    (lines `start` .. `start + qty - 1`) and pairs them via
    `block_cranks`. Returns the empty `no_deliveries` template when the
    span is just the block start/end events or holds no delivery records.
    """
    if qty <= 2:  # just block start, block end
        return no_deliveries
    df = slogAccess.get_records(
        ref, int(start), int(qty),
        target=None, include=['deliver', 'deliver-result', 'syscall-result'])
    if len(df) > 0 and 'syscallNum' in df.columns:
        # drop per-syscall detail columns (when present) in one pass;
        # block_cranks only needs the counts it derives itself
        df = df.drop(columns=list({'syscallNum', 'ksr', 'vsr', 'vd'} & set(df.columns)))
        return block_cranks(df.to_dict('records'))
    else:
        return no_deliveries
_g16 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
_run1 = _runs.loc[_long16.index[0]]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}', _g16.iloc[0].line, _g16.iloc[1].line - _g16.iloc[0].line + 1)
# -
df = dur[dur.run == _long16.index[0]].assign(length=dur.l_hi - dur.line + 1)
# df[df.length > 2].head(10)
df[df.dur > 5].head(10)
# +
# https://avi.im/blag/2021/fast-sqlite-inserts/
def run_sql(script, engine):
    """Execute each statement of `script` (separated by ';' + newline) on `engine`."""
    statements = script.strip().split(';\n')
    for statement in statements:
        engine.execute(statement)
run_sql('''
PRAGMA journal_mode = OFF;
PRAGMA synchronous = 0;
PRAGMA cache_size = 1000000;
PRAGMA locking_mode = NORMAL;
PRAGMA temp_store = MEMORY;
''', db4)
# -
len(dur)
dur.to_sql('blockrun16dur', db4, if_exists='replace', chunksize=25000, index=False)
# +
_br2 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64632)].iloc[:2]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}',
_br2.iloc[0].line, _br2.iloc[1].line - _br2.iloc[0].line + 1)
# +
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
import inspect
def provide_deliveries(slogAccess, blockHeight, run, blockrun):
    """Cached per-block delivery detail for one run.

    Uses `slogAccess.provide_data` to memoize the extraction (keyed
    `deliveries-<blockHeight>`, gzip'd) and tags the result with chain,
    blockHeight, and run id. When `blockrun` lacks both start and end
    events for this (run, block), returns the empty template with
    sentinel file_id/chain of -1.
    """
    br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    if len(br) < 2:
        return no_deliveries.assign(file_id=-1, chain=-1, blockHeight=blockHeight, run=run.name)
    block_start = br.iloc[0]  # assert sign == -1?
    block_end = br.iloc[1]
    length = int(block_end.line - block_start.line + 1)
    df = slogAccess.provide_data(run.parent, run['name'], int(block_start.line), length,
                                 f'deliveries-{blockHeight}', no_deliveries,
                                 lambda ref, start, qty: get_deliveries(slogAccess, ref, start, qty),
                                 'gzip')
    df = df.assign(chain=run.chain, blockHeight=blockHeight, run=run.name)
    # sanity check: guard against stale/malformed cached data (bad chain
    # dtype, missing vatID, or leftover raw `vd` column)
    if df.dtypes['chain'] not in ['int64', 'float64'] or 'vatID' not in df.columns or 'vd' in df.columns:
        raise NotImplementedError(f'cols: {df.columns} dtypes: {df.dtypes} block {blockHeight, int(block_start.line)}, run\n{run}')
    return df
df = provide_deliveries(_sa4, 66371, _run1, _blockrun16)
show_times(df)
# -
# Computron rate for just this one block?
df.compute.sum() / df.dur.sum()
# test empty
provide_deliveries(_sa4, 64629, _run1, _blockrun16)
_runs.loc[455:456]
# ## Cranks in one long run starting at agorictest-16 genesis
gen16
df = pd.read_sql("""
with lo as (
select *
, time - blockTime delta
from blockrun16
where blockHeight = 64628
and blockTime = 1625166000
and sign = -1
and run is not null
), hi as (
select run, max(blockHeight) hi, max(blockTime) t_hi
from blockrun16
where run is not null
and sign = -1
group by run
), agg as (
select lo.*, hi.hi, hi.t_hi
from lo join hi on lo.run = hi.run
where abs(delta) < 7
order by hi.t_hi desc
)
select agg.*, run.parent, run.name
from agg
join run on agg.run = run."index"
limit 5
""", db4)
show_times(df, ['time', 'blockTime', 't_hi'])
show_times(_runs).loc[445]
# +
import json
def run1_deliveries(con, sa, lo, hi, run, br,
                    json_cols=['kd', 'dr'],
                    table='run1'):
    """Persist delivery details for blocks [lo, hi) of one run to SQL.

    Resumable: if `table` already exists, restarts from max(blockHeight)+1
    and appends; otherwise replaces. Object-valued columns (`json_cols`,
    read-only here despite the mutable default) are JSON-encoded so they
    fit in SQL text columns.
    """
    if sqla.inspect(con).has_table(table):
        lo = pd.read_sql(f'select max(blockHeight) + 1 lo from {table}', con).iloc[0].lo
        if_exists = 'append'
    else:
        if_exists = 'replace'
    for blockHeight in range(lo, hi):
        df = provide_deliveries(sa, blockHeight, run, br)
        if not len(df):
            # log.info('block %d: no deliveries', blockHeight)
            continue
        for col in json_cols:
            df[col] = df[col].apply(json.dumps)
        log.info('block %d of %d: %s += %d rows', blockHeight, hi, table, len(df))
        df.to_sql(table, con, if_exists=if_exists, index=False)
        # after the first write, later blocks always append
        if_exists = 'append'
run1_deliveries(db4, _sa4, 64628, 75000, _runs.loc[445], _blockrun16)
# run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b')
# -
_run1 = df = pd.read_sql('select * from run1 union all select * from run1b', db4)
show_times(_run1.tail(3))
_run1.blockHeight.describe()
_run1[_run1.blockHeight >= 88296 - 2].sort_values('blockHeight').head(30).drop(columns=['kd', 'dr', 'file_id'])
df = _run1[_run1.blockHeight == 88295].sort_values('dur', ascending=False).drop(columns=['kd', 'dr', 'file_id'])
df.head(10)
df[df.dur >= 1]
# TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration?
#
# e.g. if harden weakset grew, the duration could grow while keeping computrons constant
_run1[_run1.method == 'getPayout'][['compute', 'dur']].describe()
_run1[_run1.method == 'getPayout'].compute.hist()
_run1[(_run1.method == 'getPayout') & (_run1.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
lg = _run1[_run1.blockHeight > 76000]
lg = lg[lg.dur < 1]
lg[(lg.method == 'getPayout') & (lg.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
# Things got slower over time.
#
# Hypothesis: GC didn't happen -> weak set got big -> weakset access time got big
# So computron model should not be based on this range, but rather on pre-loadgen time.
# When looking at comptron / wallclock, we should look at:
#
# - all getCurrentAmount calls
# - within a narrow range of blockHeight
# - that all use the same # of computrons
#
# (as above)
#
b16time[b16time.delta == 224]
_run1[['compute', 'dur']].describe()
# +
def drate(df):
    """Return a copy of `df` with a `rate` column.

    rate = compute / (syscalls + 1) / dur
    """
    per_crank = df['compute'] / (df['syscalls'] + 1)
    # rate = df.compute / df.dur
    return df.assign(rate=per_crank / df['dur'])
df = drate(_run1).groupby('method')[['rate']].aggregate(['count', 'mean', 'std', 'max'])
df = df.sort_values(('rate', 'mean'), ascending=False)
df
# -
common = _run1.groupby('method')[['line']].count()
common = common[common.line > 20]
common
drate(_run1[_run1.method.isin(common.index)])[['method', 'rate']].boxplot(by='method', rot=90, figsize=(20, 12))
common.sort_values('line', ascending=False).head()
_run1.blockHeight.describe()
_run1.sort_values('dur', ascending=False)
# This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have.
# +
def sim(df, c_eg, dur_eg, target):
    """Assign agorictest-16 deliveries to simulated blocks by computron budget.

    :param df: deliveries with `chain` and `compute` columns
    :param c_eg: computron count of a reference delivery
    :param dur_eg: wallclock duration (sec) of that reference delivery
    :param target: target seconds of compute per simulated block
    :return: chain-16 rows (index reset) with `running` (cumulative
             computrons) and `sim_blk` (simulated block number) added
    """
    # .copy() so the column assignments below write to our own frame rather
    # than a chained-indexing view of the caller's (SettingWithCopyWarning)
    df = df[df.chain == 16].copy()
    df['running'] = df.compute.cumsum()  # try exp
    # computrons per simulated block, scaled from the reference rate
    threshold = target * (c_eg / dur_eg)
    log.info('threshold: %s', threshold)
    df['sim_blk'] = (df.running / threshold).round()
    # df['adj'] = df.sim_blk - df.blockHeight
    return df.reset_index(drop=True)
df = _run1.drop(columns=['type', 'kd', 'dr', 'file_id', 'line', 'run'])
# df = df[df.method != 'executeContract']
# df = df[df.method == 'getCurrentAmount'] # getPayout
# df.blockHeight = df.blockHeight - df.blockHeight.iloc[0]
df = sim(df, 48390.0, 0.074363, 5)
df = df[df.sim_blk.notnull()]
df.sim_blk = df.sim_blk.astype('int64')
show_times(df)
# -
pd.read_sql('''
select count(distinct run)
from blockrun16
''', db4)
len(_runs)
# +
def nth_block(sa, blockHeight, run, blockrun,
              ts=gen16.ts[0]):
    """Delivery detail for one block of one run, tagged with run and chain.

    Returns an empty frame unchanged when the run logged no deliveries
    for that block.

    NOTE(review): `ts` is accepted (default = agorictest-16 genesis,
    bound at def time) but unused in this body.
    """
    log.info('%d th block: %s/%s', blockHeight, run.parent, run['name'])
    br = blockrun[(blockrun.blockHeight == blockHeight) & (blockrun.run == run.name)]
    df = provide_deliveries(sa, blockHeight, run, br)
    if not len(df):
        return df
    df = df.assign(run=run.name, chain=run.chain)
    return df
m1b1 = pd.concat(
df
for _, run in _runs.iterrows()
for df in [nth_block(_sa4, 80001, run, _blockrun16)]
if len(df)
)
m1b1
# -
m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df = m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df.describe()
# ## Validator speed: 2-4x spread for `getCurrentAmount`
df[['dur']].hist()
# +
# df.groupby('method')[['compute']].describe().loc['executeContract']
# -
df.compute.hist(log=True);
df.dur.hist(log=True);
df[df.dur < .1].dur.hist()
# #### Total delivery duration per block
x = pd.concat([
df.groupby('blockHeight')[['dur']].sum(),
df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim')),
], axis=1)
x.hist(); # log=True);
x.describe()
x.dur.quantile(.9)
xx = df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim'))
xx[xx.dur_sim > 25]
df[df.blockHeight == 88295].sort_values('dur', ascending=False)
df[df.sim_blk == 32607].sort_values('dur', ascending=False)
_run1[_run1.compute == 381240].dur.describe()
_run1[_run1.compute == 381240].plot.scatter(x='blockHeight', y='dur')
# This wasn't a big deal during most of the chain (.25sec 75th percentile).
#
# We could model this within 2x or 3x by ignoring the spike.
# **TODO**: what happened during that spike? is it consensus-observable? kernel-observable?
df = _run1[_run1.compute == 381240]
df[(df.blockHeight >= 88100) & (df.blockHeight < 88400)].plot.scatter(x='blockHeight', y='dur')
df[df.sim_blk == 32607].compute.sum()
df[df.sim_blk == 32607].dur.sum()
df[df.sim_blk == 32607].syscalls.sum()
df.groupby('blockHeight')[['syscalls']].sum().describe()
# #### Total compute per block
x = pd.concat([
df.groupby('blockHeight')[['compute']].sum(),
df.groupby('sim_blk')[['compute']].sum().rename(columns=dict(compute='cmp_sim')),
], axis=1)
x.hist(log=True);
x.describe()
cluster.scale(8)
client.restart()
f'{12:04}'
# +
def pick_chain(ht,
gen=1625166000, hi=16, lo=15):
return np.where(ht > gen, hi, lo)
def run_deliveries(slogs, sa, run):
chain_id = f'agorictest-{run.chain}'
blocks = pd.concat(
pd.read_csv(blockFile)
for blockFile in (slogs / run.parent).glob('*-blocks.csv')
)
blocks = blocks[(blocks.line >= run.line) &
(blocks.line < run.line_end)]
blocks = blocks.assign(run=run.name)
heights = blocks.blockHeight.unique()
log.info('run %s %-3d blocks %.16s %s', run.name, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])
tot = 0
for blockHeight in heights:
detail = provide_deliveries(sa, blockHeight, run, blocks)
if not len(detail):
continue
tot += len(detail)
yield detail
if not tot:
yield no_deliveries.assign(file_id=-1, chain=-1, blockHeight=-1, run=run.name)
def by_vat(dest, run, detail):
chain_id = f'agorictest-{run.chain}'
run_detail = f'{run.name:04}-{run.parent}-{run.file_id}-{run.line}'
for vatID, g in detail.groupby('vatID'):
try:
(dest / chain_id / vatID).mkdir(parents=True)
except:
pass
vat_dir = dest / chain_id / vatID
f = vat_dir / f'delivery-detail-{run_detail}.csv.gz'
log.info('saving to %s:\n%s', f, g.set_index(['vatID', 'deliveryNum'])[['compute', 'dur']].tail(3))
g.to_csv(f, index=False)
f = vat_dir / f'delivery-summary-{run_detail}.csv.gz'
g[['vatID', 'deliveryNum', 'kd', 'syscalls', 'compute']].to_csv(f, index=False)
return detail.assign(run=run.name).groupby(['run', 'vatID'])[['deliveryNum']].count()
#by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs)
for df in run_deliveries(_dir('slogfiles/'), _sa4, _runs.loc[58]):
print(df)
print(by_vat(_dir('vat-details/'), _runs.loc[58], df))
break
# +
def run_deliveries_todo(sa, slogs, dest, runs):
def do_run(run):
df = pd.concat(
detail
for detail in run_deliveries(slogs, sa, run)
)
return by_vat(dest, run, df)
todo = (
dask.delayed(do_run)(run)
for _, run in runs.iterrows()
)
return todo
per_run = dd.from_delayed(run_deliveries_todo(_sa4, _dir('slogfiles/'), _dir('vat-details/'), _runs))
per_run.compute()
# -
pd.to_datetime(1625213913.1672082, unit='s')
# +
import inspect
from slogdata import show_times
db4.execute('drop table if exists crankrun') #@@
def deliveries_todo(sa, blockrun, runs):
todo = (
dask.delayed(provide_deliveries)(sa, blockHeight, run,
blockrun[(blockrun.run == run.name) &
(blockrun.blockHeight == blockHeight)])
for run_ix, run in runs.iterrows()
for heights in [blockrun[blockrun.run == run_ix].blockHeight.unique()]
for _ in [log.info('run %s %-3d blocks %.16s %s', run_ix, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])]
for blockHeight in heights
)
log.info('todo: %s', type(todo))
df = dd.from_delayed(todo,
meta=no_deliveries.assign(file_id=1, chain=1, blockHeight=1, run=1))
return df.compute()
# _dr16 = provide_table(
# db4, 'crankrun',
# # 65517
# lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275]))
_dr16 = deliveries_todo(_sa4, _blockrun16, # [_blockrun16.blockHeight <= 65000]
_runs[_runs.chain == 16])
_dr16
# -
# ## deliveries from batch
_delrun = pd.read_sql('select * from delrun', db4)
_delrun.groupby('chain')[['line']].count()
# ## Are compute meter values consistent?
# +
def compute_meter_consistent(df):
compute_count = df.groupby(['vatID', 'deliveryNum'])[['compute']].nunique()
dups = compute_count[compute_count['compute'] > 1]
return pd.merge(dups.reset_index(),
df[['run', 'vatID', 'deliveryNum', 'compute']],
how='left', suffixes=['_dup', ''],
left_on=['vatID', 'deliveryNum'],
right_on=['vatID', 'deliveryNum'])
# x = compute_meter_consistent(_alld16).compute()
x = compute_meter_consistent(_delrun[_delrun.chain == 16]).sort_values(['vatID', 'deliveryNum']) # .compute()
x
# -
compute_meter_consistent(_delrun[_delrun.chain == 15]).sort_values(['vatID', 'deliveryNum']) # .compute()
# ## Computrons per block
blockdel = _delrun[_delrun.method != 'executeContract']
key = ['chain', 'blockHeight', 'vatID', 'deliveryNum', 'compute']
blockdel = blockdel.sort_values(key).drop_duplicates()
df = blockdel.groupby(['chain', 'blockHeight'])[['deliveryNum']].count().sort_index()
df.plot()
_bkcomp = df = blockdel.groupby(['chain', 'blockHeight'])[['compute']].sum()
df
df.plot()
# +
def type2sign(df):
df['sign'] = np.where(df.type == 'cosmic-swingset-end-block-start', -1, 1)
return df
def byChain(df, gen=gen16.ts[0], hi=16, lo=15):
return df.assign(chain=np.where(df.blockTime >= gen, hi, lo))
return df
def slog_blocks(slogfiles,
pattern='**/*-blocks.csv'):
df = pd.concat(type2sign(pd.read_csv(p)[['type', 'blockHeight', 'blockTime']])
for p in slogfiles.glob(pattern))
df = byChain(df)
key = ['chain', 'blockHeight', 'blockTime']
df = df[key].sort_values(key).drop_duplicates()
return df.reset_index(drop=True)
_blk = slog_blocks(_dir('slogfiles/'))
_blk.tail()
# -
_byChain = _blk.groupby('chain')
df = pd.merge(
_byChain[['blockHeight']].nunique(),
_byChain[['blockHeight']].aggregate(['min', 'max'])['blockHeight'],
left_index=True, right_index=True,
)
df['span'] = df['max'] - df['min'] + 1
df
# +
def blockdur(df):
df = df.set_index(['chain', 'blockHeight'])
df['dur'] = df.shift(-1).blockTime - df.blockTime
return df
_bkdur = blockdur(_blk)
_bkdur
# -
# compute by block with duration
_bkcmpdur = _bkcomp.join(_bkdur, lsuffix='_d', rsuffix='_b')
_bkcmpdur['rate'] = (_bkcmpdur.compute / _bkcmpdur.dur).astype(float)
_bkcmpdur
_bkcmpdur[_bkcmpdur.dur > _bkcmpdur.dur.quantile(0.99)]
df = _bkcmpdur.loc[16]
df[df.dur < 8][['rate']].hist(log=True)
_bkcmpdur[_bkcmpdur.dur < 8][['rate']].describe()
# ## simulation
_delrun.groupby('run')[['line']].count()
_delrun[['crankNum', 'run']].groupby(['crankNum'])[['run']].aggregate(['count']).plot()
# +
def sim(df, percentile):
df = df[df.chain == 16]
df = df[df.method != 'executeContract']
key = ['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']
df = df.groupby(key)[['dur']].aggregate(['count', 'mean', 'median', 'sum'])
return df
df = df[['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']].sort_values(
['blockHeight', 'crankNum', 'vatID', 'deliveryNum']).drop_duplicates()
threshold = df.compute.quantile(percentile)
df['running'] = df.compute.cumsum()
df['sim_block'] = (df.running / threshold).round()
return df.reset_index(drop=True)
df = sim(_run1, .99)
df
# -
df[['blockHeight']].plot()
df.set_index('blockHeight')[['sim_block']].plot()
# ## Compute rate by vat
plt.cm.rainbow[1]
pd.Categorical(_delrun.method.dropna(), ordered=True)
# +
import matplotlib as plt
def cmap_of(df, color,
cmap=plt.cm.get_cmap('hot')):
df = df.loc[:, [color]].fillna('???')
byColor = df.groupby(color).count() #.set_index(color)
byColor['unit'] = range(len(byColor))
byColor.unit = byColor.unit / len(byColor)
byColor['color'] = byColor.unit.apply(cmap)
return byColor.loc[df[color]].color
cmap_of(_delrun, 'method')
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, color=None, figsize=(9, 6)):
df = df[~df[x].isnull() & ~df[y].isnull()]
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
if color:
color = cmap_of(df, color)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, color=color, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
# fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
# fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
fit_line(_delrun[_delrun.chain == 16], 'compute', 'dur', color='method')
# -
_r = _delrun[['compute', 'dur', 'method']].assign(rate=_delrun.compute / _delrun.dur)
_r.groupby('method')[['rate']].describe().sort_values(('rate', 'mean'))
df.sort_values(('compute', 'mean'))
df = fastSlog[fastSlog.vatID == 'v10']
df['rate'] = df.compute / df.dur
df[['deliveryNum', 'dur', 'compute', 'rate']].set_index('deliveryNum').plot(subplots=True)
df.rate.describe()
# ### exclude dynamic vat creation
fastSlog.groupby('method')[['compute']].mean().plot.barh(log=True, figsize=(12, 10))
noContract = df =fastSlog[fastSlog.method != 'executeContract'].copy()
df['rate'] = df.compute / df.dur
df[['dur', 'compute', 'rate']].plot(subplots=True)
fit_line(noContract, 'compute', 'dur')
fit_line(fastSlog, 'compute', 'dur')
# ## Add syscalls to the model
df = noContract
cs = np.polyfit(df[['compute', 'syscalls']], df['dur'], 1)
df = _dr16.assign(chain_id=16)
df = df[['chain_id', 'vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']].drop_duplicates()
df = df.set_index(['chain_id', 'vatID', 'deliveryNum']).sort_index()
df[df.index.duplicated()]
df
df.loc[16].loc['v1'].loc[0]
_dr16.query('(deliveryNum == 0) & (vatID == "v1")').groupby('compute')[['line']].count()
pd.merge(_dr16,
df[df.index.duplicated()].reset_index()[['vatID', 'deliveryNum']],
left_on=['vatID', 'deliveryNum'], right_on=['vatID', 'deliveryNum']
)[['vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']]
# _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum'])
dall = pd.concat(
pd.read_csv(f)
for f in _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz')
)
dall
# +
def load_deliveries(files, con, table):
if_exists = 'replace'
for file in files:
df = pd.read_csv(file)
df.to_sql(table, con, if_exists=if_exists)
if_exists = 'append'
log.info('loaded %d records from %s', len(df), file)
load_deliveries(
_dir('slogfiles/').glob('**/*-deliveries-*.csv.gz'),
db4,
'delrun3')
# -
# ### Did we ever do more than 1000 cranks in a block?
#
# if not, current policy never fired
df = _dr16[['blockHeight', 'crankNum']].drop_duplicates()
df.groupby('blockHeight')[['crankNum']].count().sort_values('crankNum', ascending=False)
# ## @@ Older approaches
# ## Delivery statistics
#
# > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators.
# +
import gzip
import itertools
def iter_cranks(path):
"""split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple
"""
log.info('iter_cranks: %s', path)
with gzip.open(path) as f:
kernel = None
deliver = None
block = None
syscalls = None
for (ix, line) in enumerate(f):
try:
data = json.loads(line)
except json.JSONDecodeError:
log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
continue
ty = data['type']
# print(ix, data['type'], kernel, deliver)
if ty == 'import-kernel-finish':
kernel = data
deliver = None
syscalls = None
yield dict(kernel,
slogfile=path.name, line=ix)
elif ty == 'create-vat':
yield dict(slogfile=path.name,
line=ix,
time=data['time'],
type=ty,
vatID=data['vatID'],
description=data['description'],
managerType=data['managerType'],
time_kernel=kernel['time'])
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
elif ty == 'cosmic-swingset-end-block-start':
block = data
elif ty == 'cosmic-swingset-end-block-finish':
time = data['time']
time_start = block['time']
dur = time - time_start
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
time_start=time_start,
dur=dur,
blockHeight=data['blockHeight'],
blockTime=data['blockTime'],
time_kernel=time_kernel)
block = None
elif deliver is None:
if ty == 'deliver':
deliver = data
syscalls = 0
elif data['type'] == 'deliver-result':
time = data['time']
time_start = deliver['time']
dur = time - time_start
method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
compute = data['dr'][2]['compute'] if type(data['dr'][2]) is type({}) else None
if block:
blockHeight = block['blockHeight']
blockTime=block['blockTime']
else:
# odd... how do we get here without block info???
log.warning('%s:%d: missing block context', path.name, ix)
blockHeight = blockTime = np.nan
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
crankNum=data['crankNum'],
deliveryNum=data['deliveryNum'],
vatID=data['vatID'],
kd=deliver['kd'],
method=method,
syscalls=syscalls,
dr=data['dr'],
compute=compute,
time_start=time_start,
dur=dur,
blockHeight=blockHeight,
blockTime=blockTime,
time_kernel=time_kernel)
deliver = None
elif ty == 'syscall-result':
syscalls += 1
elif ty in ['clist', 'syscall']:
continue
else:
log.warning("%s:%d: expected deliver-result; got: %s", path.name, ix, ty)
deliver = None
def sample(files=50, cranks=2000, slogdir=slogdir):
return pd.DataFrame.from_records(
r
for slogfile in itertools.islice(slogdir.glob('**/*.slog.gz'), files)
for r in itertools.islice(iter_cranks(slogfile), cranks))
# files_top = sample(200, 100)
c500 = sample()
# -
show_times(
files_top[files_top.crankNum == 1][[
'slogfile', 'line', 'time', 'vatID', 'deliveryNum', 'syscalls', 'compute', 'time_kernel', 'blockHeight']
].sort_values('blockHeight').set_index(['slogfile', 'line']),
['time'])
# +
def show_times(df, cols):
out = df.copy()
for col in cols:
out[col] = pd.to_datetime(out[col], unit='s')
return out
def slogfile_summary(df):
g = df.groupby(['slogfile', 'type'])
out = g[['line']].count()
out['time_min'] = g[['time']].min().time
out['time_max'] = g[['time']].max().time
out['blockHeight_min'] = g[['blockHeight']].min().blockHeight
# out['blockHeight_max'] = g[['blockHeight']].max().blockHeight
out['crankNum_min'] = g[['crankNum']].min().crankNum
return show_times(out, ['time_min', 'time_max'])
slogfile_summary(files_top) # [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15)
# +
def stuff(df, slogfile):
return df[(df.slogfile==slogfile) &
(df.type == 'deliver-result')][['crankNum', 'vatID', 'deliveryNum', 'kd', 'line', 'blockHeight' ]]
coolex = stuff(c500, 'coolex-agorictest16-chain.slog.gz').set_index('crankNum')
mym = stuff(c500, 'mymoniker-agorictest16-chain.slog.gz').set_index('crankNum')
xwalk = pd.merge(coolex, mym, left_index=True, right_index=True)
xwalk[xwalk.kd_x != xwalk.kd_y]
# -
xwalk[xwalk.deliveryNum_y == 2801].kd_y.iloc[0]
# warner says: suppose we have 2 deliverInboundAcks
#
# when swingset tells mb device, device consults state _in RAM_ for dup ack num...
# not durable... differs between run-from-start and restart
# ## global crankNum -> vatID, deliveryNum
cranks = c500[c500['type'] == 'deliver-result']
cranks = cranks[['chain_id', 'crankNum', 'vatID', 'deliveryNum']].set_index(['chain_id', 'crankNum']).drop_duplicates().sort_index()
cranks # .sort_values('deliveryNum')
c500 = c500[~c500.line.isnull()]
show_times(c500[c500.blockHeight == 64628], ['time', 'time_start', 'blockTime'])
cranks.pivot(columns='vatID', values='deliveryNum')
cranks.plot(subplots=True)
c500[['kd']].dropna()
c500[['compute']].dropna()
# +
## reduced data set
# chain-wide deliveries
# chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
# except vatTP?
# per-validator data
# chain_id, crankNum, run (slogfile, kernel-start) -> dur
# +
# global crankNum -> vatID, deliveryNum
c500[['crankNum', 'vatID', 'deliveryNum']].set_index()
# ignore un-full blocks?
# histogram of block durations; interval between...
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
# "blockTime":1625059381 <- consensus block time is median of block times (?)
# vatID, deliveryNum -> args / syscalls
# watch out for GC esp.
# c.run(runPolicy)
# simple model: kernel says how many computrons
# refinement: computrons, syscalls
# fitness: block distribution... 10s blocks...
# blocks that aren't too big (latency, validator variance risk)
# cpu that isn't idle (throughput)
# an ideal: median block time 10s
# 80 20 %ile
# importing a contract is an outlier
# +
# median validator - existing distribution of deliveries / compute -> blocks
# supplement: study wallclock stuff
# -
show_times(c500[c500['type'] == 'deliver-result'].set_index(['crankNum', 'vatID', 'deliveryNum', 'slogfile'])
.drop(['type', 'kd', 'dr', 'time_dr', 'description', 'managerType'], axis=1).sort_index(),
['time', 'time_kernel', 'blockTime'])
# ### Missing `compute` meter info?
start1 = c500
start1[(start1['type'] == 'deliver-result') & start1.compute.isnull()]
compute_ref = start1[(start1.slogfile == 'coolex-agorictest16-chain.slog.gz') &
(start1['type'] == 'deliver-result')].set_index('crankNum')[['compute']]
compute_ref
compute_delta = start1[['slogfile', 'crankNum', 'compute']]
compute_delta = pd.merge(compute_delta, compute_ref,
left_on='crankNum', right_index=True, suffixes=['', '_ref'])
compute_delta['delta'] = (compute_delta.compute - compute_delta.compute_ref).abs()
compute_delta.sort_values('delta', ascending=False)
# +
df = start1
categories = df.vatID.apply(lambda v: int(v[1:]))
colors = cm.rainbow(np.linspace(0, 1, categories.max() + 1))
df.plot.scatter(x='compute', y='dur', c=colors[categories],
title='Deliveries (colored by vatID)',
figsize=(12, 9), ylabel="dur (sec)");
# -
start1[~start1.compute.isnull()].groupby('vatID')[['crankNum']].count().sort_values('crankNum', ascending=False)
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, figsize=(9, 6)):
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
# -
vat_rate(start1, 'v16');
df = start1.pivot(columns='vatID', values=['compute', 'dur'],
index=['vatID', 'deliveryNum', 'crankNum', 'slogfile', 'line'])
df.reset_index().set_index('deliveryNum').drop(['crankNum', 'line'], axis=1) #.plot(figsize=(12, 8));
df.reset_index().set_index('deliveryNum')[['v23']].sort_index().dropna() #.plot()
df.describe()
df[['v14']].dropna()
df.crankNum.hist();
df.deliveryNum.hist();
df.groupby('method')[['compute', 'rate']].describe()
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').head(90).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: bottom 90');
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').tail(8).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: top 8');
durByMethod.dur.sum()
# +
durByMethod = df.groupby('method')[['dur']].sum().sort_values('dur', ascending=False)
durByMethod.plot.pie(y='dur', figsize=(12, 9), autopct='%1.1f%%')
# -
df.groupby('vatID')[['rate']].describe().head(20)
df.groupby('slogfile')[['rate']].describe().head(20)
df.plot.scatter(x='deliveryNum', y='rate')
speed = df.groupby('slogfile')[['rate']].describe()[['rate'][0]][['count', 'mean', 'std']]
speed = speed.sort_values('mean', ascending=False)
speed['relative'] = speed['mean'] / speed['mean'][0]
speed
# +
def boxplot_sorted(df, by, column, **config):
df2 = pd.DataFrame({col:vals[column] for col, vals in df.groupby(by)})
meds = df2.median().sort_values()
return df2[meds.index].boxplot(**config)
ax = boxplot_sorted(df, by=["slogfile"], column="rate", rot=90, figsize=(12, 9))
ax.set_title('Validator Speed: Sample of 20 from Phase 4');
ax.set_ylabel('computrons / sec')
# -
ax = df.sort_values('crankNum').plot.scatter(x='crankNum', y='compute');
ax.set_yscale('log')
df[(df.dur < df.dur.mean() + df.dur.std()) &
(df.compute < df.compute.mean() + df.compute.std())][['compute', 'dur']].hist();
# +
df = crank_info(c500)
df = df[df.crankNum.isin(compute_ref.index)]
rate = np.polyfit(df.compute, df.dur, 1)
f = np.poly1d(rate)
df['rate'] = f(df.compute)
# df[['compute', 'dur', 'rate']].head()
print(f)
# -
ax1 = df[['compute', 'dur']].plot.scatter(x='compute', y='dur', figsize=(9, 6))
df.plot(x='compute', y='rate', color='Red', legend=False, ax=ax1);
ax1.set_title(f"{len(df)} cranks from w3m: Duration vs. Compute Meter");
ax1.set_xlabel("compute units")
ax1.set_ylabel("duration (sec)")
r = df.compute / df.dur
r.max() / r.min()
df.sort_values('rate', ascending=False).drop(['time', 'type', 'detail', 'detail_dr'], axis=1)
# ## Colophon: jupytext
#
# This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/).
#
# We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`.
#
| nb4/slogfiles.py | 52,047 | split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple
Note: line numbers are **1-based**
-*- coding: utf-8 -*- How long does a Computron take? - [build model of computron\-to\-wallclock relationship ยท Issue \3459 ยท Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459) Preface: Python Data Tools See also [shell.nix](shell.nix). + - Notebook / Scripting Authority As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context. Logging is a bit of an exception to OCap discipline, as is stderr. + - Dask Parallel Scheduler UI + - Result Store + - SLog files [rclone support for Google drive](https://rclone.org/drive/) > This contains 564GB of data from 117 participants, spread across 172 slogfiles ... ``` [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/ Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s Checks: 5 / 5, 100% Transferred: 182 / 182, 100% Elapsed time: 13m16.0s ``` + - random access with `gztool` [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021. ``` ~/projects/gztool/gztool -C -e */*.slog.gz ... ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'. ... Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used. Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ... Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'... 172 files processed 1 files processed with errors! ``` + count lines on all slogfiles in parallel TODO: if it's already in the DB, don't compute it again. 
- !sqlite3 slog4.db '.header on' '.mode column' 'select * from file_meta limit 3' + - slogfile basics Runs, Blocks, and Deliveries > split each slogfile into runs (each beginning with an import-kernel event) + +client.restart() + !sqlite3 slog4.db 'drop table run' + Compute end times - !sqlite3 slog4.db '.schema run' runs per slogfile + - agorictest-16 genesis: `2021-07-01 19:00:00` Block end start / finish events + - Separate runs by chain + + - !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3' + !sqlite3 slog4.db 'drop table blockval;' + - !sqlite3 slog4.db '.schema blockval' Consensus Block-to-Block Time + db4.execute("""drop table if exists block""") - What is the range of blocks in `agorictest-16`? + - histogram of block-to-block time delta for agorictest-16. (_Note the log scale on the y axis._) How many validators logged each block in agorictest-16? !sqlite3 slog4.db '.schema run' + db4.execute('drop table if exists blockrun16') - Slow Blocks Which runs include block 72712, which took 31 sec? Correlating block start with block end show_times(df, ['time', 'time_end']) Cranks in a Block + - + assert sign == -1? assert sign == -1? _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497) - missing compute is possible... from replay. + {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394} {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394} just block start, block end - df[df.length > 2].head(10) + https://avi.im/blag/2021/fast-sqlite-inserts/ - + + chain_id, vatID, deliveryNum -> blockHeight, kd, compute assert sign == -1? - Computron rate for just this one block? 
test empty Cranks in one long run starting at agorictest-16 genesis + log.info('block %d: no deliveries', blockHeight) run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b') - TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration? e.g. if harden weakset grew, the duration could grow while keeping computrons constant Things got slower over time. Hypothesis: GC didn't happen -> weak set got big -> weakset access time got big So computron model should not be based on this range, but rather on pre-loadgen time. When looking at comptron / wallclock, we should look at: - all getCurrentAmount calls - within a narrow range of blockHeight - that all use the same of computrons (as above) + rate = df.compute / df.dur - This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have. + try exp df['adj'] = df.sim_blk - df.blockHeight df = df[df.method != 'executeContract'] df = df[df.method == 'getCurrentAmount'] getPayout df.blockHeight = df.blockHeight - df.blockHeight.iloc[0] - + - Validator speed: 2-4x spread for `getCurrentAmount` + df.groupby('method')[['compute']].describe().loc['executeContract'] - Total delivery duration per block log=True); This wasn't a big deal during most of the chain (.25sec 75th percentile). We could model this within 2x or 3x by ignoring the spike. **TODO**: what happened during that spike? is it consensus-observable? kernel-observable? Total compute per block +by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs) + - +@@ _dr16 = provide_table( db4, 'crankrun', 65517 lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275])) [_blockrun16.blockHeight <= 65000] - deliveries from batch Are compute meter values consistent? 
+ x = compute_meter_consistent(_alld16).compute() .compute() - .compute() Computrons per block + - + - compute by block with duration simulation + - Compute rate by vat +.set_index(color) + df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()df.sort_values('dur', ascending=False)df show_rate(start1, 'v10'); vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur') fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz'] fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur') len(fastSlog[fastSlog.vatID == 'v10']) fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) .sort_values('compute', ascending=False)fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True) - exclude dynamic vat creation Add syscalls to the model _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum']) + - Did we ever do more than 1000 cranks in a block? if not, current policy never fired @@ Older approaches Delivery statistics > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators. + print(ix, data['type'], kernel, deliver) {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394} {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394} odd... how do we get here without block info??? files_top = sample(200, 100) - + out['blockHeight_max'] = g[['blockHeight']].max().blockHeight [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15) + - warner says: suppose we have 2 deliverInboundAcks when swingset tells mb device, device consults state _in RAM_ for dup ack num... not durable... 
differs between run-from-start and restart global crankNum -> vatID, deliveryNum .sort_values('deliveryNum') + reduced data set chain-wide deliveries chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute chain_id, vatID, deliveryNum -> blockHeight, kd, compute except vatTP? per-validator data chain_id, crankNum, run (slogfile, kernel-start) -> dur + global crankNum -> vatID, deliveryNum ignore un-full blocks? histogram of block durations; interval between... {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394} {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394} "blockTime":1625059381 <- consensus block time is median of block times (?) vatID, deliveryNum -> args / syscalls watch out for GC esp. c.run(runPolicy) simple model: kernel says how many computrons refinement: computrons, syscalls fitness: block distribution... 10s blocks... blocks that aren't too big (latency, validator variance risk) cpu that isn't idle (throughput) an ideal: median block time 10s 80 20 %ile importing a contract is an outlier + median validator - existing distribution of deliveries / compute -> blocks supplement: study wallclock stuff - Missing `compute` meter info? + - + df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()df.sort_values('dur', ascending=False)df show_rate(start1, 'v10'); vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur') len(fastSlog[fastSlog.vatID == 'v10']) fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) .sort_values('compute', ascending=False)fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True) -.plot(figsize=(12, 8));.plot() + - + - + df[['compute', 'dur', 'rate']].head() - Colophon: jupytext This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/). 
We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`. | 9,710 | en | 0.622159 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the CPIO extracted file-like object."""
from __future__ import unicode_literals
import unittest
from dfvfs.file_io import cpio_file_io
from dfvfs.path import cpio_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from tests.file_io import test_lib
class CPIOBinaryFileTest(test_lib.SylogTestCase):
    """Unit tests for a file-like object extracted from a binary CPIO archive."""

    def setUp(self):
        """Creates the resolver context and CPIO path specification for the tests."""
        super(CPIOBinaryFileTest, self).setUp()
        self._resolver_context = context.Context()
        archive_path = self._GetTestFilePath(['syslog.bin.cpio'])
        self._SkipIfPathNotExists(archive_path)

        parent_spec = os_path_spec.OSPathSpec(location=archive_path)
        self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
            location='/syslog', parent=parent_spec)

    def testOpenClosePathSpec(self):
        """Tests opening and closing the file using a path specification."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestGetSizeFileObject(cpio_file)
        cpio_file.close()

    def testSeek(self):
        """Tests the seek functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestSeekFileObject(cpio_file)
        cpio_file.close()

    def testRead(self):
        """Tests the read functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestReadFileObject(cpio_file)
        cpio_file.close()
class CPIOPortableASCIIFileTest(test_lib.SylogTestCase):
    """Unit tests for a file-like object extracted from a portable ASCII CPIO archive."""

    def setUp(self):
        """Creates the resolver context and CPIO path specification for the tests."""
        super(CPIOPortableASCIIFileTest, self).setUp()
        self._resolver_context = context.Context()
        archive_path = self._GetTestFilePath(['syslog.odc.cpio'])
        self._SkipIfPathNotExists(archive_path)

        parent_spec = os_path_spec.OSPathSpec(location=archive_path)
        self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
            location='/syslog', parent=parent_spec)

    def testOpenClosePathSpec(self):
        """Tests opening and closing the file using a path specification."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestGetSizeFileObject(cpio_file)
        cpio_file.close()

    def testSeek(self):
        """Tests the seek functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestSeekFileObject(cpio_file)
        cpio_file.close()

    def testRead(self):
        """Tests the read functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestReadFileObject(cpio_file)
        cpio_file.close()
class CPIONewASCIIFileTest(test_lib.SylogTestCase):
    """Unit tests for a file-like object extracted from a new ASCII CPIO archive."""

    def setUp(self):
        """Creates the resolver context and CPIO path specification for the tests."""
        super(CPIONewASCIIFileTest, self).setUp()
        self._resolver_context = context.Context()
        archive_path = self._GetTestFilePath(['syslog.newc.cpio'])
        self._SkipIfPathNotExists(archive_path)

        parent_spec = os_path_spec.OSPathSpec(location=archive_path)
        self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
            location='/syslog', parent=parent_spec)

    def testOpenClosePathSpec(self):
        """Tests opening and closing the file using a path specification."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestGetSizeFileObject(cpio_file)
        cpio_file.close()

    def testSeek(self):
        """Tests the seek functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestSeekFileObject(cpio_file)
        cpio_file.close()

    def testRead(self):
        """Tests the read functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestReadFileObject(cpio_file)
        cpio_file.close()
class CPIONewASCIIFileWithChecksumTest(test_lib.SylogTestCase):
    """Unit tests for a file-like object extracted from a CPIO archive with checksums."""

    def setUp(self):
        """Creates the resolver context and CPIO path specification for the tests."""
        super(CPIONewASCIIFileWithChecksumTest, self).setUp()
        self._resolver_context = context.Context()
        archive_path = self._GetTestFilePath(['syslog.crc.cpio'])
        self._SkipIfPathNotExists(archive_path)

        parent_spec = os_path_spec.OSPathSpec(location=archive_path)
        self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
            location='/syslog', parent=parent_spec)

    def testOpenClosePathSpec(self):
        """Tests opening and closing the file using a path specification."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestGetSizeFileObject(cpio_file)
        cpio_file.close()

    def testSeek(self):
        """Tests the seek functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestSeekFileObject(cpio_file)
        cpio_file.close()

    def testRead(self):
        """Tests the read functionality."""
        cpio_file = cpio_file_io.CPIOFile(self._resolver_context)
        cpio_file.open(path_spec=self._cpio_path_spec)
        self._TestReadFileObject(cpio_file)
        cpio_file.close()
# Run all CPIO file-like object tests when this module is invoked directly.
if __name__ == '__main__':
    unittest.main()
| tests/file_io/cpio_file_io.py | 5,812 | The unit test for a CPIO extracted file-like object.
The unit test for a CPIO extracted file-like object.
The unit test for a CPIO extracted file-like object.
The unit test for a CPIO extracted file-like object.
Sets up the needed objects used throughout the test.
Sets up the needed objects used throughout the test.
Sets up the needed objects used throughout the test.
Sets up the needed objects used throughout the test.
Test the open and close functionality using a path specification.
Test the open and close functionality using a path specification.
Test the open and close functionality using a path specification.
Test the open and close functionality using a path specification.
Test the read functionality.
Test the read functionality.
Test the read functionality.
Test the read functionality.
Test the seek functionality.
Test the seek functionality.
Test the seek functionality.
Test the seek functionality.
Tests for the CPIO extracted file-like object.
!/usr/bin/env python -*- coding: utf-8 -*- | 1,010 | en | 0.85021 |
import time
import json
from anchore_engine.subsys import logger
def get_docker_registry_userpw(registry_record):
    """Extract the (user, password) pair from a registry credential record.

    For 'awsecr' registries the credentials are stored as a JSON blob in
    'registry_meta' whose 'authorizationToken' is a "user:password" string;
    all other registry types store them directly in 'registry_user' /
    'registry_pass'.

    :param registry_record: dict describing a configured registry
    :return: (user, password) tuple
    :raises Exception: if the record is malformed or the ECR metadata cannot be parsed
    """
    user = pw = None
    try:
        if registry_record.get('registry_type') == 'awsecr':
            try:
                ecr_creds = json.loads(registry_record['registry_meta'])
            except Exception as err:
                raise Exception("cannot access/parse registry metadata for awsecr registry type - exception: {}".format(str(err)))
            docker_auth_token = ecr_creds['authorizationToken']
            # The token embeds the password after the first colon; the password
            # itself may contain colons, so split at most once.
            user, pw = docker_auth_token.split(":", 1)
        else:
            user = registry_record['registry_user']
            pw = registry_record['registry_pass']
    except Exception as err:
        logger.error("cannot fetch registry creds from registry record - exception: " + str(err))
        # bare `raise` preserves the original traceback (unlike `raise err`)
        raise
    return user, pw
def get_creds_by_registry(registry, repository, registry_creds=None):
    """Look up credentials for a registry/repository from configured records.

    Records are checked most-specific first (longest 'registry' string).
    A matching record that is not in the 'active' state is only used when its
    last failure timestamp ('record_state_val') is older than 60 seconds;
    otherwise the registry is reported as unavailable.

    :param registry: registry hostname (e.g. 'docker.io')
    :param repository: repository name within the registry
    :param registry_creds: list of registry credential records, or None
    :return: (user, password, registry_verify) tuple; all None if no match
    :raises Exception: if the best matching registry is temporarily unavailable
    """
    user = pw = registry_verify = None
    if registry_creds:
        # Prefer the most specific (longest) registry string match.
        registry_creds.sort(key=lambda x: len(x['registry']), reverse=True)
        for registry_record in registry_creds:
            if not registry_record_matches(registry_record['registry'], registry, repository):
                continue
            if registry_record['record_state_key'] not in ['active']:
                try:
                    last_try = int(registry_record['record_state_val'])
                except (KeyError, TypeError, ValueError):
                    # a bare except here previously hid unrelated failures
                    last_try = 0
                if (int(time.time()) - last_try) < 60:
                    logger.debug("SKIPPING REGISTRY ATTEMPT: " + str(registry_record['record_state_key']))
                    raise Exception("registry not available - " + str(registry_record['record_state_key']))
            user, pw = get_docker_registry_userpw(registry_record)
            registry_verify = registry_record['registry_verify']
            break
    return user, pw, registry_verify
def registry_record_matches(registry_record_str, registry, repository):
    """
    Decide whether a credential record pattern applies to a registry/repository.

    :param registry_record_str: the string with optional trailing wildcard to match
        against the registry/repository combo
    :param registry: the registry to match against
    :param repository: the repository to match against
    :return: bool true if a match, false if not
    """
    target = '{}/{}'.format(registry, repository)
    if registry_record_str[-1] == '*':
        # wildcard record: prefix match on "registry/repository"
        return target.startswith(registry_record_str[:-1])
    if '/' in registry_record_str:
        # record pins a specific repository
        return registry_record_str == target
    # record names the registry only
    return registry_record_str == registry
| anchore_engine/auth/common.py | 2,715 | :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo
:param registry: the registry to match against
:param repository: the repository to match against
:return: bool true if a match, false if not | 252 | en | 0.622358 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data, match_ci_width_truncated
from ax.models.types import TConfig
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast_list
from sklearn.preprocessing import PowerTransformer
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger(__name__)
class PowerTransformY(Transform):
    """Transform the values to look as normally distributed as possible.

    This fits a power transform to the data with the goal of making the transformed
    values look as normally distributed as possible. We use Yeo-Johnson
    (https://www.stat.umn.edu/arc/yjpower.pdf), which can handle both positive and
    negative values.

    While the transform seems to be quite robust, it probably makes sense to apply a
    bit of winsorization and also standardize the inputs before applying the power
    transform. The power transform will automatically standardize the data so the
    data will remain standardized.

    The transform can't be inverted for all values, so we apply clipping to move
    values to the image of the transform. This behavior can be controlled via the
    `clip_mean` setting.
    """

    def __init__(
        self,
        search_space: SearchSpace,
        observation_features: List[ObservationFeatures],
        observation_data: List[ObservationData],
        modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
        config: Optional[TConfig] = None,
    ) -> None:
        """Fits one power transform per metric listed in ``config["metrics"]``.

        ``config["clip_mean"]`` (default True) controls whether means outside
        the image of the transform are clipped when untransforming.
        """
        if config is None:
            raise ValueError("PowerTransform requires a config.")
        # pyre-fixme[6]: Same issue as for LogY
        metric_names = list(config.get("metrics", []))
        if len(metric_names) == 0:
            raise ValueError("Must specify at least one metric in the config.")
        self.clip_mean = config.get("clip_mean", True)
        self.metric_names = metric_names
        Ys = get_data(observation_data=observation_data, metric_names=metric_names)
        self.power_transforms = _compute_power_transforms(Ys=Ys)
        # Image of the inverse transform; used to clip before untransforming.
        self.inv_bounds = _compute_inverse_bounds(self.power_transforms, tol=1e-10)

    def transform_observation_data(
        self,
        observation_data: List[ObservationData],
        observation_features: List[ObservationFeatures],
    ) -> List[ObservationData]:
        """Applies the fitted power transform to the observation data in place."""
        for obsd in observation_data:
            for i, m in enumerate(obsd.metric_names):
                if m in self.metric_names:
                    transform = self.power_transforms[m].transform
                    obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
                        mean=obsd.means[i],
                        variance=obsd.covariance[i, i],
                        transform=lambda y: transform(np.array(y, ndmin=2)),
                        lower_bound=-np.inf,
                        upper_bound=np.inf,
                    )
        return observation_data

    def untransform_observation_data(
        self,
        observation_data: List[ObservationData],
        observation_features: List[ObservationFeatures],
    ) -> List[ObservationData]:
        """Applies the inverse power transform to the observation data in place.

        Means outside the image of the transform are clipped to the bounds
        unless ``clip_mean`` is False, in which case a ValueError is raised.
        """
        for obsd in observation_data:
            for i, m in enumerate(obsd.metric_names):
                if m in self.metric_names:
                    l, u = self.inv_bounds[m]
                    transform = self.power_transforms[m].inverse_transform
                    if not self.clip_mean and (obsd.means[i] < l or obsd.means[i] > u):
                        raise ValueError(
                            "Can't untransform mean outside the bounds without clipping"
                        )
                    obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
                        mean=obsd.means[i],
                        variance=obsd.covariance[i, i],
                        transform=lambda y: transform(np.array(y, ndmin=2)),
                        lower_bound=l,
                        upper_bound=u,
                        clip_mean=True,
                    )
        return observation_data

    def transform_optimization_config(
        self,
        optimization_config: OptimizationConfig,
        modelbridge: Optional[modelbridge_module.base.ModelBridge],
        fixed_features: ObservationFeatures,
    ) -> OptimizationConfig:
        """Maps absolute outcome-constraint bounds into the transformed space.

        Relative constraints on transformed metrics, and scalarized constraints
        involving transformed metrics, are not supported and raise.
        """
        for c in optimization_config.all_constraints:
            if isinstance(c, ScalarizedOutcomeConstraint):
                c_metric_names = [metric.name for metric in c.metrics]
                intersection = set(c_metric_names) & set(self.metric_names)
                if intersection:
                    raise NotImplementedError(
                        f"PowerTransformY cannot be used for metric(s) {intersection} "
                        "that are part of a ScalarizedOutcomeConstraint."
                    )
            elif c.metric.name in self.metric_names:
                if c.relative:
                    raise ValueError(
                        f"PowerTransformY cannot be applied to metric {c.metric.name} "
                        "since it is subject to a relative constraint."
                    )
                else:
                    transform = self.power_transforms[c.metric.name].transform
                    c.bound = transform(np.array(c.bound, ndmin=2)).item()
        return optimization_config
def _compute_power_transforms(
    Ys: Dict[str, List[float]]
) -> Dict[str, PowerTransformer]:
    """Fit one Yeo-Johnson power transform per metric.

    Args:
        Ys: Mapping from metric name to its observed values.

    Returns:
        Mapping from metric name to a fitted ``PowerTransformer``.
    """
    fitted = {}
    for metric_name, values in Ys.items():
        # sklearn expects a 2-d array of shape (n_samples, 1)
        column = np.array(values)[:, None]
        fitted[metric_name] = PowerTransformer(method="yeo-johnson").fit(column)
    return fitted
def _compute_inverse_bounds(
power_transforms: Dict[str, PowerTransformer], tol: float = 1e-10
) -> Dict[str, Tuple[float, float]]:
"""Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X < -1 / lambda
0 <= lambda <= 2: X is unbounded
lambda > 2: X > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
"""
inv_bounds = defaultdict()
for k, pt in power_transforms.items():
bounds = [-np.inf, np.inf]
mu, sigma = pt._scaler.mean_.item(), pt._scaler.scale_.item() # pyre-ignore
lambda_ = pt.lambdas_.item() # pyre-ignore
if lambda_ < -1 * tol:
bounds[1] = (-1.0 / lambda_ - mu) / sigma
elif lambda_ > 2.0 + tol:
bounds[0] = (1.0 / (2.0 - lambda_) - mu) / sigma
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds
| ax/modelbridge/transforms/power_transform_y.py | 8,041 | Transform the values to look as normally distributed as possible.
This fits a power transform to the data with the goal of making the transformed
values look as normally distributed as possible. We use Yeo-Johnson
(https://www.stat.umn.edu/arc/yjpower.pdf), which can handle both positive and
negative values.
While the transform seems to be quite robust, it probably makes sense to apply a
bit of winsorization and also standardize the inputs before applying the power
transform. The power transform will automatically standardize the data so the
data will remain standardized.
The transform can't be inverted for all values, so we apply clipping to move
values to the image of the transform. This behavior can be controlled via the
`clip_mean` setting.
Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X < -1 / lambda
0 <= lambda <= 2: X is unbounded
lambda > 2: X > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
Compute power transforms.
Winsorize observation data in place.
Winsorize observation data in place.
!/usr/bin/env python3 Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. import as module to make sphinx-autodoc-typehints happy noqa F401 pragma: no cover pyre-fixme[6]: Same issue as for LogY Need to unsqueeze the last dimension pyre-ignore pyre-ignore | 1,944 | en | 0.847718 |
#
# Copyright 2019 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg
dummy_serial: A dummy/mock implementation of a serial port for testing purposes.
"""
__author__ = "Jonas Berg"
__license__ = "Apache License, Version 2.0"
import sys
import time
DEFAULT_TIMEOUT = 0.01
"""The default timeot value in seconds. Used if not set by the constructor."""
DEFAULT_BAUDRATE = 19200
"""The default baud rate. Used if not set by the constructor."""
VERBOSE = False
"""Set this to :const:`True` for printing the communication, and also details on the port initialization.
Might be monkey-patched in the calling test module.
"""
RESPONSES = {}
"""A dictionary of respones from the dummy serial port.
The key is the message (string) sent to the dummy serial port, and the item is the response (string)
from the dummy serial port.
Intended to be monkey-patched in the calling test module.
"""
RESPONSES["EXAMPLEREQUEST"] = "EXAMPLERESPONSE"
DEFAULT_RESPONSE = "NotFoundInResponseDictionary"
"""Response when no matching message (key) is found in the look-up dictionary.
Should not be an empty string, as that is interpreted as "no data available on port".
Might be monkey-patched in the calling test module.
"""
NO_DATA_PRESENT = ""
class Serial:
    """Dummy (mock) serial port for testing purposes.

    Mimics the behavior of a serial port as defined by the `pySerial <https://github.com/pyserial/pyserial>`_ module.

    Args:
        * port:
        * timeout:

    Note:
        As the portname argument not is used properly, only one port on :mod:`dummy_serial` can be used simultaneously.
    """

    def __init__(self, *args, **kwargs):
        self._waiting_data = NO_DATA_PRESENT
        self._isOpen = True
        self.port = kwargs["port"]  # Serial port name.
        self._initial_port_name = self.port  # Initial name given to the serial port

        # dict.get() with a default replaces the previous bare try/except,
        # which would have silently swallowed any unrelated error.
        self.timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
        self.baudrate = kwargs.get("baudrate", DEFAULT_BAUDRATE)

        if VERBOSE:
            _print_out("\nDummy_serial: Initializing")
            _print_out("dummy_serial initialization args: " + repr(args))
            _print_out("dummy_serial initialization kwargs: " + repr(kwargs) + "\n")

    def __repr__(self):
        """String representation of the dummy_serial object"""
        return "{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})".format(
            self.__module__,
            self.__class__.__name__,
            id(self),
            self._isOpen,
            self.port,
            self.timeout,
            self._waiting_data,
        )

    @property
    def is_open(self):
        """Whether the port is currently open."""
        return self._isOpen

    def reset_input_buffer(self):
        """No-op, provided for pySerial API compatibility."""
        pass

    def reset_output_buffer(self):
        """No-op, provided for pySerial API compatibility."""
        pass

    def open(self):
        """Open a (previously initialized) port on dummy_serial."""
        if VERBOSE:
            _print_out("\nDummy_serial: Opening port\n")

        if self._isOpen:
            raise IOError("Dummy_serial: The port is already open")

        self._isOpen = True
        self.port = self._initial_port_name

    def close(self):
        """Close a port on dummy_serial."""
        if VERBOSE:
            _print_out("\nDummy_serial: Closing port\n")

        if not self._isOpen:
            raise IOError("Dummy_serial: The port is already closed")

        self._isOpen = False
        self.port = None

    def write(self, inputdata):
        """Write to a port on dummy_serial.

        Args:
            inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
            for subsequent read operations.

        Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
        """
        if VERBOSE:
            _print_out(
                "\nDummy_serial: Writing to port. Given:" + repr(inputdata) + "\n"
            )

        if sys.version_info[0] > 2:
            if not type(inputdata) == bytes:
                raise TypeError(
                    "The input must be type bytes. Given:" + repr(inputdata)
                )
            inputstring = str(inputdata, encoding="latin1")
        else:
            inputstring = inputdata

        if not self._isOpen:
            raise IOError(
                "Dummy_serial: Trying to write, but the port is not open. Given:"
                + repr(inputdata)
            )

        # Look up which data that should be waiting for subsequent read commands.
        # dict.get() replaces a bare try/except that hid unrelated errors.
        self._waiting_data = RESPONSES.get(inputstring, DEFAULT_RESPONSE)

    def read(self, numberOfBytes):
        """Read from a port on dummy_serial.

        The response is dependent on what was written last to the port on dummy_serial,
        and what is defined in the :data:`RESPONSES` dictionary.

        Args:
            numberOfBytes (int): For compability with the real function.

        Returns a **string** for Python2 and **bytes** for Python3.

        If the response is shorter than numberOfBytes, it will sleep for timeout.
        If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.
        """
        if VERBOSE:
            _print_out(
                "\nDummy_serial: Reading from port (max length {!r} bytes)".format(
                    numberOfBytes
                )
            )

        if numberOfBytes < 0:
            raise IOError(
                "Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}".format(
                    numberOfBytes
                )
            )

        if not self._isOpen:
            raise IOError("Dummy_serial: Trying to read, but the port is not open.")

        # Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
        if self._waiting_data == DEFAULT_RESPONSE:
            returnstring = self._waiting_data
        elif numberOfBytes == len(self._waiting_data):
            returnstring = self._waiting_data
            self._waiting_data = NO_DATA_PRESENT
        elif numberOfBytes < len(self._waiting_data):
            if VERBOSE:
                _print_out(
                    "Dummy_serial: The numberOfBytes to read is smaller than the available data. "
                    + "Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}".format(
                        self._waiting_data, len(self._waiting_data), numberOfBytes
                    )
                )
            returnstring = self._waiting_data[:numberOfBytes]
            self._waiting_data = self._waiting_data[numberOfBytes:]
        else:  # Wait for timeout, as we have asked for more data than available
            if VERBOSE:
                _print_out(
                    "Dummy_serial: The numberOfBytes to read is larger than the available data. "
                    + "Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}".format(
                        self._waiting_data, len(self._waiting_data), numberOfBytes
                    )
                )
            time.sleep(self.timeout)
            returnstring = self._waiting_data
            self._waiting_data = NO_DATA_PRESENT

        # TODO Adapt the behavior to better mimic the Windows behavior

        if VERBOSE:
            _print_out(
                "Dummy_serial read return data: {!r} (has length {})\n".format(
                    returnstring, len(returnstring)
                )
            )

        if sys.version_info[0] > 2:  # Convert types to make it python3 compatible
            return bytes(returnstring, encoding="latin1")
        else:
            return returnstring
def _print_out(inputstring):
"""Print the inputstring. To make it compatible with Python2 and Python3."""
sys.stdout.write(inputstring + "\n")
| dummy_serial.py | 8,611 | Dummy (mock) serial port for testing purposes.
Mimics the behavior of a serial port as defined by the `pySerial <https://github.com/pyserial/pyserial>`_ module.
Args:
* port:
* timeout:
Note:
As the portname argument not is used properly, only one port on :mod:`dummy_serial` can be used simultaneously.
String representation of the dummy_serial object
Print the inputstring. To make it compatible with Python2 and Python3.
Close a port on dummy_serial.
Open a (previously initialized) port on dummy_serial.
Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compability with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the response is shorter than numberOfBytes, it will sleep for timeout.
If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.
Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
.. moduleauthor:: Jonas Berg
dummy_serial: A dummy/mock implementation of a serial port for testing purposes.
Copyright 2019 Jonas Berg Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Serial port name. Initial name given to the serial port Look up which data that should be waiting for subsequent read commands Do the actual reading from the waiting data, and simulate the influence of numberOfBytes Wait for timeout, as we have asked for more data than available TODO Adapt the behavior to better mimic the Windows behavior Convert types to make it python3 compatible | 2,334 | en | 0.827235 |
#!/usr/bin/env python
import os
from setuptools import setup
from setuptools import find_packages
import sys
from financialdatapy import __version__ as VERSION
# 'setup.py publish' shortcut: build and upload the distribution, then exit.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist bdist_wheel')
    os.system('twine upload dist/*')
    sys.exit()

description = 'Extract financial data of a company.'

# Read the long description with an explicit encoding so installation does not
# fail on platforms whose default locale cannot decode the README (e.g. cp1252
# on Windows).
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

install_requires = [
    'pandas>=1.4.0',
    'requests>=2.27.1',
    'xmltodict>=0.12.0',
    'python-dotenv>=0.19.2',
    'beautifulsoup4>=4.10.0',
    'lxml>=4.7.1',
    'user_agent>=0.1.10',
]

project_urls = {
    'Source': 'https://github.com/choi-jiwoo/financialdatapy',
}

setup(
    name='financialdatapy',
    version=VERSION,
    author='Choi Jiwoo',
    author_email='cho2.jiwoo@gmail.com',
    description=description,
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    python_requires='>=3.10',
    keywords=['python', 'stock', 'finance'],
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Education',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    project_urls=project_urls,
)
| setup.py | 1,452 | !/usr/bin/env python 'setup.py publish' shortcut. | 49 | en | 0.156986 |
# IMPORT MANAGEMENT
# gevent's monkey patching must happen before other networking imports;
# if gevent is missing, install the requirements on the fly and retry.
try:
    import gevent.monkey
except ModuleNotFoundError:
    import os
    os.system('pip install -r requirements.txt')
    import gevent.monkey

gevent.monkey.patch_all()  # patch everything

import colorama
colorama.init(autoreset=True)

import discord.commands
import asyncio
import discord
import dotenv
import os

# IMPORTS
from discord.ext import commands
from cogs.helpers import config, management
from discord_together import DiscordTogether

# SETTINGS
COLOR = config.load()['color-primary']
TESTING_MODE = management.testing_mode()
PREFIX = '//'

# SETUP
dotenv.load_dotenv()  # load variables from the .env file into the environment
token = os.getenv('DISCORD_TOKEN')
client = commands.Bot(command_prefix=PREFIX, intents=discord.Intents.all())
async def status_task():
    """Periodically (re)apply the bot's presence text.

    Sleeps between updates: the original loop awaited change_presence
    back-to-back with no delay, flooding the Discord gateway with presence
    updates and risking rate limiting.
    """
    while True:
        await client.change_presence(activity=discord.Game(f'v0.5ใปopen source'))
        await asyncio.sleep(300)  # re-apply every 5 minutes
@client.event
async def on_ready():
    """Runs once the gateway connection is ready: records the start time,
    reflects testing mode in the presence, and starts background tasks."""
    management.set_start_time()
    if management.testing_mode():
        # idle status marks this instance as a test deployment
        await client.change_presence(status=discord.Status.idle)
    print(colorama.Fore.GREEN + 'ONLINE as', client.user)
    client.togetherControl = await DiscordTogether(token)
    client.loop.create_task(status_task())
# load cogs
# credit: https://youtu.be/vQw8cFfZPx0
for filename in os.listdir(os.getcwd() + '/src/cogs/'):
    if filename.endswith('.py'):
        client.load_extension(f'cogs.{filename[:-3]}')

try:
    client.run(token)  # run bot with the token set in the .env file
except Exception as error:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit and
    # hide the real cause; report it alongside the token hint.
    print(colorama.Fore.RED + 'Unable to run the client. Please check your bot token.')
    print(colorama.Fore.RED + str(error))
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from datetime import datetime
from sqlalchemy import Column, Unicode, Integer
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.titles.series import name_to_re
from flexget.db_schema import versioned_base
log = logging.getLogger('pogcal_acquired')
Base = versioned_base('pogcal_acquired', 0)
session = requests.Session(max_retries=3)
class PogcalShow(Base):
    """Cache table mapping a lowercased series name to its pogdesign show id."""
    __tablename__ = 'pogcal_shows'
    # The id comes from pogdesign itself, hence autoincrement=False.
    id = Column(Integer, primary_key=True, autoincrement=False, nullable=False)
    name = Column(Unicode)
class PogcalAcquired(object):
    """Marks accepted episodes as acquired (watched) in the pogdesign calendar."""

    schema = {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'password': {'type': 'string'}
        },
        'required': ['username', 'password'],
        'additionalProperties': False
    }

    @plugin.priority(-255)
    def on_task_output(self, task, config):
        """Logs in to the pogdesign calendar and marks accepted episodes as watched."""
        if not task.accepted and not task.options.test:
            return
        try:
            result = session.post('http://www.pogdesign.co.uk/cat/',
                                  data={'username': config['username'],
                                        'password': config['password'],
                                        'sub_login': 'Account Login'})
        except requests.RequestException as e:
            log.error('Error logging in to pog calendar: %s' % e)
            return
        if 'logout' not in result.text:
            log.error('Username/password for pogdesign calendar appear to be incorrect.')
            return
        elif task.options.test:
            log.verbose('Successfully logged in to pogdesign calendar.')
        for entry in task.accepted:
            # Only episodic entries can be marked on the calendar.
            if not entry.get('series_name') or not entry.get('series_id_type') == 'ep':
                continue
            show_id = self.find_show_id(entry['series_name'], task.session)
            if not show_id:
                log.debug('Could not find pogdesign calendar id for `%s`' % entry['series_name'])
                continue
            if task.options.test:
                # fixed typo in log message: "calenadar" -> "calendar"
                log.verbose('Would mark %s %s in pogdesign calendar.' % (entry['series_name'], entry['series_id']))
                continue
            else:
                log.verbose('Marking %s %s in pogdesign calendar.' % (entry['series_name'], entry['series_id']))
            shid = '%s-%s-%s/%s-%s' % (show_id, entry['series_season'], entry['series_episode'],
                                       datetime.now().month, datetime.now().year)
            try:
                session.post('http://www.pogdesign.co.uk/cat/watchhandle',
                             data={'watched': 'adding', 'shid': shid})
            except requests.RequestException as e:
                log.error('Error marking %s %s in pogdesign calendar: %s' %
                          (entry['series_name'], entry['series_id'], e))

    def find_show_id(self, show_name, db_sess):
        """Returns the pogdesign id for `show_name`, caching lookups in the database."""
        # Check if we have this show id cached
        show_name = show_name.lower()
        db_show = db_sess.query(PogcalShow).filter(PogcalShow.name == show_name).first()
        if db_show:
            return db_show.id
        try:
            page = session.get('http://www.pogdesign.co.uk/cat/showselect.php')
        except requests.RequestException as e:
            log.error('Error looking up show list from pogdesign calendar: %s' % e)
            return
        # Try to find the show id from pogdesign show list
        show_re = name_to_re(None, show_name)
        soup = get_soup(page.content)
        search = re.compile(show_re, flags=re.I)
        show = soup.find(text=search)
        if show:
            # renamed local `id` -> `pog_id` to avoid shadowing the builtin
            pog_id = int(show.previous['value'])
            db_sess.add(PogcalShow(id=pog_id, name=show_name))
            return pog_id
        else:
            log.verbose('Could not find pogdesign calendar id for show `%s`' % show_re)
@event('plugin.register')
def register_plugin():
plugin.register(PogcalAcquired, 'pogcal_acquired', api_ver=2)
| flexget/plugins/services/pogcal_acquired.py | 4,154 | Check if we have this show id cached Try to find the show id from pogdesign show list | 85 | en | 0.857386 |
import copy
import datetime
import decimal
import inspect
import json
import logging
import traceback
import uuid
import warnings
from collections import Counter, defaultdict, namedtuple
from collections.abc import Hashable
from functools import wraps
from typing import List
from dateutil.parser import parse
from great_expectations import __version__ as ge_version
from great_expectations.core.evaluation_parameters import build_evaluation_parameters
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import (
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.core.expectation_validation_result import (
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.run_identifier import RunIdentifier
from great_expectations.data_asset.util import (
parse_result_format,
recursively_convert_to_json_serializable,
)
from great_expectations.exceptions import GreatExpectationsError
from great_expectations.marshmallow__shade import ValidationError
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Route warnings.warn() output through the logging system (the "py.warnings"
# logger) instead of writing directly to stderr.
logging.captureWarnings(True)
class DataAsset:
    """Base class for objects that evaluate expectations and manage an ExpectationSuite.

    Subclasses (e.g. dataset implementations) provide the concrete ``expect_*``
    methods; this class supplies suite management, the ``expectation`` decorator,
    and ``validate``.
    """

    # This should in general only be changed when a subclass *adds expectations* or *changes expectation semantics*
    # That way, multiple backends can implement the same data_asset_type
    _data_asset_type = "DataAsset"
    def __init__(self, *args, **kwargs):
        """
        Initialize the DataAsset.

        :param profiler (profiler class) = None: The profiler that should be run on the data_asset to
            build a baseline expectation suite.

        Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
        Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments
        so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of
        *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the
        support for the profiler parameter not obvious from the signature.
        """
        # Pop all DataAsset-specific kwargs *before* calling super().__init__ so
        # that cooperating parent classes (e.g. a DataFrame) never see them.
        interactive_evaluation = kwargs.pop("interactive_evaluation", True)
        profiler = kwargs.pop("profiler", None)
        expectation_suite = kwargs.pop("expectation_suite", None)
        expectation_suite_name = kwargs.pop("expectation_suite_name", None)
        data_context = kwargs.pop("data_context", None)
        # Default batch_kwargs carry a fresh uuid1-based batch id when none are given.
        batch_kwargs = kwargs.pop(
            "batch_kwargs", BatchKwargs(ge_batch_id=str(uuid.uuid1()))
        )
        batch_parameters = kwargs.pop("batch_parameters", {})
        batch_markers = kwargs.pop("batch_markers", {})
        # Legacy hook removed; profiler replaces autoinspect_func.
        if "autoinspect_func" in kwargs:
            warnings.warn(
                "Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).",
                category=DeprecationWarning,
            )
        super().__init__(*args, **kwargs)
        self._config = {"interactive_evaluation": interactive_evaluation}
        self._initialize_expectations(
            expectation_suite=expectation_suite,
            expectation_suite_name=expectation_suite_name,
        )
        self._data_context = data_context
        self._batch_kwargs = BatchKwargs(batch_kwargs)
        self._batch_markers = batch_markers
        self._batch_parameters = batch_parameters
        # This special state variable tracks whether a validation run is going on, which will disable
        # saving expectation config objects
        self._active_validation = False
        # Profiling (if requested) runs only after all state above is in place.
        if profiler is not None:
            profiler.profile(self)
        if data_context and hasattr(data_context, "_expectation_explorer_manager"):
            self.set_default_expectation_argument("include_config", True)
def list_available_expectation_types(self):
keys = dir(self)
return [
expectation for expectation in keys if expectation.startswith("expect_")
]
def autoinspect(self, profiler):
"""Deprecated: use profile instead.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
Returns:
tuple(expectation_suite, validation_results)
"""
warnings.warn(
"The term autoinspect is deprecated and will be removed in a future release. Please use 'profile'\
instead."
)
expectation_suite, validation_results = profiler.profile(self)
return expectation_suite, validation_results
def profile(self, profiler, profiler_configuration=None):
"""Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
profiler_configuration: Optional profiler configuration dict
Returns:
tuple(expectation_suite, validation_results)
"""
expectation_suite, validation_results = profiler.profile(
self, profiler_configuration
)
return expectation_suite, validation_results
# TODO: add warning if no expectation_explorer_manager and how to turn on
def edit_expectation_suite(self):
return self._data_context._expectation_explorer_manager.edit_expectation_suite(
self
)
    @classmethod
    def expectation(cls, method_arg_names):
        """Manages configuration and running of expectation objects.

        Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator \
        used by great expectations to manage expectation configurations.

        Args:
            method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation \
                (typically the result of inspection). Positional arguments are explicitly mapped to \
                keyword arguments when the expectation is run.

        Notes:
            Intermediate decorators that call the core @expectation decorator will most likely need to pass their \
            decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset \
            column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the \
            signature from the implementing method.

            @expectation intercepts and takes action based on the following parameters:
                * include_config (boolean or None) : \
                    If True, then include the generated expectation config as part of the result object. \
                    For more detail, see :ref:`include_config`.
                * catch_exceptions (boolean or None) : \
                    If True, then catch exceptions and include them as part of the result object. \
                    For more detail, see :ref:`catch_exceptions`.
                * result_format (str or None) : \
                    Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                    For more detail, see :ref:`result_format <result_format>`.
                * meta (dict or None): \
                    A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                    modification. For more detail, see :ref:`meta`.
        """
        def outer_wrapper(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                # Get the name of the method
                method_name = func.__name__
                # Combine all arguments into a single new "all_args" dictionary to name positional parameters
                all_args = dict(zip(method_arg_names, args))
                all_args.update(kwargs)
                # Unpack display parameters; remove them from all_args if appropriate
                # (falling back to this asset's default_expectation_args when not supplied).
                if "include_config" in kwargs:
                    include_config = kwargs["include_config"]
                    del all_args["include_config"]
                else:
                    include_config = self.default_expectation_args["include_config"]
                if "catch_exceptions" in kwargs:
                    catch_exceptions = kwargs["catch_exceptions"]
                    del all_args["catch_exceptions"]
                else:
                    catch_exceptions = self.default_expectation_args["catch_exceptions"]
                if "result_format" in kwargs:
                    result_format = kwargs["result_format"]
                else:
                    result_format = self.default_expectation_args["result_format"]
                # Extract the meta object for use as a top-level expectation_config holder
                if "meta" in kwargs:
                    meta = kwargs["meta"]
                    del all_args["meta"]
                else:
                    meta = None
                # Get the signature of the inner wrapper:
                # only forward result_format if the implementing method accepts it.
                argspec = inspect.getfullargspec(func)[0][1:]
                if "result_format" in argspec:
                    all_args["result_format"] = result_format
                else:
                    if "result_format" in all_args:
                        del all_args["result_format"]
                all_args = recursively_convert_to_json_serializable(all_args)
                # Patch in PARAMETER args, and remove locally-supplied arguments
                # This will become the stored config
                expectation_args = copy.deepcopy(all_args)
                # Substitute evaluation parameters (e.g. $PARAMETER references) into the args.
                if self._expectation_suite.evaluation_parameters:
                    (
                        evaluation_args,
                        substituted_parameters,
                    ) = build_evaluation_parameters(
                        expectation_args,
                        self._expectation_suite.evaluation_parameters,
                        self._config.get("interactive_evaluation", True),
                        self._data_context,
                    )
                else:
                    (
                        evaluation_args,
                        substituted_parameters,
                    ) = build_evaluation_parameters(
                        expectation_args,
                        None,
                        self._config.get("interactive_evaluation", True),
                        self._data_context,
                    )
                # Construct the expectation_config object
                expectation_config = ExpectationConfiguration(
                    expectation_type=method_name, kwargs=expectation_args, meta=meta
                )
                raised_exception = False
                exception_traceback = None
                exception_message = None
                # Finally, execute the expectation method itself
                # (skipped entirely when interactive evaluation is off and no validation is active).
                if (
                    self._config.get("interactive_evaluation", True)
                    or self._active_validation
                ):
                    try:
                        return_obj = func(self, **evaluation_args)
                        if isinstance(return_obj, dict):
                            return_obj = ExpectationValidationResult(**return_obj)
                    except Exception as err:
                        if catch_exceptions:
                            raised_exception = True
                            exception_traceback = traceback.format_exc()
                            exception_message = "{}: {}".format(
                                type(err).__name__, str(err)
                            )
                            return_obj = ExpectationValidationResult(success=False)
                        else:
                            raise err
                else:
                    return_obj = ExpectationValidationResult(
                        expectation_config=copy.deepcopy(expectation_config)
                    )
                # If validate has set active_validation to true, then we do not save the config to avoid
                # saving updating expectation configs to the same suite during validation runs
                if self._active_validation is True:
                    stored_config = expectation_config
                else:
                    # Append the expectation to the config.
                    stored_config = self._expectation_suite.add_expectation(
                        expectation_config
                    )
                if include_config:
                    return_obj.expectation_config = copy.deepcopy(stored_config)
                # If there was no interactive evaluation, success will not have been computed.
                if return_obj.success is not None:
                    # Add a "success" object to the config
                    stored_config.success_on_last_run = return_obj.success
                if catch_exceptions:
                    return_obj.exception_info = {
                        "raised_exception": raised_exception,
                        "exception_message": exception_message,
                        "exception_traceback": exception_traceback,
                    }
                # Record which evaluation parameters were substituted, inside meta.
                if len(substituted_parameters) > 0:
                    if meta is None:
                        meta = dict()
                    meta["substituted_parameters"] = substituted_parameters
                # Add meta to return object
                if meta is not None:
                    return_obj.meta = meta
                return_obj = recursively_convert_to_json_serializable(return_obj)
                # Give an attached data context a chance to post-process the result.
                if self._data_context is not None:
                    return_obj = self._data_context.update_return_obj(self, return_obj)
                return return_obj
            return wrapper
        return outer_wrapper
    def _initialize_expectations(
        self, expectation_suite=None, expectation_suite_name=None
    ):
        """Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.

        In addition, this always sets the `default_expectation_args` to:
            `include_config`: True,
            `catch_exceptions`: False,
            `result_format`: 'BASIC'

        By default, initializes data_asset_type to the name of the implementing class, but subclasses
        that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
        interoperability.

        Args:
            expectation_suite (json): \
                A json-serializable expectation config. \
                If None, creates default `_expectation_suite` with an empty list of expectations and \
                key value `data_asset_name` as `data_asset_name`.
            expectation_suite_name (string): \
                The name to assign to the `expectation_suite.expectation_suite_name`

        Returns:
            None
        """
        if expectation_suite is not None:
            # Accept either a plain dict (deserialized via the marshmallow schema)
            # or an ExpectationSuite instance (deep-copied so the caller's object
            # is never mutated by this asset).
            if isinstance(expectation_suite, dict):
                expectation_suite = expectationSuiteSchema.load(expectation_suite)
            else:
                expectation_suite = copy.deepcopy(expectation_suite)
            self._expectation_suite = expectation_suite
            if expectation_suite_name is not None:
                # An explicitly-passed name wins over the one in the suite.
                if (
                    self._expectation_suite.expectation_suite_name
                    != expectation_suite_name
                ):
                    logger.warning(
                        "Overriding existing expectation_suite_name {n1} with new name {n2}".format(
                            n1=self._expectation_suite.expectation_suite_name,
                            n2=expectation_suite_name,
                        )
                    )
                self._expectation_suite.expectation_suite_name = expectation_suite_name
        else:
            # No suite supplied: start from an empty suite named "default"
            # (or the provided name).
            if expectation_suite_name is None:
                expectation_suite_name = "default"
            self._expectation_suite = ExpectationSuite(
                expectation_suite_name=expectation_suite_name
            )
        self._expectation_suite.data_asset_type = self._data_asset_type
        self.default_expectation_args = {
            "include_config": True,
            "catch_exceptions": False,
            "result_format": "BASIC",
        }
def append_expectation(self, expectation_config):
"""This method is a thin wrapper for ExpectationSuite.append_expectation"""
warnings.warn(
"append_expectation is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.add_expectation instead.",
DeprecationWarning,
)
self._expectation_suite.append_expectation(expectation_config)
def find_expectation_indexes(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[int]:
"""This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
warnings.warn(
"find_expectation_indexes is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectation_indexes(
expectation_configuration=expectation_configuration, match_type=match_type
)
def find_expectations(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.find_expectations()"""
warnings.warn(
"find_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectations(
expectation_configuration=expectation_configuration, match_type=match_type
)
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.remove()"""
warnings.warn(
"DataAsset.remove_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.remove_expectation instead.",
DeprecationWarning,
)
return self._expectation_suite.remove_expectation(
expectation_configuration=expectation_configuration,
match_type=match_type,
remove_multiple_matches=remove_multiple_matches,
)
    def set_config_value(self, key, value):
        """Store *value* under *key* in this asset's internal ``_config`` mapping
        (e.g. "interactive_evaluation")."""
        self._config[key] = value
    def get_config_value(self, key):
        """Return the value stored under *key* in ``_config``; raises KeyError if absent."""
        return self._config[key]
    @property
    def batch_kwargs(self):
        """The BatchKwargs this asset was constructed with (read-only)."""
        return self._batch_kwargs
    @property
    def batch_id(self):
        """A stable identifier for this batch, derived via ``BatchKwargs.to_id()``."""
        return self.batch_kwargs.to_id()
    @property
    def batch_markers(self):
        """Markers describing this batch, as provided at construction (read-only)."""
        return self._batch_markers
    @property
    def batch_parameters(self):
        """Parameters used to obtain this batch, as provided at construction (read-only)."""
        return self._batch_parameters
def discard_failing_expectations(self):
res = self.validate(only_return_failures=True).results
if any(res):
for item in res:
self.remove_expectation(
expectation_configuration=item.expectation_config,
match_type="runtime",
)
warnings.warn("Removed %s expectations that were 'False'" % len(res))
def get_default_expectation_arguments(self):
"""Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
"""
return self.default_expectation_args
def set_default_expectation_argument(self, argument, value):
"""Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
"""
# !!! Maybe add a validation check here?
self.default_expectation_args[argument] = value
def get_expectations_config(
self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
warnings.warn(
"get_expectations_config is deprecated, and will be removed in a future release. "
+ "Please use get_expectation_suite instead.",
DeprecationWarning,
)
return self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
    def get_expectation_suite(
        self,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
        suppress_logging=False,
    ):
        """Returns _expectation_config as a JSON object, and perform some cleaning along the way.

        Args:
            discard_failed_expectations (boolean): \
                Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
            discard_result_format_kwargs (boolean): \
                In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
            discard_include_config_kwargs (boolean): \
                In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
            discard_catch_exceptions_kwargs (boolean): \
                In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
            suppress_warnings (boolean): \
                If true, do not include warnings in logging information about the operation.
            suppress_logging (boolean): \
                If true, do not create a log entry (useful when using get_expectation_suite programmatically)

        Returns:
            An expectation suite.

        Note:
            get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \
            copy of _expectation_suite, not the original object.
        """
        # Work on a deep copy so the live suite is never mutated.
        expectation_suite = copy.deepcopy(self._expectation_suite)
        expectations = expectation_suite.expectations
        # Tally of everything removed, keyed by reason, for the log message below.
        discards = defaultdict(int)
        if discard_failed_expectations:
            new_expectations = []
            for expectation in expectations:
                # Note: This is conservative logic.
                # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.
                # In cases where expectation.success is missing or None, expectations are *retained*.
                # Such a case could occur if expectations were loaded from a config file and never run.
                if expectation.success_on_last_run is False:
                    discards["failed_expectations"] += 1
                else:
                    new_expectations.append(expectation)
            expectations = new_expectations
        message = "\t%d expectation(s) included in expectation_suite." % len(
            expectations
        )
        if discards["failed_expectations"] > 0 and not suppress_warnings:
            message += (
                " Omitting %d expectation(s) that failed when last run; set "
                "discard_failed_expectations=False to include them."
                % discards["failed_expectations"]
            )
        for expectation in expectations:
            # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,
            # which calls _copy_and_clean_up_expectation
            expectation.success_on_last_run = None
            if discard_result_format_kwargs:
                if "result_format" in expectation.kwargs:
                    del expectation.kwargs["result_format"]
                    discards["result_format"] += 1
            if discard_include_config_kwargs:
                if "include_config" in expectation.kwargs:
                    del expectation.kwargs["include_config"]
                    discards["include_config"] += 1
            if discard_catch_exceptions_kwargs:
                if "catch_exceptions" in expectation.kwargs:
                    del expectation.kwargs["catch_exceptions"]
                    discards["catch_exceptions"] += 1
        # Build a summary of which per-expectation settings were stripped.
        settings_message = ""
        if discards["result_format"] > 0 and not suppress_warnings:
            settings_message += " result_format"
        if discards["include_config"] > 0 and not suppress_warnings:
            settings_message += " include_config"
        if discards["catch_exceptions"] > 0 and not suppress_warnings:
            settings_message += " catch_exceptions"
        if (
            len(settings_message) > 1
        ):  # Only add this if we added one of the settings above.
            settings_message += " settings filtered."
        expectation_suite.expectations = expectations
        if not suppress_logging:
            logger.info(message + settings_message)
        return expectation_suite
def save_expectation_suite(
self,
filepath=None,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
"""Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \
can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \
the JSON expectations config.
Args:
filepath (string): \
The location and name to write the JSON config file to.
discard_failed_expectations (boolean): \
If True, excludes expectations that do not return ``success = True``. \
If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): \
If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \
file.
discard_include_config_kwargs (boolean): \
If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \
file.
discard_catch_exceptions_kwargs (boolean): \
If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \
config file.
suppress_warnings (boolean): \
It True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
suppressed.
"""
expectation_suite = self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
if filepath is None and self._data_context is not None:
self._data_context.save_expectation_suite(expectation_suite)
elif filepath is not None:
with open(filepath, "w") as outfile:
json.dump(
expectationSuiteSchema.dump(expectation_suite),
outfile,
indent=2,
sort_keys=True,
)
else:
raise ValueError(
"Unable to save config: filepath or data_context must be available."
)
    def validate(
        self,
        expectation_suite=None,
        run_id=None,
        data_context=None,
        evaluation_parameters=None,
        catch_exceptions=True,
        result_format=None,
        only_return_failures=False,
        run_name=None,
        run_time=None,
    ):
        """Generates a JSON-formatted report describing the outcome of all expectations.

        Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.

        Args:
            expectation_suite (json or None): \
                If None, uses the expectations config generated with the DataAsset during the current session. \
                If a JSON file, validates those expectations.
            run_name (str): \
                Used to identify this validation result as part of a collection of validations. \
                See DataContext for more information.
            data_context (DataContext): \
                A datacontext object to use as part of validation for binding evaluation parameters and \
                registering validation results.
            evaluation_parameters (dict or None): \
                If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \
                data_asset. If a dict, uses the evaluation parameters in the dictionary.
            catch_exceptions (boolean): \
                If True, exceptions raised by tests will not end validation and will be described in the returned \
                report.
            result_format (string or None): \
                If None, uses the default value ('BASIC' or as specified). \
                If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \
                etc.).
            only_return_failures (boolean): \
                If True, expectation results are only returned when ``success = False``

        Returns:
            A JSON-formatted dictionary containing a list of the validation results. \
            An example of the returned format::

            {
              "results": [
                {
                  "unexpected_list": [unexpected_value_1, unexpected_value_2],
                  "expectation_type": "expect_*",
                  "kwargs": {
                    "column": "Column_Name",
                    "output_format": "SUMMARY"
                  },
                  "success": true,
                  "raised_exception: false.
                  "exception_traceback": null
                },
                {
                  ... (Second expectation results)
                },
                ... (More expectations results)
              ],
              "success": true,
              "statistics": {
                "evaluated_expectations": n,
                "successful_expectations": m,
                "unsuccessful_expectations": n - m,
                "success_percent": m / n
              }
            }

        Notes:
            Warns if the configuration object was built with a different version of great expectations than the \
            current environment, or if no version was found in the configuration file.

        Raises:
            AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
        """
        try:
            validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
            # run_id is mutually exclusive with run_name/run_time.
            assert not (run_id and run_name) and not (
                run_id and run_time
            ), "Please provide either a run_id or run_name and/or run_time."
            # Normalize every accepted run_id form (str, dict, RunIdentifier) to RunIdentifier.
            if isinstance(run_id, str) and not run_name:
                warnings.warn(
                    "String run_ids will be deprecated in the future. Please provide a run_id of type "
                    "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
                    "and run_time (both optional). Instead of providing a run_id, you may also provide"
                    "run_name and run_time separately.",
                    DeprecationWarning,
                )
                try:
                    run_time = parse(run_id)
                except (ValueError, TypeError):
                    pass
                run_id = RunIdentifier(run_name=run_id, run_time=run_time)
            elif isinstance(run_id, dict):
                run_id = RunIdentifier(**run_id)
            elif not isinstance(run_id, RunIdentifier):
                run_id = RunIdentifier(run_name=run_name, run_time=run_time)
            # While active, the @expectation decorator will not persist configs to the suite.
            self._active_validation = True
            # If a different validation data context was provided, override
            validate__data_context = self._data_context
            if data_context is None and self._data_context is not None:
                data_context = self._data_context
            elif data_context is not None:
                # temporarily set self._data_context so it is used inside the expectation decorator
                self._data_context = data_context
            results = []
            # Resolve the expectation suite: default to this asset's own suite,
            # or load from a JSON file path, or require an ExpectationSuite object.
            if expectation_suite is None:
                expectation_suite = self.get_expectation_suite(
                    discard_failed_expectations=False,
                    discard_result_format_kwargs=False,
                    discard_include_config_kwargs=False,
                    discard_catch_exceptions_kwargs=False,
                )
            elif isinstance(expectation_suite, str):
                try:
                    with open(expectation_suite) as infile:
                        expectation_suite = expectationSuiteSchema.loads(infile.read())
                except ValidationError:
                    raise
                except OSError:
                    raise GreatExpectationsError(
                        "Unable to load expectation suite: IO error while reading %s"
                        % expectation_suite
                    )
            elif not isinstance(expectation_suite, ExpectationSuite):
                logger.error(
                    "Unable to validate using the provided value for expectation suite; does it need to be "
                    "loaded from a dictionary?"
                )
                if getattr(data_context, "_usage_statistics_handler", None):
                    handler = data_context._usage_statistics_handler
                    handler.send_usage_message(
                        event="data_asset.validate",
                        event_payload=handler._batch_anonymizer.anonymize_batch_info(
                            self
                        ),
                        success=False,
                    )
                return ExpectationValidationResult(success=False)
            # Evaluation parameter priority is
            # 1. from provided parameters
            # 2. from expectation configuration
            # 3. from data context
            # So, we load them in reverse order
            if data_context is not None:
                runtime_evaluation_parameters = (
                    data_context.evaluation_parameter_store.get_bind_params(run_id)
                )
            else:
                runtime_evaluation_parameters = {}
            if expectation_suite.evaluation_parameters:
                runtime_evaluation_parameters.update(
                    expectation_suite.evaluation_parameters
                )
            if evaluation_parameters is not None:
                runtime_evaluation_parameters.update(evaluation_parameters)
            # Convert evaluation parameters to be json-serializable
            runtime_evaluation_parameters = recursively_convert_to_json_serializable(
                runtime_evaluation_parameters
            )
            # Warn if our version is different from the version in the configuration
            # TODO: Deprecate "great_expectations.__version__"
            suite_ge_version = expectation_suite.meta.get(
                "great_expectations_version"
            ) or expectation_suite.meta.get("great_expectations.__version__")
            if suite_ge_version:
                if suite_ge_version != ge_version:
                    warnings.warn(
                        "WARNING: This configuration object was built using version %s of great_expectations, but "
                        "is currently being validated by version %s."
                        % (
                            suite_ge_version,
                            ge_version,
                        )
                    )
            else:
                warnings.warn(
                    "WARNING: No great_expectations version found in configuration object."
                )
            ###
            # This is an early example of what will become part of the ValidationOperator
            # This operator would be dataset-semantic aware
            # Adding now to simply ensure we can be slightly better at ordering our expectation evaluation
            ###
            # Group expectations by column
            columns = {}
            for expectation in expectation_suite.expectations:
                if "column" in expectation.kwargs and isinstance(
                    expectation.kwargs["column"], Hashable
                ):
                    column = expectation.kwargs["column"]
                else:
                    column = "_nocolumn"
                if column not in columns:
                    columns[column] = []
                columns[column].append(expectation)
            expectations_to_evaluate = []
            for col in columns:
                expectations_to_evaluate.extend(columns[col])
            for expectation in expectations_to_evaluate:
                try:
                    # copy the config so we can modify it below if needed
                    expectation = copy.deepcopy(expectation)
                    expectation_method = getattr(self, expectation.expectation_type)
                    if result_format is not None:
                        expectation.kwargs.update({"result_format": result_format})
                    # A missing parameter will raise an EvaluationParameterError
                    (
                        evaluation_args,
                        substituted_parameters,
                    ) = build_evaluation_parameters(
                        expectation.kwargs,
                        runtime_evaluation_parameters,
                        self._config.get("interactive_evaluation", True),
                        self._data_context,
                    )
                    result = expectation_method(
                        catch_exceptions=catch_exceptions,
                        include_config=True,
                        **evaluation_args
                    )
                except Exception as err:
                    if catch_exceptions:
                        raised_exception = True
                        exception_traceback = traceback.format_exc()
                        result = ExpectationValidationResult(
                            success=False,
                            exception_info={
                                "raised_exception": raised_exception,
                                "exception_traceback": exception_traceback,
                                "exception_message": str(err),
                            },
                        )
                    else:
                        raise err
                # if include_config:
                result.expectation_config = expectation
                # Add an empty exception_info object if no exception was caught
                if catch_exceptions and result.exception_info is None:
                    result.exception_info = {
                        "raised_exception": False,
                        "exception_traceback": None,
                        "exception_message": None,
                    }
                results.append(result)
            # Summarize the results (helper defined elsewhere in this module).
            statistics = _calc_validation_statistics(results)
            if only_return_failures:
                abbrev_results = []
                for exp in results:
                    if not exp.success:
                        abbrev_results.append(exp)
                results = abbrev_results
            expectation_suite_name = expectation_suite.expectation_suite_name
            result = ExpectationSuiteValidationResult(
                results=results,
                success=statistics.success,
                statistics={
                    "evaluated_expectations": statistics.evaluated_expectations,
                    "successful_expectations": statistics.successful_expectations,
                    "unsuccessful_expectations": statistics.unsuccessful_expectations,
                    "success_percent": statistics.success_percent,
                },
                evaluation_parameters=runtime_evaluation_parameters,
                meta={
                    "great_expectations_version": ge_version,
                    "expectation_suite_name": expectation_suite_name,
                    "run_id": run_id,
                    "batch_kwargs": self.batch_kwargs,
                    "batch_markers": self.batch_markers,
                    "batch_parameters": self.batch_parameters,
                    "validation_time": validation_time,
                },
            )
            # Restore the data context that was active before this call.
            self._data_context = validate__data_context
        except Exception:
            # Best-effort usage-statistics reporting on failure, then re-raise.
            if getattr(data_context, "_usage_statistics_handler", None):
                handler = data_context._usage_statistics_handler
                handler.send_usage_message(
                    event="data_asset.validate",
                    event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                    success=False,
                )
            raise
        finally:
            self._active_validation = False
        # Success-path usage-statistics reporting (only reached when no exception escaped).
        if getattr(data_context, "_usage_statistics_handler", None):
            handler = data_context._usage_statistics_handler
            handler.send_usage_message(
                event="data_asset.validate",
                event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                success=True,
            )
        return result
def get_evaluation_parameter(self, parameter_name, default_value=None):
"""Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
"""
if parameter_name in self._expectation_suite.evaluation_parameters:
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value
def set_evaluation_parameter(self, parameter_name, parameter_value):
"""Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
"""
self._expectation_suite.evaluation_parameters.update(
{parameter_name: parameter_value}
)
def add_citation(
self,
comment,
batch_kwargs=None,
batch_markers=None,
batch_parameters=None,
citation_date=None,
):
if batch_kwargs is None:
batch_kwargs = self.batch_kwargs
if batch_markers is None:
batch_markers = self.batch_markers
if batch_parameters is None:
batch_parameters = self.batch_parameters
self._expectation_suite.add_citation(
comment,
batch_kwargs=batch_kwargs,
batch_markers=batch_markers,
batch_parameters=batch_parameters,
citation_date=citation_date,
)
@property
def expectation_suite_name(self):
"""Gets the current expectation_suite name of this data_asset as stored in the expectations configuration."""
return self._expectation_suite.expectation_suite_name
@expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
"""Sets the expectation_suite name of this data_asset as stored in the expectations configuration."""
self._expectation_suite.expectation_suite_name = expectation_suite_name
###
#
# Output generation
#
###
def _format_map_output(
self,
result_format,
success,
element_count,
nonnull_count,
unexpected_count,
unexpected_list,
unexpected_index_list,
):
"""Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
"""
# NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
# Incrementally add to result and return when all values for the specified level are present
return_obj = {"success": success}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
missing_count = element_count - nonnull_count
if element_count > 0:
missing_percent = missing_count / element_count * 100
if nonnull_count > 0:
unexpected_percent_total = unexpected_count / element_count * 100
unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100
else:
unexpected_percent_total = None
unexpected_percent_nonmissing = None
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj["result"] = {
"element_count": element_count,
"missing_count": missing_count,
"missing_percent": missing_percent,
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent_nonmissing,
"unexpected_percent_total": unexpected_percent_total,
"unexpected_percent_nonmissing": unexpected_percent_nonmissing,
"partial_unexpected_list": unexpected_list[
: result_format["partial_unexpected_count"]
],
}
if result_format["result_format"] == "BASIC":
return return_obj
# Try to return the most common values, if possible.
if 0 < result_format.get("partial_unexpected_count"):
try:
partial_unexpected_counts = [
{"value": key, "count": value}
for key, value in sorted(
Counter(unexpected_list).most_common(
result_format["partial_unexpected_count"]
),
key=lambda x: (-x[1], str(x[0])),
)
]
except TypeError:
partial_unexpected_counts = []
if "details" not in return_obj["result"]:
return_obj["result"]["details"] = {}
return_obj["result"]["details"][
"partial_unexpected_counts_error"
] = "partial_unexpected_counts requested, but requires a hashable type"
finally:
return_obj["result"].update(
{
"partial_unexpected_index_list": unexpected_index_list[
: result_format["partial_unexpected_count"]
]
if unexpected_index_list is not None
else None,
"partial_unexpected_counts": partial_unexpected_counts,
}
)
if result_format["result_format"] == "SUMMARY":
return return_obj
return_obj["result"].update(
{
"unexpected_list": unexpected_list,
"unexpected_index_list": unexpected_index_list,
}
)
if result_format["result_format"] == "COMPLETE":
return return_obj
raise ValueError(
"Unknown result_format {}.".format(result_format["result_format"])
)
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
"""Calculate success and percent_success for column_map_expectations
Args:
success_count (int): \
The number of successful values in the column
nonnull_count (int): \
The number of nonnull values in the column
mostly (float or None): \
A value between 0 and 1 (or None), indicating the fraction of successes required to pass the \
expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as \
a whole to succeed.
Returns:
success (boolean), percent_success (float)
"""
if isinstance(success_count, decimal.Decimal):
raise ValueError(
"success_count must not be a decimal; check your db configuration"
)
if isinstance(nonnull_count, decimal.Decimal):
raise ValueError(
"nonnull_count must not be a decimal; check your db configuration"
)
if nonnull_count > 0:
percent_success = success_count / nonnull_count
if mostly is not None:
success = bool(percent_success >= mostly)
else:
success = bool(nonnull_count - success_count == 0)
else:
success = True
percent_success = None
return success, percent_success
###
#
# Iterative testing for custom expectations
#
###
def test_expectation_function(self, function, *args, **kwargs):
"""Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs)
# Summary statistics for a single validation run, produced by _calc_validation_statistics.
ValidationStatistics = namedtuple(
    "ValidationStatistics",
    "evaluated_expectations successful_expectations unsuccessful_expectations "
    "success_percent success",
)
def _calc_validation_statistics(validation_results):
    """
    Calculate summary statistics for the validation results and
    return ``ExpectationStatistics``.
    """
    evaluated = len(validation_results)
    passed = sum(result.success for result in validation_results)
    failed = evaluated - passed
    if evaluated:
        percent = passed / evaluated * 100
    else:
        # No expectations were evaluated, so a percentage is undefined.
        percent = None
    return ValidationStatistics(
        successful_expectations=passed,
        evaluated_expectations=evaluated,
        unsuccessful_expectations=failed,
        success=passed == evaluated,
        success_percent=percent,
    )
| great_expectations/data_asset/data_asset.py | 53,983 | Initialize the DataAsset.
:param profiler (profiler class) = None: The profiler that should be run on the data_asset to
build a baseline expectation suite.
Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments
so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of
*args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the
support for the profiler parameter not obvious from the signature.
Calculate success and percent_success for column_map_expectations
Args:
success_count (int): The number of successful values in the column
nonnull_count (int): The number of nonnull values in the column
mostly (float or None): A value between 0 and 1 (or None), indicating the fraction of successes required to pass the expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as a whole to succeed.
Returns:
success (boolean), percent_success (float)
Calculate summary statistics for the validation results and
return ``ExpectationStatistics``.
Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: False,
`catch_exceptions`: False,
`output_format`: 'BASIC'
By default, initializes data_asset_type to the name of the implementing class, but subclasses
that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
interoperability.
Args:
expectation_suite (json): A json-serializable expectation config. If None, creates default `_expectation_suite` with an empty list of expectations and key value `data_asset_name` as `data_asset_name`.
expectation_suite_name (string): The name to assign to the `expectation_suite.expectation_suite_name`
Returns:
None
This method is a thin wrapper for ExpectationSuite.append_expectation
Deprecated: use profile instead.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
Returns:
tuple(expectation_suite, validation_results)
Manages configuration and running of expectation objects.
Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.
Args:
method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation (typically the result of inspection). Positional arguments are explicitly mapped to keyword arguments when the expectation is run.
Notes:
Intermediate decorators that call the core @expectation decorator will most likely need to pass their decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the signature from the implementing method.
@expectation intercepts and takes action based on the following parameters:
* include_config (boolean or None) : If True, then include the generated expectation config as part of the result object. For more detail, see :ref:`include_config`.
* catch_exceptions (boolean or None) : If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
* result_format (str or None) : Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
* meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`.
Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.
Sets the expectation_suite name of this data_asset as stored in the expectations configuration.
This method is a thin wrapper for ExpectationSuite.find_expectation_indexes
This method is a thin wrapper for ExpectationSuite.find_expectations()
Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_config_kwargs (boolean): In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
suppress_warnings (boolean): If true, do not include warnings in logging information about the operation.
suppress_logging (boolean): If true, do not create a log entry (useful when using get_expectation_suite programmatically)
Returns:
An expectation suite.
Note:
get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a copy of _expectation_suite, not the original object.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
profiler_configuration: Optional profiler configuration dict
Returns:
tuple(expectation_suite, validation_results)
This method is a thin wrapper for ExpectationSuite.remove()
Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from the JSON expectations config.
Args:
filepath (string): The location and name to write the JSON config file to.
discard_failed_expectations (boolean): If True, excludes expectations that do not return ``success = True``. If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config file.
discard_include_config_kwargs (boolean): If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config file.
discard_catch_exceptions_kwargs (boolean): If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON config file.
suppress_warnings (boolean): It True, all warnings raised by Great Expectations, as a result of dropped expectations, are suppressed.
Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc. To use developed expectations from the command-line tool, you will still need to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
Args:
expectation_suite (json or None): If None, uses the expectations config generated with the DataAsset during the current session. If a JSON file, validates those expectations.
run_name (str): Used to identify this validation result as part of a collection of validations. See DataContext for more information.
data_context (DataContext): A datacontext object to use as part of validation for binding evaluation parameters and registering validation results.
evaluation_parameters (dict or None): If None, uses the evaluation_paramters from the expectation_suite provided or as part of the data_asset. If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): If True, exceptions raised by tests will not end validation and will be described in the returned report.
result_format (string or None): If None, uses the default value ('BASIC' or as specified). If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
only_return_failures (boolean): If True, expectation results are only returned when ``success = False``
Returns:
A JSON-formatted dictionary containing a list of the validation results. An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
If the configuration object was built with a different version of great expectations than the current environment. If no version was found in the configuration file.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
This should in general only be changed when a subclass *adds expectations* or *changes expectation semantics* That way, multiple backends can implement the same data_asset_type This special state variable tracks whether a validation run is going on, which will disable saving expectation config objects TODO: add warning if no expectation_explorer_manager and how to turn on Get the name of the method Combine all arguments into a single new "all_args" dictionary to name positional parameters Unpack display parameters; remove them from all_args if appropriate Extract the meta object for use as a top-level expectation_config holder Get the signature of the inner wrapper: Patch in PARAMETER args, and remove locally-supplied arguments This will become the stored config Construct the expectation_config object Finally, execute the expectation method itself If validate has set active_validation to true, then we do not save the config to avoid saving updating expectation configs to the same suite during validation runs Append the expectation to the config. If there was no interactive evaluation, success will not have been computed. Add a "success" object to the config Add meta to return object !!! Maybe add a validation check here? Note: This is conservative logic. Instead of retaining expectations IFF success==True, it discard expectations IFF success==False. In cases where expectation.success is missing or None, expectations are *retained*. Such a case could occur if expectations were loaded from a config file and never run. FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, which calls _copy_and_clean_up_expectation Only add this if we added one of the settings above. If a different validation data context was provided, override temporarily set self._data_context so it is used inside the expectation decorator Evaluation parameter priority is 1. from provided parameters 2. from expectation configuration 3. 
from data context So, we load them in reverse order Convert evaluation parameters to be json-serializable Warn if our version is different from the version in the configuration TODO: Deprecate "great_expectations.__version__" This is an early example of what will become part of the ValidationOperator This operator would be dataset-semantic aware Adding now to simply ensure we can be slightly better at ordering our expectation evaluation Group expectations by column copy the config so we can modify it below if needed A missing parameter will raise an EvaluationParameterError if include_config: Add an empty exception_info object if no exception was caught Output generation NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list Retain support for string-only output formats: Incrementally add to result and return when all values for the specified level are present Try to return the most common values, if possible. Iterative testing for custom expectations calc stats success_percent = float("nan") | 15,737 | en | 0.711492 |
"""update liscence colum to hash
Revision ID: 0a769c5cda0a
Revises: 1de63d54c3b7
Create Date: 2018-06-21 17:57:36.549097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0a769c5cda0a'  # unique id of this migration
down_revision = '1de63d54c3b7'  # migration this one is applied on top of
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependency
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Widen each affected column from VARCHAR(80) to String(128); per the migration
    # message these columns are being updated to store hashed values.
    for column_name in ('birth', 'liscence_2', 'liscence_3', 'liscence_4', 'serial'):
        op.alter_column('user', column_name,
                        existing_type=sa.VARCHAR(length=80),
                        type_=sa.String(length=128),
                        existing_nullable=True)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Narrow the columns back to VARCHAR(80), in the reverse order of upgrade().
    for column_name in ('serial', 'liscence_4', 'liscence_3', 'liscence_2', 'birth'):
        op.alter_column('user', column_name,
                        existing_type=sa.String(length=128),
                        type_=sa.VARCHAR(length=80),
                        existing_nullable=True)
    # ### end Alembic commands ###
| wicarproject/migrations/versions/0a769c5cda0a_update_liscence_colum_to_hash.py | 2,303 | update liscence colum to hash
Revision ID: 0a769c5cda0a
Revises: 1de63d54c3b7
Create Date: 2018-06-21 17:57:36.549097
revision identifiers, used by Alembic. commands auto generated by Alembic - please adjust! end Alembic commands commands auto generated by Alembic - please adjust! end Alembic commands | 312 | en | 0.615589 |
import os
from plugins import BaseAssessment
from yapsy.IPlugin import IPlugin
from asmtypes import ArastDataOutputError
class ReaprAssessment(BaseAssessment, IPlugin):
    # Key under which the resulting contig file is reported.
    OUTPUT = 'contigs'

    def run(self):
        """Map reads onto the contigs with REAPR, run its pipeline, and return the
        broken-assembly contig file when one is produced (implicitly None otherwise).
        """
        contig_files = self.data.contigfiles
        readsets = self.data.readsets
        if len(contig_files) > 1:
            raise Exception('Reapr: multiple contig files!')
        #### Generate Bamfiles
        if len(readsets) > 1:
            # Only the first read library is mapped below.
            self.out_module.write('WARNING: Reapr will use only one read library')
        first_pair = readsets[0].files
        bamfile = os.path.join(self.outpath, 'out.bam')
        self.arast_popen([self.executable, 'smaltmap', contig_files[0],
                          first_pair[0], first_pair[1], bamfile])
        if not os.path.exists(bamfile):
            raise ArastDataOutputError('REAPR: Unable to create alignment')
        #### Run REAPR Pipeline
        pipeline_outpath = os.path.join(self.outpath, 'output')
        self.arast_popen([self.executable, 'pipeline', contig_files[0], bamfile, pipeline_outpath])
        # REAPR writes into its own subdirectory; move its files into the root dir.
        for filename in os.listdir(pipeline_outpath):
            os.rename(os.path.join(pipeline_outpath, filename),
                      os.path.join(self.outpath, filename))
        broken = os.path.join(self.outpath, '04.break.broken_assembly.fa')
        if os.path.exists(broken):
            return {'contigs': [broken]}
| lib/assembly/plugins/reapr.py | 1,563 | Build the command and run.
Return list of file(s)
Generate Bamfiles Run REAPR Pipeline Move files into root dir | 113 | en | 0.755907 |
#!/usr/bin/env python3
# Northcliff Airconditioner Controller Version 3.48 Gen
import RPi.GPIO as GPIO
import time
from datetime import datetime
#import requests
#from threading import Thread
import paho.mqtt.client as mqtt
import struct
import json
import serial
import binascii
import sys
import spidev
import math
import os
class NorthcliffAirconController(object):
    def __init__(self, calibrate_damper_on_startup):
        """Configure GPIO outputs, default aircon state flags, serial-comms packet
        templates, and the SPI damper-position sensor.

        :param calibrate_damper_on_startup: when True, startup() performs a full
            damper calibration run before reporting status.
        """
        # Set up GPIO (BCM pin numbering for the control lines below)
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        self.control_enable = 17
        self.damper_control = 25
        self.damper_stop = 24
        self.damper_zone = 23
        GPIO.setup(self.control_enable, GPIO.OUT)
        GPIO.setup(self.damper_control, GPIO.OUT)
        GPIO.setup(self.damper_stop, GPIO.OUT)
        GPIO.setup(self.damper_zone, GPIO.OUT)
        # Drive all outputs low and mirror that state in the *_state attributes.
        GPIO.output(self.control_enable, False)
        self.damper_control_state = False
        GPIO.output(self.damper_control, False)
        self.damper_stop_state = False
        GPIO.output(self.damper_stop, False)
        self.damper_zone_state = False
        GPIO.output(self.damper_zone, False)
        # Aircon Startup Mode
        self.remote_operation_on = False # This flag keeps track of whether the aircon is under remote or autonomous operation
        self.enable_serial_comms_loop = False # This flag is set to True during remote operation to enable the serial comms loop when the aircon is under remote operations
        self.heating = False # Mirrors aircon heating state indicator
        self.compressor = False # Mirrors aircon compressor state indicator
        self.malfunction = False # Mirrors aircon malfunction state indicator and is used to indicate a malfunction in the aircon/controller comms
        self.heat_mode = False # Mirrors aircon heat mode indicator
        self.cool_mode = False # Mirrors aircon cool mode indicator
        self.fan_mode = False # Mirrors aircon fan mode indicator
        self.fan_hi = False # Mirrors aircon fan hi indicator
        self.fan_med = False # Mirrors aircon fan med indicator
        self.fan_lo = False # Mirrors aircon fan lo indicator
        self.filter = False # Mirrors aircon filter indicator
        # Set up damper states
        self.requested_damper_percent = 100
        self.adjusting_damper = False
        # Set default damper positions (sensor counts; overridden by calibration)
        self.damper_day_position = 416
        self.damper_night_position = 1648
        self.calibrate_damper_on_startup = calibrate_damper_on_startup
        # Set up heartbeat bookkeeping for the Home Manager link
        self.heartbeat_count = 0
        self.no_heartbeat_ack = False
        # Set up Serial Comms Data (hex-string byte values for the aircon protocol)
        self.packet1_header_a = '00'
        self.packet1_header_b = '8f'
        self.packet1_header = self.packet1_header_a + self.packet1_header_b
        self.packet2_header_a = '80'
        self.packet2_header_b = '8c'
        self.packet2_header = self.packet2_header_a + self.packet2_header_b
        self.packet3_initial_header = self.packet1_header
        self.mode = {'Auto On': 'b0', 'Auto Off': '90', 'Dry On': 'b1', 'Dry Off': '91', 'Cool On': 'b2', 'Cool Off': '92', 'Fan On': 'b3', 'Fan Off': '93', 'Heat On': 'b4', 'Heat Off': '94'}
        self.set_temp = {'18 degrees': '48', '19 degrees': '4a', '20 degrees': '4c', '21 degrees': '4e', '22 degrees': '50', '23 degrees': '52', '24 degrees': '54', '25 degrees': '56', '26 degrees': '58',
                         '27 degrees': '5a', '28 degrees': '5c', '29 degrees': '5e', '30 degrees': '60'}
        self.fan_speed = {'Lo On': 'f0', 'Lo Off': 'e0', 'Med On': 'f1', 'Med Off': 'e1', 'Hi On': 'f2', 'Hi Off': 'e2'}
        self.clean_filter = {'Reset': 'f1', 'No Reset': 'f0'}
        self.alerts = {'Not in Warmup': ['f8', 'fa'], 'Warmup': ['f9', 'fb'], 'Clean Filter': ['fa', 'fb'], 'Filter OK': ['f8', 'f9']}
        self.compressor_state = {'Off': 'e0', 'On': 'e2'}
        # Set up dictionaries for Serial Comms Packets to Off, Fan Mode, Fan Lo
        # NOTE(review): the comment above says "Fan Lo" but the packet entries use
        # fan_speed['Hi Off'] — confirm the intended fan default.
        self.packet_1_dictionary = {"1Header1": self.packet1_header, "2Mode1": self.mode['Fan Off'], "3Filler1a": "00", "4SetTemp1": self.set_temp['20 degrees'], "5Fan1": self.fan_speed['Hi Off'],
                                    "6Filler1b": "fffff03fffffffffff"}
        self.packet_2_dictionary = {"1Header2": self.packet2_header, "2Mode2": self.mode['Fan Off'], "3Filler2a": "00", "4SetTemp2": self.set_temp['20 degrees'], "5Fan2": self.fan_speed['Hi Off'],
                                    "6ActualTemp2": "90", "7Filler2b": "00", "8Unknown2": "e0", "9Alerts2": self.alerts['Warmup'], "10Filler2c": "ffff", "11Compressor2": self.compressor_state['On'],
                                    "12Filler2c": "ffff", "13Checksum2": "00"}
        self.packet_3_dictionary = {"1Header3": self.packet3_initial_header, "2Mode3": self.mode['Fan Off'], "3Filler3a": "00", "4SetTemp3": self.set_temp['20 degrees'],
                                    "5Fan3": self.fan_speed['Hi Off'], "6Filler3b": "fffff03fffffffffff"}
        # Set up serial port for aircon controller comms
        self.aircon_comms = serial.Serial("/dev/ttyAMA0", 1200, parity=serial.PARITY_EVEN, timeout=0.5) # After swapping serial and bluetooth ports so we can use parity
        # Set up SPI Port for the damper position sensor
        self.spi = spidev.SpiDev()
        speed = 50000
        self.spi.open(0,0)
        self.spi.max_speed_hz = speed
        # Initialise damper position sensor (self-test each channel, then exit test mode)
        resp = self.spi.xfer2([0x0e, 0x00, 0x00]) # X-Channel Self Test
        time.sleep(0.3)
        resp = self.spi.xfer2([0x00, 0x00]) # Exit Self Test
        time.sleep(0.1)
        resp = self.spi.xfer2([0x0f, 0x00, 0x00]) # Y-Channel Self Test
        time.sleep(0.3)
        resp = self.spi.xfer2([0x00, 0x00]) # Exit Self Test
        time.sleep(0.1)
def print_status(self, print_message):
today = datetime.now()
print("")
print(print_message + today.strftime('%A %d %B %Y @ %H:%M:%S'))
    def startup(self):
        """Connect to the mqtt broker, optionally calibrate the damper, then report
        the initial damper position and aircon status to Home Manager.
        """
        self.print_status("Northcliff Aircon Controller starting up on ")
        # Set up mqtt client
        self.client = mqtt.Client('aircon') #Create new instance of mqtt Class
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.connect("<your mqtt Broker name>", 1883, 60) #Connect to mqtt broker
        self.client.loop_start() #Start mqtt monitor thread
        if self.calibrate_damper_on_startup == True:
            # Full calibration sweep; damper_movement_time bounds the travel time in seconds.
            self.calibrate_damper(damper_movement_time = 180)
        # Detect Damper Position and update Home Manager with aircon status
        self.detect_damper_position(calibrate = False)
        self.update_status()
def on_connect(self, client, userdata, flags, rc): # Print mqtt status on connecting to broker
time.sleep(1)
self.print_status("Connected to mqtt server with result code "+str(rc)+" on ")
print("")
self.client.subscribe("AirconControl")
def on_message(self, client, userdata, msg): # mqtt message method calls
decoded_payload = str(msg.payload.decode("utf-8"))
message = msg.topic+" "+ decoded_payload # Capture message with binary states converted to a string
#print(message)
if str(msg.topic) == 'AirconControl':
parsed_json = json.loads(decoded_payload)
if parsed_json['service'] == 'Off':
self.process_thermo_off_command()
elif parsed_json['service'] == 'Ventilate':
self.process_ventilate_mode()
elif parsed_json['service'] == 'Thermostat Heat':
self.process_thermo_heat_command()
elif parsed_json['service'] == 'Thermostat Cool':
self.process_thermo_cool_command()
elif parsed_json['service'] == 'Thermostat Auto':
self.process_thermo_auto_command()
elif parsed_json['service'] == 'Heat Mode':
self.process_heat_command()
elif parsed_json['service'] == 'Cool Mode':
self.process_cool_command()
elif parsed_json['service'] == 'Fan Mode':
self.process_fan_command()
elif parsed_json['service'] == 'Fan Hi':
self.process_fan_hi_command()
elif parsed_json['service'] == 'Fan Med':
self.process_fan_med_command()
elif parsed_json['service'] == 'Fan Lo':
self.process_fan_lo_command()
elif parsed_json['service'] == 'Damper Percent':
self.requested_damper_percent = parsed_json['value']
self.print_status("Damper Command Received on ")
print("Requested Damper Percent is", self.requested_damper_percent, "Current Damper Percent is", self.reported_damper_percent)
elif parsed_json['service'] == 'Update Status': # If HomeManager wants a status update
self.print_status("Status Update Requested on ")
self.update_status()
elif parsed_json['service'] == 'Heartbeat Ack': # If HomeManager sends a heartbeat ack
self.heartbeat_ack()
else:
print("Received unknown message", str(parsed_json))
    def update_status(self): # Send aircon status to Home Manager
        """Publish the controller's complete state to the 'AirconStatus' topic as one JSON object."""
        status = json.dumps({'service': 'Status Update', 'Remote Operation': self.remote_operation_on, 'Heat': self.heat_mode, 'Cool': self.cool_mode,
                              'Fan': self.fan_mode, 'Fan Hi': self.fan_hi, 'Fan Med': self.fan_med, 'Fan Lo': self.fan_lo, 'Heating': self.heating,
                              'Compressor': self.compressor, 'Malfunction': self.malfunction, 'Damper': self.reported_damper_percent, 'Filter': self.filter})
        self.client.publish('AirconStatus', status)
### Methods for mqtt messages received from Home Manager ###
    def process_thermo_off_command(self):
        """Turn the aircon off: fan off in both outgoing packets, clear all mode/fan flags,
        publish the status and flag the serial comms loop to disconnect."""
        self.print_status("Thermo Off Command received on ")
        self.packet_1_dictionary["2Mode1"] = self.mode['Fan Off'] # Set Fan to Off Mode
        self.packet_3_dictionary["2Mode3"] = self.mode['Fan Off'] # Set Fan to Off Mode
        self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi Off'] # Set Fan to High
        self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi Off'] # Set Fan to High
        self.cool_mode = False
        self.fan_mode = False
        self.heat_mode = False
        self.fan_med = False
        self.fan_hi = False
        self.fan_lo = False
        self.update_status()
        time.sleep(3) # Wait for packets to be sent before disconnecting
        self.enable_serial_comms_loop = False # Sets the flag to exit serial comms loop and prepare for disconnect
        # The disconnect is done in the main loop so it happens between packet 3 and packet 1
    def process_thermo_heat_command(self):
        """Enter thermostat heat mode.

        Takes control of the remote and damper if not already in remote operation,
        then runs the unit in Fan Lo with a 30 degree set point. NOTE(review): the
        aircon is placed in Fan mode here, so the heat/cool cycling is presumably
        driven externally (Home Manager thermostat logic) - confirm at the caller.
        """
        self.print_status("Thermo Heat Command received on ")
        if self.remote_operation_on == False: # Turn On
            self.remote_operation_on = True
            self.enable_serial_comms_loop = True
            GPIO.output(self.control_enable, True) # Take Control of Remote
            self.damper_control_state = True
            GPIO.output(self.damper_control, True) # Take Control of Damper
            time.sleep (1.0)
        self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_1_dictionary["4SetTemp1"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
        self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_3_dictionary["4SetTemp3"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
        self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
        self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
        self.cool_mode = False
        self.fan_mode = True
        self.heat_mode = False
        self.fan_med = False
        self.fan_hi = False
        self.fan_lo = True
        self.update_status()
    def process_thermo_cool_command(self):
        """Enter thermostat cool mode.

        Takes control of the remote and damper if not already in remote operation,
        then runs the unit in Fan Lo with an 18 degree set point. NOTE(review):
        as with thermo heat, only Fan mode is selected here; the cooling cycling
        is presumably managed externally - confirm at the caller.
        """
        self.print_status("Thermo Cool Command received on ")
        if self.remote_operation_on == False: # Turn On
            self.remote_operation_on = True
            self.enable_serial_comms_loop = True
            GPIO.output(self.control_enable, True) # Take Control of Remote
            self.damper_control_state = True
            GPIO.output(self.damper_control, True) # Take Control of Damper
            time.sleep (1.0)
        self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_1_dictionary["4SetTemp1"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
        self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_3_dictionary["4SetTemp3"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
        self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
        self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
        self.cool_mode = False
        self.fan_mode = True
        self.heat_mode = False
        self.fan_med = False
        self.fan_hi = False
        self.fan_lo = True
        self.update_status()
    def process_ventilate_mode(self):
        """Enter ventilate mode: take control if needed, then run the fan on high
        with a 21 degree set point (fan-only, no compressor mode selected)."""
        self.print_status("Ventilate Command received on ")
        if self.remote_operation_on == False: # Turn On
            self.remote_operation_on = True
            self.enable_serial_comms_loop = True
            GPIO.output(self.control_enable, True) # Take Control of Remote
            self.damper_control_state = True
            GPIO.output(self.damper_control, True) # Take Control of Damper
            time.sleep (1.0)
        self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_1_dictionary["4SetTemp1"] = self.set_temp['21 degrees'] # Set 21 Degrees
        self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
        self.packet_3_dictionary["4SetTemp3"] = self.set_temp['21 degrees'] # Set 21 Degrees
        self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
        self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
        self.cool_mode = False
        self.fan_mode = True
        self.heat_mode = False
        self.fan_med = False
        self.fan_hi = True
        self.fan_lo = False
        self.update_status()
    def process_thermo_auto_command(self): # Holding place if Auto method is to be added in the future
        """Placeholder: Thermostat Auto mode is not implemented yet."""
        pass
def process_heat_command(self):
self.print_status("Heat Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Heat On'] # Set to Heat Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_3_dictionary["2Mode3"] = self.mode['Heat On'] # Set to Heat Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.cool_mode = False
self.fan_mode = False
self.heat_mode = True
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_cool_command(self):
self.print_status("Cool Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Cool On'] # Set to Cool Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_3_dictionary["2Mode3"] = self.mode['Cool On'] # Set to Cool Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.cool_mode = True
self.fan_mode = False
self.heat_mode = False
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_fan_command(self):
self.print_status("Fan Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.cool_mode = False
self.fan_mode = True
self.heat_mode = False
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def process_fan_hi_command(self):
self.print_status("Fan Hi Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_fan_med_command(self):
self.print_status("Fan Med Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Med On'] # Fan Med
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Med On'] # Fan Med
self.fan_med = True
self.fan_hi = False
self.fan_lo = False
self.update_status()
def process_fan_lo_command(self):
self.print_status("Fan Lo Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def heartbeat_ack(self):
#self.print_status('Heartbeat received from Home Manager on ')
self.heartbeat_count = 0
self.no_heartbeat_ack = False
### End of Methods for mqtt messages received from Home Manager ###
### Methods called in main loop ###
    def process_home_manager_heartbeat(self): # Send heartbeat signal to Home Manager every 120 loops. Turn aircon off and reboot if there's no response within 80 more loops
        """Watchdog: publish a heartbeat on loop 120; if no ack resets the counter by
        loop 200, turn the aircon off and reboot the host as a hard recovery."""
        self.heartbeat_count += 1
        if self.heartbeat_count == 120:
            #self.print_status('Sending Heartbeat to Home Manager on ')
            self.send_heartbeat_to_home_manager()
        if self.heartbeat_count > 200:
            self.print_status('Home Manager Heartbeat Lost. Setting Aircon to Thermo Off Mode on ')
            self.client.publish('AirconStatus', '{"service": "Restart"}')
            self.no_heartbeat_ack = True
            self.process_thermo_off_command()
            time.sleep(10)
            os.system('sudo reboot') # Hard recovery: reboot the controller host
    def send_heartbeat_to_home_manager(self):
        """Publish a heartbeat message for Home Manager to acknowledge."""
        self.client.publish('AirconStatus', '{"service": "Heartbeat"}')
def build_packets(self, packet_1, packet_3): # Build packets 1 and 3 for sending to the aircon
packets = [packet_1, packet_3]
for x in range(2):
sorted_packet = ([value for (key, value) in sorted(packets[x].items())]) # Sort the bytes contained in each packet dictionary into the correct order by using the first digit in the byte key
packet_no_checksum = ''.join(sorted_packet) # Join the packet dictionary bytes into one string
checksum = self.calculate_checksum(packet_no_checksum) # Calculate the checksum
packet_with_checksum = packet_no_checksum + checksum # Add the checksum to the end of the packet
packet_send = bytes.fromhex(''.join(packet_with_checksum)) # Convert the joined packet to binary
if x == 0: # Packet 1
self.packet_1_with_checksum = packet_with_checksum
self.packet_1_send = packet_send
else: # Packet 3
self.packet_3_with_checksum = packet_with_checksum
self.packet_3_send = packet_send
    def send_serial_aircon_data(self, packet): # Send packet to aircon comms port
        """Write one pre-encoded binary packet to the aircon serial port."""
        self.aircon_comms.write(packet)
    def receive_serial_aircon_data(self): # Receive Packet 2 from aircon comms port
        """Scan the serial stream for the Packet 2 header (0x80 0x8c), then read the
        remaining 14 bytes and decode them; flag a malfunction if no header is found."""
        # Look for Packet 2 Header (x808c)
        header_loop_count = 0
        found_packet_2_header = False
        while header_loop_count < 16: # Test an entire packet for header
            test_for_header_1 = self.aircon_comms.read(1) # Read one byte to look for the first half of the header
            if test_for_header_1 == b'\x80':
                test_for_header_2 = self.aircon_comms.read(1) # Read one byte to look for the second half of the header, after successfully finding the first half of the header
                if test_for_header_2 == b'\x8c':
                    found_packet_2_header = True # Flag that the correct Packet 2 header has been found
                    exit_loop_count = header_loop_count # Record the loop count in which the correct header was found (for debugging purposes)
                    header_loop_count = 16 # Exit the loop if the complete correct header has been found
                else:
                    header_loop_count += 1 # Look for another instance of the first half of the header if the correct second half wasn't found immediately after the first half
            else:
                header_loop_count += 1 # Keep looking for the first half of the header
        if found_packet_2_header == True: # Read the remaining bytes in the packet after the correct Packet 2 Header is found
            self.raw_response_1 = self.aircon_comms.read(6) # Capture the next 6 bytes
            self.raw_response_2 = self.aircon_comms.read(8) # capture the next 8 bytes
            self.raw_response = b"".join([test_for_header_1, test_for_header_2, self.raw_response_1, self.raw_response_2]) # Construct the entire Packet 2 in binary form
            self.packet_2 = str(binascii.hexlify(self.raw_response), "utf-8") # Convert Packet 2 to a hex string
            self.decode_packet(self.packet_2) # Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte
        else: # Flag that no correct Packet 2 header has been found
            print("No valid Packet 2 Header received")
            self.packet_2_error = True
            self.malfunction = True
    def decode_packet(self, packet_2): # Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte. Validate checksum and comparison with Packet 1 data
        """Split the Packet 2 hex string into named fields, mirror the aircon state flags
        and validate the packet.

        Compressor/heating/filter flags are updated from the decoded bytes, with a
        status publish on any change. The checksum is verified (failure flags a
        malfunction) and the mode/set-temp/fan bytes are cross-checked against the
        last-sent Packet 1 (mismatch is only logged).
        """
        self.packet_2_error = False # Flag that Packet 2 is OK
        self.previous_malfunction = self.malfunction # Capture the previous malfunction state
        self.malfunction = False # Clear the malfunction flag
        # Field boundaries are fixed offsets within the 32-character hex string
        self.packet_2_dictionary["1Header2"] = packet_2[0:4]
        self.packet_2_dictionary["2Mode2"] = packet_2[4:6]
        self.packet_2_dictionary["3Filler2a"] = packet_2[6:8]
        self.packet_2_dictionary["4SetTemp2"] = packet_2[8:10]
        self.packet_2_dictionary["5Fan2"] = packet_2[10:12]
        self.previous_actual_temperature = self.packet_2_dictionary["6ActualTemp2"]
        self.packet_2_dictionary["6ActualTemp2"] = packet_2[12:14]
        self.packet_2_dictionary["7Filler2b"] = packet_2[14:16]
        self.packet_2_dictionary["8Unknown2"] = packet_2[16:18]
        self.packet_2_dictionary["9Alerts2"] = packet_2[18:20]
        self.packet_2_dictionary["10Filler2c"] = packet_2[20:24]
        self.packet_2_dictionary["11Compressor2"] = packet_2[24:26]
        self.packet_2_dictionary["12Filler2c"] = packet_2[26:30]
        self.packet_2_dictionary["13Checksum2"] = packet_2[30:32]
        packet_no_checksum = packet_2[0:30] # Capture the packet without the checksum so that the checksum can be calculated
        checksum = self.calculate_checksum(packet_no_checksum)
        if self.packet_2_dictionary["11Compressor2"] == self.compressor_state['On']:
            if self.compressor == False:
                self.compressor = True
                #self.print_status("Aircon Compressor Started on ")
                self.update_status()
        if self.packet_2_dictionary["11Compressor2"] == self.compressor_state['Off']:
            if self.compressor == True:
                self.compressor = False
                #self.print_status("Aircon Compressor Stopped on ")
                self.update_status()
        if self.packet_2_dictionary["9Alerts2"] == self.alerts['Warmup'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Warmup'][1]:
            if self.heating == False:
                self.heating = True
                #self.print_status("Aircon Warmup Started on ")
                self.update_status()
        if self.packet_2_dictionary["9Alerts2"] == self.alerts['Not in Warmup'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Not in Warmup'][1]:
            if self.heating == True:
                self.heating = False
                #self.print_status("Aircon Warmup Stopped on ")
                self.update_status()
        if self.packet_2_dictionary["9Alerts2"] == self.alerts['Clean Filter'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Clean Filter'][1]:
            if self.filter == False:
                self.filter = True
                self.print_status("Filter Clean Alert Active on ")
                self.update_status()
        if self.packet_2_dictionary["9Alerts2"] == self.alerts['Filter OK'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Filter OK'][1]:
            if self.filter == True:
                self.filter = False
                self.print_status("Filter Clean Alert Reset on ")
                self.update_status()
        if self.packet_2_dictionary["8Unknown2"] != "e0": # Byte 8's purpose is unknown; log any deviation from its usual value
            self.print_status("Unknown Byte 8 of Packet 2 ")
            print("Expected e0 but received ", self.packet_2_dictionary["8Unknown2"])
        if checksum != self.packet_2_dictionary["13Checksum2"]:
            print ("Packet 2 Checksum Error. Expected ", checksum, " Received ", self.packet_2_dictionary["13Checksum2"])
            self.packet_2_error = True
            self.malfunction = True
        if packet_2[4:12] != self.packet_1_with_checksum[4:12]: # Aircon should echo the commanded mode/set-temp/fan bytes
            print("Mismatch between Packets 1 and 2. Expected ", self.packet_1_with_checksum[4:12], " but received ", packet_2[4:12])
        if self.malfunction != self.previous_malfunction:
            self.update_status()
def calculate_checksum(self, packet_no_checksum): # Calculate and return Packet 2's checksum
b = [packet_no_checksum[i:i+2] for i in range(0, len(packet_no_checksum), 2)] # Build a list of each non-checksum Packet 2 byte in hex string form
c = [int(i, 16) for i in b] # Convert the hex string form list into a list of integers
d = sum(c) % 256 # Sum the integer list in modulo 256
return hex(d)[2:].zfill(2) # Return the checksum in 2 digit hex form
def calculate_next_sequence_number(self, current_number): # Calculate to next Packet 3 sequence number
current_first_byte = int(current_number[0:2], 16) # Convert the first byte in hex string form to an integer
current_third_nibble = int(current_number[2:3], 16) # Convert the third nibble in hex string form to an integer
if current_third_nibble == 11: # The third nibble cycles between Hex 8 and Hex b
next_third_nibble = 8 # Reset to 8 if it's completed its full cycle
if current_first_byte == 50: # The first byte cycles between Hex 00 and Hex 32, incrementing by one when the third nibble completes its full cycle
next_first_byte = 0 # Reset to 0 if it's completed its full cycle
else:
next_first_byte = current_first_byte + 1
else:
next_first_byte = current_first_byte
next_third_nibble = current_third_nibble + 1
next_string = hex(next_first_byte)[2:].zfill(2) + hex(next_third_nibble)[2:] + "f" # Combine the first byte and third nibble in string form, adding hex f at the end to make it two complete bytes
return next_string
def detect_damper_position(self, calibrate):
resp2 = self.spi.xfer2([0x11, 0x00, 0x00])
resp2a = int(resp2.pop(1)/2) # Remove LSB since we only need 10% resolution
resp2b = int(resp2.pop(1)) # Capture but ignore these three bits since we only need 10% resolution
self.damper_position = int(resp2a) * 2 * 8 # Move bits to their correct position and use Y-Axis number as the position
if calibrate == False:
self.current_damper_percent = int((self.damper_night_position - self.damper_position)/((self.damper_night_position - self.damper_day_position)/100))# Sets Day Position at 100% and Night Position at 0% - Assuming that the Night Position has a higher reading from the damper position sensor that the Day Position
# Convert the reported damper percentage to the nearest 10% of the current percentage
if self.current_damper_percent >=95:
self.reported_damper_percent = 100
elif self.current_damper_percent < 95 and self.current_damper_percent >= 85:
self.reported_damper_percent = 90
elif self.current_damper_percent < 85 and self.current_damper_percent >= 75:
self.reported_damper_percent = 80
elif self.current_damper_percent < 75 and self.current_damper_percent >= 65:
self.reported_damper_percent = 70
elif self.current_damper_percent < 65 and self.current_damper_percent >= 55:
self.reported_damper_percent = 60
elif self.current_damper_percent < 55 and self.current_damper_percent >= 45:
self.reported_damper_percent = 50
elif self.current_damper_percent < 45 and self.current_damper_percent >= 35:
self.reported_damper_percent = 40
elif self.current_damper_percent < 35 and self.current_damper_percent >= 25:
self.reported_damper_percent = 30
elif self.current_damper_percent < 25 and self.current_damper_percent >= 15:
self.reported_damper_percent = 20
elif self.current_damper_percent < 15 and self.current_damper_percent >= 5:
self.reported_damper_percent = 10
else:
self.reported_damper_percent = 0
def adjust_damper_position(self):
if self.requested_damper_percent != self.reported_damper_percent:
self.adjusting_damper = True
if self.requested_damper_percent > self.reported_damper_percent:
self.damper_day_zone() # Set damper switch to day zone if the damper's to be moved towards the day zone
else:
self.requested_damper_percent < self.reported_damper_percent
self.damper_night_zone() # Set damper switch to night zone if the damper's to be moved towards the night zone
else:
if self.adjusting_damper == True: # Flag that the damper is no longer being adjusted if it was previously being adjusted
self.adjusting_damper = False
self.update_status()
if self.requested_damper_percent == 100: # Lock damper in Day Zone if the damper is to be wholly in Day Zone
self.damper_day_zone()
elif self.requested_damper_percent == 0: # Lock damper in Night Zone if the dampr is to be wholly in Night Zone
self.damper_night_zone()
else:
self.hold_damper() # Hold damper in position if the damper is to be between zones
    def damper_day_zone(self): # Move damper towards the Day Zone
        """Release the stop relay and set the zone relay to the day-zone direction."""
        self.damper_stop_state = False
        GPIO.output(self.damper_stop, False)
        self.damper_zone_state = False
        GPIO.output(self.damper_zone, False)
    def damper_night_zone(self): # Move damper towards the Night Zone
        """Release the stop relay and set the zone relay to the night-zone direction."""
        self.damper_stop_state = False
        GPIO.output(self.damper_stop, False)
        self.damper_zone_state = True
        GPIO.output(self.damper_zone, True)
    def hold_damper(self): # Stop damper motion
        """Energise the stop relay so the damper holds its current position."""
        self.damper_stop_state = True
        GPIO.output(self.damper_stop, True)
    def calibrate_damper(self, damper_movement_time):
        """Calibrate the damper's travel limits.

        Drives the damper fully to the night zone, then fully to the day zone,
        allowing *damper_movement_time* seconds of travel each way, and records
        the raw sensor reading at each end as the new night/day reference.
        Control of the damper is taken for the duration and released afterwards.
        """
        print('Calibrating Damper')
        print('Taking Control of Damper')
        self.damper_control_state = True
        GPIO.output(self.damper_control, True) # Take Control of Damper
        time.sleep(1)
        print('Moving Damper to Night Zone')
        self.damper_night_zone()
        time.sleep(damper_movement_time)
        print('Moved Damper to Night Zone')
        self.detect_damper_position(calibrate = True)
        print('Night Zone Damper Position', self.damper_position)
        print('Changing Night Zone Damper Position from', self.damper_night_position, 'to', self.damper_position)
        self.damper_night_position = self.damper_position
        print('Moving Damper to Day Zone')
        self.damper_day_zone()
        time.sleep(damper_movement_time)
        print('Moved Damper to Day Zone')
        self.detect_damper_position(calibrate = True)
        print('Day Zone Damper Position', self.damper_position)
        print('Changing Day Zone Damper Position from', self.damper_day_position, 'to', self.damper_position)
        self.damper_day_position = self.damper_position
        print('Relinquishing Control of Damper')
        self.damper_control_state = False # Flag that the damper is no longer being controlled
        GPIO.output(self.damper_control, False) # Relinquish Control of Damper
        time.sleep(1)
    def shutdown_cleanup(self):
        """Graceful shutdown (Ctrl-C): turn the aircon off, release GPIO, mqtt and SPI, then exit."""
        self.print_status("Northcliff Aircon Controller shutting down on ")
        self.process_thermo_off_command() #Turn Aircon off
        GPIO.cleanup()
        self.client.loop_stop() #Stop monitoring mqtt thread
        self.spi.close()
        sys.exit(0)
### End of methods called in the main loop ###
### Debugging methods ###
    def capture_and_print_serial(self): # Only used for serial comms debugging
        """Read one 8-byte chunk from the aircon serial port and print it."""
        self.controller_msg = self.aircon_comms.read(8)
        print(str(self.controller_msg))
def capture_and_file_serial_data(self, capture_file_name): # Only used for serial comms debugging
a = 0
with open(capture_file_name, "wb+") as f:
while a <= 1000:
self.controller_msg = self.aircon_comms.read(8)
f.write(self.controller_msg)
print(str(a) + str(self.controller_msg))
a = a + 1
### End end of debugging methods ###
### Main Loop ###
    def run(self):
        """Main loop.

        Every iteration services the Home Manager heartbeat watchdog. While remote
        operation is enabled, one packet exchange cycle is performed (send Packet 1,
        receive/decode Packet 2, send Packet 3 with its next sequence number) and
        the damper is driven towards its requested position. When remote operation
        has been disabled, control of the aircon and damper is relinquished once.
        Ctrl-C triggers a clean shutdown.
        """
        try:
            self.startup()
            while True:
                self.process_home_manager_heartbeat() # Send heartbeat to Home Manager every 120 loops.
                if self.enable_serial_comms_loop == True:
                    self.aircon_comms.flushInput() # remove sent packets from aircon comms buffer
                    self.build_packets(self.packet_1_dictionary, self.packet_3_dictionary) # Build Packets 1 and 3
                    self.send_serial_aircon_data(self.packet_1_send) # Send Packet 1 to aircon comms port
                    time.sleep(0.160) # Wait until Packet 1 has been sent before clearing aircon comms buffer
                    self.aircon_comms.flushInput() # remove sent packets from aircon comms buffer
                    time.sleep(0.15) # Gap between Packets 1 and 2
                    self.receive_serial_aircon_data() # Receive Packet 2 and decode it
                    if self.packet_2_error == False: #Only send packet 3 if packet 2 was OK
                        time.sleep(0.16) # Gap between Packets 2 and 3
                        self.send_serial_aircon_data(self.packet_3_send) # Send Packet 3
                        self.packet_3_dictionary["1Header3"] = self.calculate_next_sequence_number(self.packet_3_dictionary["1Header3"]) # Set up the sequence number for the next transmission of Packet 3
                    else:
                        print("Packet 3 not sent because of Packet 2 error")
                    time.sleep(0.45) # Wait until Packet 3 has been sent, plus 0.05 sec gap (or equivalent time if it isn't sent)
                    self.detect_damper_position(calibrate = False) # Determine the damper's current position
                    self.adjust_damper_position() # Adjusts damper position if the current damper position is different from the requested damper position
                else:
                    if self.remote_operation_on == True: # This ensures that the disconnect is only done once
                        self.remote_operation_on = False # Flag that the aircon is not being controlled
                        GPIO.output(self.control_enable, False) # Relinquish Control of the aircon
                        self.damper_control_state = False # Flag that the damper is no longer being controlled
                        GPIO.output(self.damper_control, False) # Relinquish Control of Damper
                        self.damper_day_zone() # Turn Damper Zone and Stop relays Off
                        self.heartbeat_count = 0 # Reset the heartbeat count to start from zero when Home Manager comms is restored
                        if self.no_heartbeat_ack == True:
                            self.malfunction = True
                        else:
                            self.malfunction = False #Clear Malfunction Flag (Packets might be corrupted on disconnect) unless there's a loss of heartbeat
                        self.update_status()
                    else:
                        time.sleep (1)
        except KeyboardInterrupt:
            self.shutdown_cleanup()
### End of Main Loop ###
# Entry point: create the controller (damper calibration skipped on startup) and run its main loop
if __name__ =='__main__':
    controller = NorthcliffAirconController(calibrate_damper_on_startup = False)
    controller.run()
| Northcliff_Aircon_Controller.py | 38,352 | !/usr/bin/env python3 Northcliff Airconditioner Controller Version 3.48 Genimport requestsfrom threading import Thread Set up GPIO Aircon Startup Mode This flag keeps track of whether the aircon is under remote or autonomous operation This flag is set to True during remote operation to enable the serial comms loop when the aircon is under remote operations Mirrors aircon heating state indicator Mirrors aircon compressor state indicator Mirrors aircon malfunction state indicator and is used to indicate a malfunction in the aircon/controller comms Mirrors aircon heat mode indicator Mirrors aircon cool mode indicator Mirrors aircon fan mode indicator Mirrors aircon fan hi indicator Mirrors aircon fan med indicator Mirrors aircon fan lo indicator Mirrors aircon filter indicator Set up damper states Set default damper positions Set up heartbeat Set up Serial Comms Data Set up dictionaries for Serial Comms Packets to Off, Fan Mode, Fan Lo Set up serial port for aircon controller comms After swapping serial and bluetooth ports so we can use parity Set up SPI Port for the damper position sensor Initialise damper position sensor X-Channel Self Test Exit Self Test Y-Channel Self Test Exit Self Test Set up mqtt clientCreate new instance of mqtt ClassConnect to mqtt brokerStart mqtt monitor thread Detect Damper Position and update Home Manager with aircon status Print mqtt status on connecting to broker mqtt message method calls Capture message with binary states converted to a stringprint(message) If HomeManager wants a status update If HomeManager sends a heartbeat ack Send aircon status to Home Manager Methods for mqtt messages received from Home Manager Set Fan to Off Mode Set Fan to Off Mode Set Fan to High Set Fan to High Wait for packets to be sent before disconnecting Sets the flag to exit serial comms loop and prepare for disconnect The disconnect is done in the main loop so it happens between packet 3 and packet 1 Turn On 
Take Control of Remote Take Control of Damper Set to Fan Mode Set 30 degrees for Heating Set to Fan Mode Set 30 degrees for Heating Fan Lo Fan Lo Turn On Take Control of Remote Take Control of Damper Set to Fan Mode Set 18 Degrees for Cooling Set to Fan Mode Set 18 Degrees for Cooling Fan Lo Fan Lo Turn On Take Control of Remote Take Control of Damper Set to Fan Mode Set 21 Degrees Set to Fan Mode Set 21 Degrees Fan Hi Fan Hi Holding place if Auto method is to be added in the future Set to Heat Mode Set 30 degrees for Heating Set to Heat Mode Set 30 degrees for Heating Fan Hi Fan Hi Set to Cool Mode Set 18 Degrees for Cooling Set to Cool Mode Set 18 Degrees for Cooling Fan Hi Fan Hi Set to Fan Mode Set to Fan Mode Fan Lo Fan Lo Fan Hi Fan Hi Fan Med Fan Med Fan Lo Fan Loself.print_status('Heartbeat received from Home Manager on ') End of Methods for mqtt messages received from Home Manager Methods called in main loop Send heartbeat signal to Home Manager every 120 loops. Turn aircon off and reboot if there's no response within 80 more loopsself.print_status('Sending Heartbeat to Home Manager on ') Build packets 1 and 3 for sending to the aircon Sort the bytes contained in each packet dictionary into the correct order by using the first digit in the byte key Join the packet dictionary bytes into one string Calculate the checksum Add the checksum to the end of the packet Convert the joined packet to binary Packet 1 Packet 3 Send packet to aircon comms port Receive Packet 2 from aircon comms port Look for Packet 2 Header (x808c) Test an entire packet for header Read one byte to look for the first half of the header Read one byte to look for the second half of the header, after sucessfully finding the first half of the header Flag that the correct Packet 2 header has been found Record the loop count in which the correct header was found (for debugging purposes) Exit the loop if the complete correct header has been found Look for another instance of the first half of 
the header if the correct second half wasn't found immediately after the first half Keep looking for the first half of the header Read the remaining bytes in the packet after the correct Packet 2 Header is found Capture the next 6 bytes capture the next 8 bytes Construct the entire Packet 2 in binary form Convert Packet to to a string Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte Flag that no correct Packet 2 header has been found Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte. Validate checksum and comparison with Packet 1 data Flag that Packet 2 is OK Capture the previous malfunction state Clear the malfunction flag Capure the packet without the checksum so that the checksum can be calculatedself.print_status("Aircon Compressor Started on ")self.print_status("Aircon Compressor Stopped on ")self.print_status("Aircon Warmup Started on ")self.print_status("Aircon Warmup Stopped on ") Calculate and return Packet 2's checksum Build a list of each non-checksum Packet 2 byte in hex string form Convert the hex string form list into a list of integers Sum the integer list in modulo 256 Return the checksum in 2 digit hex form Calculate to next Packet 3 sequence number Convert the first byte in hex string form to an integer Convert the third nibble in hex string form to an integer The third nibble cycles between Hex 8 and Hex b Reset to 8 if it's completed its full cycle The first byte cycles between Hex 00 and Hex 32, incrementing by one when the third nibble completes its full cycle Reset to 0 if it's completed its full cycle Combine the first byte and third nibble in string form, adding hex f at the end to make it two complete bytes Remove LSB since we only need 10% resolution Capture but ignore these three bits since we only need 10% resolution Move bits to their correct position and use Y-Axis number as the position Sets Day Position at 
100% and Night Position at 0% - Assuming that the Night Position has a higher reading from the damper position sensor that the Day Position Convert the reported damper percentage to the nearest 10% of the current percentage Set damper switch to day zone if the damper's to be moved towards the day zone Set damper switch to night zone if the damper's to be moved towards the night zone Flag that the damper is no longer being adjusted if it was previously being adjusted Lock damper in Day Zone if the damper is to be wholly in Day Zone Lock damper in Night Zone if the dampr is to be wholly in Night Zone Hold damper in position if the damper is to be between zones Move damper towards the Day Zone Move damper towards the Night Zone Stop damper motion Take Control of Damper Flag that the damper is no longer being controlled Relinquish Control of DamperTurn Aircon offStop monitoring mqtt thread End of methods called in the main loop Debugging methods Only used for serial comms debugging Only used for serial comms debugging End end of debugging methods Main Loop Send heartbeat to Home Manager every 120 loops. 
remove sent packets from aircon comms buffer Build Packets 1 and 3 Send Packet 1 to aircon comms port Wait until Packet 1 has been sent before clearing aircon comms buffer remove sent packets from aircon comms buffer Gap between Packets 1 and 2 Receive Packet 2 and decode itOnly send packet 3 if packet 2 was OK Gap between Packets 2 and 3 Send Packet 3 Set up the sequence number for the next transmission of Packet 3 Wait until Packet 3 has been sent, plus 0.05 sec gap (or equivalent time if it isn't sent) Determine the damper's current position Adjusts damper position if the current damper position is different from the requested damper position This ensures that the disconnect is only done once Flag that the aircon is not being controlled Relinquish Control of the aircon Flag that the damper is no longer being controlled Relinquish Control of Damper Turn Damper Zone and Stop relays Off Reset the heartbeat count to start from zero when Home Manager comms is restoredClear Malfunction Flag (Packets might be corrupted on disconnect) unless there's a loss of heartbeat End of Main Loop | 8,182 | en | 0.849719 |
from django.utils.version import get_version
VERSION = (3, 1, 6, "final", 0)
__version__ = get_version(VERSION)
def setup(set_prefix=True):
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True.
"""
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(
"/" if settings.FORCE_SCRIPT_NAME is None else settings.FORCE_SCRIPT_NAME
)
apps.populate(settings.INSTALLED_APPS)
| Thesis@3.9.1/Lib/site-packages/django/__init__.py | 799 | Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True. | 208 | en | 0.859683 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
# from
# https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_United_Kingdom
license_formats = (
'??## ???',
'??##???'
)
| oscar/lib/python2.7/site-packages/faker/providers/automotive/en_GB/__init__.py | 305 | coding=utf-8 from https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_United_Kingdom | 97 | en | 0.634569 |
import mysql_conn
class BaseField:
def __init__(self,name,column_type,primary_key,default):
self.name=name
self.column_type=column_type
self.primary_key=primary_key
self.default=default
class StringField(BaseField):
def __init__(self,name,column_type='varchar(200)',primary_key=False,default=None):
super().__init__(name,column_type,primary_key,default)
class IntegerField(BaseField):
def __init__(self,name,column_type='int',primary_key=False,default=0):
super().__init__(name, column_type, primary_key, default)
class ModelsMeta(type):
def __new__(cls,name,bases,attr):
if name=='Models':
return type.__new__(cls,name,bases,attr)
table_name=attr.get('table_name',None)
if not table_name:
table_name=name
primary_key=None
mappings=dict()
for k,v in attr.items():
if isinstance(v,BaseField):
mappings[k]=v
if v.primary_key:
if primary_key:
raise TypeError('ไธป้ฎ้ๅค')
primary_key=k
for k in mappings.keys():
attr.pop(k)
if not primary_key:
raise TypeError('ๆฒกๆไธป้ฎ')
attr['mappings']=mappings
attr['primary_key']=primary_key
attr['table_name']=table_name
return type.__new__(cls,name,bases,attr)
class Models(dict,metaclass=ModelsMeta):
def __init__(self,**kwargs):
super().__init__(**kwargs)
def __setattr__(self, key, value):
self[key]=value
def __getattr__(self, item):
try:
return self[item]
except BaseException:
raise TypeError('ๆฒกๆ่ฟไธชๅฑๆง')
@classmethod
def select_one(cls,**kwargs):
key=list(kwargs.keys())[0]
value=kwargs[key]
sql='select * from %s where %s=?'%(cls.table_name,key)
sql=sql.replace('?','%s')
ms=mysql_conn.Mysql()
re=ms.select(sql,value)
if re:
return cls(**re[0])
else:
return
@classmethod
def select_many(cls,**kwargs):
ms=mysql_conn.Mysql()
if kwargs:
key = list(kwargs.keys())[0]
value = kwargs[key]
sql = 'select * from %s where %s=?' % (cls.table_name, key)
sql = sql.replace('?', '%s')
re = ms.select(sql, value)
else:
sql='select * from %s' %(cls.table_name)
re = ms.select(sql, None)
if re:
return list(cls(**r) for r in re)
else:
return
def update(self):
ms=mysql_conn.Mysql()
field_list=[]
field_list_value=[]
primary_key_value=None
for k,v in self.mappings.items():
if v.primary_key:
primary_key_value=getattr(self,v.name,None)
else:
field_list.append(v.name+'=?')
field_list_value.append(getattr(self,v.name,v.default))
sql='update %s set %s where %s = %s'%(self.table_name,','.join(field_list),self.primary_key,primary_key_value)
sql=sql.replace('?','%s')
ms.execute(sql,field_list_value)
def save(self):
ms = mysql_conn.Mysql()
field_list = []
field_list_value = []
char_list=[]
for k, v in self.mappings.items():
if not v.primary_key:
field_list.append(v.name)
char_list.append('?')
field_list_value.append(getattr(self,v.name,v.default))
sql='insert into %s(%s) value(%s)'%(self.table_name,','.join(field_list),','.join(char_list))
sql=sql.replace('?','%s')
ms.execute(sql,field_list_value)
class User(Models):
table_name='user'
id=IntegerField('id',primary_key=True)
name=StringField('name')
password=StringField('password')
if __name__ == '__main__':
# user=User.select_one(id=1)
# user.name='่ฟๆต่ฏ1111'
# user.update()
# print(user)
user=User(name='miaoqinian',password='xxx')
user.save()
| orm1.py | 4,126 | user=User.select_one(id=1) user.name='่ฟๆต่ฏ1111' user.update() print(user) | 72 | ja | 0.083905 |
import copy
import os
import logging
import pickle
from typing import Dict, List, Optional, Union
try:
import sigopt as sgo
Connection = sgo.Connection
except ImportError:
sgo = None
Connection = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class SigOptSearch(Searcher):
"""A wrapper around SigOpt to provide trial suggestions.
You must install SigOpt and have a SigOpt API key to use this module.
Store the API token as an environment variable ``SIGOPT_KEY`` as follows:
.. code-block:: bash
pip install -U sigopt
export SIGOPT_KEY= ...
You will need to use the `SigOpt experiment and space specification
<https://app.sigopt.com/docs/overview/create>`_.
This module manages its own concurrency.
Parameters:
space (list of dict): SigOpt configuration. Parameters will be sampled
from this configuration and will be used to override
parameters generated in the variant generation process.
Not used if existing experiment_id is given
name (str): Name of experiment. Required by SigOpt.
max_concurrent (int): Number of maximum concurrent trials supported
based on the user's SigOpt plan. Defaults to 1.
connection (Connection): An existing connection to SigOpt.
experiment_id (str): Optional, if given will connect to an existing
experiment. This allows for a more interactive experience with
SigOpt, such as prior beliefs and constraints.
observation_budget (int): Optional, can improve SigOpt performance.
project (str): Optional, Project name to assign this experiment to.
SigOpt can group experiments by project
metric (str or list(str)): If str then the training result
objective value attribute. If list(str) then a list of
metrics that can be optimized together. SigOpt currently
supports up to 2 metrics.
mode (str or list(str)): If experiment_id is given then this
field is ignored, If str then must be one of {min, max}.
If list then must be comprised of {min, max, obs}. Determines
whether objective is minimizing or maximizing the metric
attribute. If metrics is a list then mode must be a list
of the same length as metric.
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Example Experiment",
max_concurrent=1, metric="mean_loss", mode="min")
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Multi Objective Example Experiment",
max_concurrent=1, metric=["average", "std"], mode=["max", "min"])
"""
OBJECTIVE_MAP = {
"max": {
"objective": "maximize",
"strategy": "optimize"
},
"min": {
"objective": "minimize",
"strategy": "optimize"
},
"obs": {
"strategy": "store"
}
}
def __init__(self,
space: List[Dict] = None,
name: str = "Default Tune Experiment",
max_concurrent: int = 1,
connection: Optional[Connection] = None,
experiment_id: Optional[str] = None,
observation_budget: Optional[int] = None,
project: Optional[str] = None,
metric: Union[None, str, List[str]] = "episode_reward_mean",
mode: Union[None, str, List[str]] = "max",
points_to_evaluate: Optional[List[Dict]] = None,
**kwargs):
assert (experiment_id is
None) ^ (space is None), "space xor experiment_id must be set"
assert type(max_concurrent) is int and max_concurrent > 0
if connection is not None:
self.conn = connection
else:
assert sgo is not None, """SigOpt must be installed!
You can install SigOpt with the command:
`pip install -U sigopt`."""
assert "SIGOPT_KEY" in os.environ, \
"SigOpt API key must be stored as " \
"environ variable at SIGOPT_KEY"
# Create a connection with SigOpt API, requires API key
self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])
self._max_concurrent = max_concurrent
if isinstance(metric, str):
metric = [metric]
mode = [mode]
self._metric = metric
self._live_trial_mapping = {}
if experiment_id is None:
sigopt_params = dict(
name=name,
parameters=space,
parallel_bandwidth=self._max_concurrent)
if observation_budget is not None:
sigopt_params["observation_budget"] = observation_budget
if project is not None:
sigopt_params["project"] = project
if len(metric) > 1 and observation_budget is None:
raise ValueError(
"observation_budget is required for an"
"experiment with more than one optimized metric")
sigopt_params["metrics"] = self.serialize_metric(metric, mode)
self.experiment = self.conn.experiments().create(**sigopt_params)
else:
self.experiment = self.conn.experiments(experiment_id).fetch()
self._points_to_evaluate = points_to_evaluate
super(SigOptSearch, self).__init__(metric=metric, mode=mode, **kwargs)
def suggest(self, trial_id: str):
if self._max_concurrent:
if len(self._live_trial_mapping) >= self._max_concurrent:
return None
suggestion_kwargs = {}
if self._points_to_evaluate:
config = self._points_to_evaluate.pop(0)
suggestion_kwargs = {"assignments": config}
# Get new suggestion from SigOpt
suggestion = self.conn.experiments(
self.experiment.id).suggestions().create(**suggestion_kwargs)
self._live_trial_mapping[trial_id] = suggestion.id
return copy.deepcopy(suggestion.assignments)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
"""
if result:
payload = dict(
suggestion=self._live_trial_mapping[trial_id],
values=self.serialize_result(result))
self.conn.experiments(
self.experiment.id).observations().create(**payload)
# Update the experiment object
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error:
# Reports a failed Observation
self.conn.experiments(self.experiment.id).observations().create(
failed=True, suggestion=self._live_trial_mapping[trial_id])
del self._live_trial_mapping[trial_id]
@staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
"""
Converts metrics to https://app.sigopt.com/docs/objects/metric
"""
serialized_metric = []
for metric, mode in zip(metrics, modes):
serialized_metric.append(
dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric
def serialize_result(self, result: Dict):
"""
Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation
"""
missing_scores = [
metric for metric in self._metric if metric not in result
]
if missing_scores:
raise ValueError(
f"Some metrics specified during initialization are missing. "
f"Missing metrics: {missing_scores}, provided result {result}")
values = []
for metric in self._metric:
value = dict(name=metric, value=result[metric])
values.append(value)
return values
def save(self, checkpoint_path: str):
trials_object = (self.experiment.id, self._live_trial_mapping,
self._points_to_evaluate)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
trials_object = pickle.load(inputFile)
experiment_id, self._live_trial_mapping, self._points_to_evaluate = \
trials_object
self.experiment = self.conn.experiments(experiment_id).fetch()
| python/ray/tune/suggest/sigopt.py | 9,997 | A wrapper around SigOpt to provide trial suggestions.
You must install SigOpt and have a SigOpt API key to use this module.
Store the API token as an environment variable ``SIGOPT_KEY`` as follows:
.. code-block:: bash
pip install -U sigopt
export SIGOPT_KEY= ...
You will need to use the `SigOpt experiment and space specification
<https://app.sigopt.com/docs/overview/create>`_.
This module manages its own concurrency.
Parameters:
space (list of dict): SigOpt configuration. Parameters will be sampled
from this configuration and will be used to override
parameters generated in the variant generation process.
Not used if existing experiment_id is given
name (str): Name of experiment. Required by SigOpt.
max_concurrent (int): Number of maximum concurrent trials supported
based on the user's SigOpt plan. Defaults to 1.
connection (Connection): An existing connection to SigOpt.
experiment_id (str): Optional, if given will connect to an existing
experiment. This allows for a more interactive experience with
SigOpt, such as prior beliefs and constraints.
observation_budget (int): Optional, can improve SigOpt performance.
project (str): Optional, Project name to assign this experiment to.
SigOpt can group experiments by project
metric (str or list(str)): If str then the training result
objective value attribute. If list(str) then a list of
metrics that can be optimized together. SigOpt currently
supports up to 2 metrics.
mode (str or list(str)): If experiment_id is given then this
field is ignored, If str then must be one of {min, max}.
If list then must be comprised of {min, max, obs}. Determines
whether objective is minimizing or maximizing the metric
attribute. If metrics is a list then mode must be a list
of the same length as metric.
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Example Experiment",
max_concurrent=1, metric="mean_loss", mode="min")
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Multi Objective Example Experiment",
max_concurrent=1, metric=["average", "std"], mode=["max", "min"])
Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
Converts metrics to https://app.sigopt.com/docs/objects/metric
Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation
Create a connection with SigOpt API, requires API key Get new suggestion from SigOpt Update the experiment object Reports a failed Observation | 3,608 | en | 0.624665 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified).
__all__ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException',
'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model', 'Learner',
'VerboseCallback', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'Recorder', 'FetchPreds',
'load_learner']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
    "Context manager to temporarily replace an attribute"
    saved = getattr(o, attr)
    try:
        # Swap in the new value only when the generator is first advanced.
        yield setattr(o, attr, val)
    finally:
        # Always restore the original value, even if the body raised.
        setattr(o, attr, saved)
# Cell
def mk_metric(m):
    "Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
    if isinstance(m, Metric):
        return m
    # Plain callables get wrapped so they track a batch-size-weighted average.
    return AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True):
    "Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
    # No optimizer means there is nothing to bundle, whatever the caller asked.
    if opt is None:
        with_opt = False
    state = get_model(model).state_dict()
    if with_opt:
        # Bundle model + optimizer state under explicit keys so `load_model`
        # can tell the two formats apart.
        state = {'model': state, 'opt': opt.state_dict()}
    torch.save(state, file)
# Cell
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
    "Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
    # Resolve the target device: an int is shorthand for that CUDA device,
    # `None` defaults to the CPU.
    if isinstance(device, int): device = torch.device('cuda', device)
    elif device is None: device = 'cpu'
    state = torch.load(file, map_location=device)
    # Files written by `save_model(..., with_opt=True)` are dicts with exactly
    # the keys {'model', 'opt'}; anything else is treated as a bare state dict.
    hasopt = set(state)=={'model', 'opt'}
    model_state = state['model'] if hasopt else state
    get_model(model).load_state_dict(model_state, strict=strict)
    if hasopt and ifnone(with_opt,True):
        try: opt.load_state_dict(state['opt'])
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; a mismatched optimizer state is best-effort.
        except Exception:
            if with_opt: warn("Could not load the optimizer state.")
    elif with_opt: warn("Saved file doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
    "Concatenate `o` into one tensor, falling back to a flat `L` of rows when that fails"
    try:
        return torch.cat(o)
    except:
        # Shapes don't line up: flatten each tensor into an `L` of its rows
        # and join everything into a single `L`.
        rows = [L(t[i,:] for i in range_of(t)) for t in o]
        return sum(rows, L())
# Cell
from contextlib import ExitStack
# Cell
# Event sequences replayed around a stand-alone epoch (used by
# `Learner.validate` and `Learner.get_preds` to mimic a full fit).
_before_epoch = [event.begin_fit, event.begin_epoch]
_after_epoch  = [event.after_epoch, event.after_fit]
# Cell
# Central training object: wires together a model, its `DataLoaders`, a loss
# function, an optimizer and a list of `Callback`s, and runs the event-driven
# training loop over them. Docstrings are attached below via `add_docs`.
class Learner():
    def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
                 metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
                 moms=(0.95,0.85,0.95)):
        store_attr(self, "dls,model,opt_func,lr,splitter,model_dir,wd,wd_bn_bias,train_bn,metrics,moms")
        # Runtime state: not training yet, no optimizer, empty callback list,
        # logging via `print` until a callback replaces `logger`.
        self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
        if loss_func is None:
            # Fall back on the loss function attached to the training dataset.
            loss_func = getattr(dls.train_ds, 'loss_func', None)
            assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
        self.loss_func = loss_func
        self.path = path if path is not None else getattr(dls, 'path', Path('.'))
        # Instantiate callbacks passed as classes, then register defaults + user cbs.
        self.add_cbs([(cb() if isinstance(cb, type) else cb) for cb in L(defaults.callbacks)+L(cbs)])
        self.model.to(self.dls.device)
        if hasattr(self.model, 'reset'): self.model.reset()
        self.epoch,self.n_epoch,self.loss = 0,1,tensor(0.)
    @property
    def metrics(self): return self._metrics
    @metrics.setter
    def metrics(self,v): self._metrics = L(v).map(mk_metric)
    def add_cbs(self, cbs): L(cbs).map(self.add_cb)
    def remove_cbs(self, cbs): L(cbs).map(self.remove_cb)
    def add_cb(self, cb):
        # Each callback is also exposed as an attribute (e.g. `self.recorder`);
        # refuse to silently replace one of a different type.
        old = getattr(self, cb.name, None)
        assert not old or isinstance(old, type(cb)), f"self.{cb.name} already registered"
        cb.learn = self
        setattr(self, cb.name, cb)
        self.cbs.append(cb)
        return self
    def remove_cb(self, cb):
        cb.learn = None
        if hasattr(self, cb.name): delattr(self, cb.name)
        if cb in self.cbs: self.cbs.remove(cb)
    @contextmanager
    def added_cbs(self, cbs):
        self.add_cbs(cbs)
        yield
        self.remove_cbs(cbs)
    def ordered_cbs(self, cb_func): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
    def __call__(self, event_name): L(event_name).map(self._call_one)
    def _call_one(self, event_name):
        # Dispatch one event to every callback, in dependency order.
        assert hasattr(event, event_name)
        [cb(event_name) for cb in sort_by_run(self.cbs)]
    def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
    def create_opt(self):
        self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
        if not self.wd_bn_bias:
            # Exempt batchnorm/bias parameters from weight decay.
            for p in self._bn_bias_state(True ): p['do_wd'] = False
        if self.train_bn:
            # Keep training batchnorm params even in otherwise frozen groups.
            for p in self._bn_bias_state(False): p['force_train'] = True
    def _split(self, b):
        # Split a batch into inputs `xb` and targets `yb`; `n_inp` from the
        # DataLoaders wins, otherwise assume all-but-last are inputs.
        i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
        self.xb,self.yb = b[:i],b[i:]
    def all_batches(self):
        self.n_iter = len(self.dl)
        for o in enumerate(self.dl): self.one_batch(*o)
    def one_batch(self, i, b):
        # One forward/backward/step, with a callback event fired after each
        # stage; `CancelBatchException` skips the rest of the batch.
        self.iter = i
        try:
            self._split(b); self('begin_batch')
            self.pred = self.model(*self.xb); self('after_pred')
            if len(self.yb) == 0: return
            self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
            # In evaluation mode, stop before backprop/optimizer work.
            if not self.training: return
            self.loss.backward(); self('after_backward')
            self.opt.step(); self('after_step')
            self.opt.zero_grad()
        except CancelBatchException: self('after_cancel_batch')
        finally: self('after_batch')
    def _do_begin_fit(self, n_epoch):
        self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit')
    def _do_epoch_train(self):
        try:
            self.dl = self.dls.train; self('begin_train')
            self.all_batches()
        except CancelTrainException: self('after_cancel_train')
        finally: self('after_train')
    def _do_epoch_validate(self, ds_idx=1, dl=None):
        if dl is None: dl = self.dls[ds_idx]
        names = ['shuffle', 'drop_last']
        try:
            # Force deterministic full iteration for validation, remembering
            # the previous settings so they can be restored in `finally`.
            dl,old,has = change_attrs(dl, names, [False,False])
            self.dl = dl; self('begin_validate')
            with torch.no_grad(): self.all_batches()
        except CancelValidException: self('after_cancel_validate')
        finally:
            dl,*_ = change_attrs(dl, names, old, has); self('after_validate')
    def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
        # Main entry point: train for `n_epoch` epochs, validating after each.
        with self.added_cbs(cbs):
            if reset_opt or not self.opt: self.create_opt()
            if wd is None: wd = self.wd
            if wd is not None: self.opt.set_hypers(wd=wd)
            self.opt.set_hypers(lr=self.lr if lr is None else lr)
            try:
                self._do_begin_fit(n_epoch)
                for epoch in range(n_epoch):
                    try:
                        self.epoch=epoch; self('begin_epoch')
                        self._do_epoch_train()
                        self._do_epoch_validate()
                    except CancelEpochException: self('after_cancel_epoch')
                    finally: self('after_epoch')
            except CancelFitException: self('after_cancel_fit')
            finally: self('after_fit')
    def validate(self, ds_idx=1, dl=None, cbs=None):
        if dl is None: dl = self.dls[ds_idx]
        with self.added_cbs(cbs), self.no_logging(), self.no_mbar():
            # Replay fit/epoch events around a single validation pass.
            self(_before_epoch)
            self._do_epoch_validate(ds_idx, dl)
            self(_after_epoch)
        return getattr(self, 'final_record', None)
    @delegates(GatherPredsCallback.__init__)
    def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
                  inner=False, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
        cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
        #with self.no_logging(), self.added_cbs(cb), self.loss_not_reduced(), self.no_mbar():
        ctx_mgrs = [self.no_logging(), self.added_cbs(cb), self.no_mbar()]
        if with_loss: ctx_mgrs.append(self.loss_not_reduced())
        # ExitStack lets us enter a variable number of context managers.
        with ExitStack() as stack:
            for mgr in ctx_mgrs: stack.enter_context(mgr)
            # `inner=True` means we're already inside a fit: fire only
            # epoch-level events, not the full fit bracket.
            self(event.begin_epoch if inner else _before_epoch)
            self._do_epoch_validate(dl=dl)
            self(event.after_epoch if inner else _after_epoch)
            if act is None: act = getattr(self.loss_func, 'activation', noop)
            res = cb.all_tensors()
            pred_i = 1 if with_input else 0
            if res[pred_i] is not None:
                # Apply the loss function's final activation (e.g. softmax),
                # and optionally its decoding (e.g. argmax) as an extra item.
                res[pred_i] = act(res[pred_i])
                if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
            return tuple(res)
    def predict(self, item, rm_type_tfms=None, with_input=False):
        # Single-item inference: build a one-item test DataLoader, predict,
        # then decode back through the data pipeline.
        dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms)
        inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
        dec = self.dls.decode_batch((*tuplify(inp),*tuplify(dec_preds)))[0]
        i = getattr(self.dls, 'n_inp', -1)
        dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
        res = dec_targ,dec_preds[0],preds[0]
        if with_input: res = (dec_inp,) + res
        return res
    def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
        b = dl.one_batch()
        _,_,preds = self.get_preds(dl=[b], with_decoded=True)
        self.dls.show_results(b, preds, max_n=max_n, **kwargs)
    def show_training_loop(self):
        # Pretty-print the loop structure with the callbacks attached to each event.
        indent = 0
        for s in _loop:
            if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
            elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
            else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
    @contextmanager
    def no_logging(self): return replacing_yield(self, 'logger', noop)
    @contextmanager
    def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
    @contextmanager
    def loss_not_reduced(self):
        # Prefer toggling the loss object's own `reduction` attribute;
        # otherwise wrap it in a partial passing `reduction='none'`.
        if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
        else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
    def save(self, file, with_opt=True):
        if rank_distrib(): return # don't save if slave proc
        file = join_path_file(file, self.path/self.model_dir, ext='.pth')
        save_model(file, self.model, getattr(self,'opt',None), with_opt)
    def load(self, file, with_opt=None, device=None, strict=True):
        if device is None: device = self.dls.device
        if self.opt is None: self.create_opt()
        # Synchronize distributed processes before touching the file.
        distrib_barrier()
        file = join_path_file(file, self.path/self.model_dir, ext='.pth')
        load_model(file, self.model, self.opt, with_opt=with_opt, device=device, strict=strict)
        return self
# Convenience accessors: `learn.x`/`learn.y` return the current (detuplified)
# input and target batch.
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# Cell
# Attach docstrings to Learner's methods (kept out of the class body so the
# notebook export stays compact).
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
    add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
    add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
    remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
    remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
    added_cbs="Context manage that temporarily adds `cbs`",
    ordered_cbs="Return a list of `Callback` for one step `cb_func` in the training loop",
    create_opt="Create an optimizer with `lr`",
    one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
    all_batches="Train or evaluate `self.model` on all batches of `self.dl`",
    fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
    validate="Validate on `dl` with potential new `cbs`.",
    get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
    predict="Return the prediction on `item`, fully decoded, loss function decoded and probabilities",
    show_results="Show some predictions on `ds_idx`-th dbunchset or `dl`",
    show_training_loop="Show each step in the training loop",
    no_logging="Context manager to temporarily remove `logger`",
    no_mbar="Context manager to temporarily prevent the master progress bar from being created",
    loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
    save="Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`",
    load="Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
)
# Cell
class VerboseCallback(Callback):
    "Callback that prints the name of each event called"
    def __call__(self, event_name):
        # Echo the event name before running the normal callback dispatch.
        print(event_name)
        super().__call__(event_name)
# Cell
# Abstract base for training/validation metrics; subclasses override
# `reset`/`accumulate`/`value`. The `@docs` decorator turns the `_docs`
# dict below into the methods' docstrings.
@docs
class Metric():
    "Blueprint for defining a metric"
    def reset(self): pass
    def accumulate(self, learn): pass
    @property
    def value(self): raise NotImplementedError
    @property
    def name(self): return class2attr(self, 'Metric')
    _docs = dict(
        reset="Reset inner state to prepare for new computation",
        name="Name of the `Metric`, camel-cased and with Metric removed",
        accumulate="Use `learn` to update the state with new results",
        value="The value of the metric")
# Cell
def _maybe_reduce(val):
    "Average `val` across processes when running distributed, else return it unchanged"
    if num_distrib() <= 1:
        return val
    # Clone first so the caller's tensor is never mutated by the all-reduce.
    reduced = val.clone()
    torch.distributed.all_reduce(reduced, op=torch.distributed.ReduceOp.SUM)
    return reduced / num_distrib()
# Cell
class AvgMetric(Metric):
    "Average the values of `func` taking into account potential different batch sizes"
    def __init__(self, func):
        self.func = func
    def reset(self):
        # Running weighted sum and total sample count.
        self.total, self.count = 0., 0
    def accumulate(self, learn):
        n = find_bs(learn.yb)
        self.count += n
        # Weight each batch's result by its size so uneven batches average correctly.
        self.total += to_detach(self.func(learn.pred, *learn.yb)) * n
    @property
    def value(self):
        return None if self.count == 0 else self.total / self.count
    @property
    def name(self):
        # Unwrap `functools.partial` so the metric reports the real function name.
        f = self.func
        return f.func.__name__ if hasattr(f, 'func') else f.__name__
# Cell
class AvgLoss(Metric):
    "Average the losses taking into account potential different batch sizes"
    def reset(self):
        # Running weighted sum of losses and total sample count.
        self.total, self.count = 0., 0
    def accumulate(self, learn):
        n = find_bs(learn.yb)
        self.count += n
        # Weight by batch size so a short final batch doesn't skew the mean.
        self.total += to_detach(learn.loss.mean()) * n
    @property
    def value(self):
        return None if self.count == 0 else self.total / self.count
    @property
    def name(self):
        return "loss"
# Cell
class AvgSmoothLoss(Metric):
    "Smooth average of the losses (exponentially weighted with `beta`)"
    def __init__(self, beta=0.98):
        self.beta = beta
    def reset(self):
        self.count, self.val = 0, tensor(0.)
    def accumulate(self, learn):
        self.count += 1
        # Exponential moving average: val <- beta*val + (1-beta)*new_loss.
        new_loss = to_detach(learn.loss.mean(), gather=False)
        self.val = torch.lerp(new_loss, self.val, self.beta)
    @property
    def value(self):
        # Debias the EMA for the small number of steps seen so far.
        return self.val / (1 - self.beta**self.count)
# Cell
from fastprogress.fastprogress import format_time
def _maybe_item(t):
t = t.value
return t.item() if isinstance(t, Tensor) and t.numel()==1 else t
# Cell
class Recorder(Callback):
    "Callback that registers statistics (lr, loss and metrics) during training"
    run_after = TrainEvalCallback
    def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
        store_attr(self, 'add_time,train_metrics,valid_metrics')
        self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
    def begin_fit(self):
        "Prepare state for training"
        self.lrs,self.iters,self.losses,self.values = [],[],[],[]
        # Build the header row once, depending on which metric sets are enabled.
        names = self.metrics.attrgot('name')
        if self.train_metrics and self.valid_metrics:
            names = L('loss') + names
            names = names.map('train_{}') + names.map('valid_{}')
        elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
        else: names = L('train_loss') + names
        if self.add_time: names.append('time')
        self.metric_names = 'epoch'+names
        self.smooth_loss.reset()
    def after_batch(self):
        "Update all metrics and records lr and smooth loss in training"
        if len(self.yb) == 0: return
        mets = self._train_mets if self.training else self._valid_mets
        for met in mets: met.accumulate(self.learn)
        # The per-iteration lr/loss history is only recorded while training.
        if not self.training: return
        self.lrs.append(self.opt.hypers[-1]['lr'])
        self.losses.append(self.smooth_loss.value)
        self.learn.smooth_loss = self.smooth_loss.value
    def begin_epoch(self):
        "Set timer if `self.add_time=True`"
        self.cancel_train,self.cancel_valid = False,False
        if self.add_time: self.start_epoch = time.time()
        # `self.log` accumulates this epoch's row, starting with the epoch number.
        self.log = L(getattr(self, 'epoch', 0))
    def begin_train (self): self._train_mets[1:].map(Self.reset())
    def begin_validate(self): self._valid_mets.map(Self.reset())
    def after_train (self): self.log += self._train_mets.map(_maybe_item)
    def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
    def after_cancel_train(self): self.cancel_train = True
    def after_cancel_validate(self): self.cancel_valid = True
    def after_epoch(self):
        "Store and log the loss/metric values"
        # Drop the leading epoch number when storing the final record.
        self.learn.final_record = self.log[1:].copy()
        self.values.append(self.learn.final_record)
        if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
        self.logger(self.log)
        # Remember the batch count so validation points can be plotted on the
        # training-iteration axis in `plot_loss`.
        self.iters.append(self.smooth_loss.count)
    @property
    def _train_mets(self):
        # An empty list after a cancelled phase keeps partial stats out of the log.
        if getattr(self, 'cancel_train', False): return L()
        return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
    @property
    def _valid_mets(self):
        if getattr(self, 'cancel_valid', False): return L()
        return (L(self.loss) + self.metrics if self.valid_metrics else L())
    def plot_loss(self, skip_start=5, with_valid=True):
        plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
        if with_valid:
            # Skip validation points that fall before `skip_start` iterations.
            idx = (np.array(self.iters)<skip_start).sum()
            plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
            plt.legend()
# Cell
# Attach docstrings to Recorder's remaining event handlers.
add_docs(Recorder,
         begin_train = "Reset loss and metrics state",
         after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
         begin_validate = "Reset loss and metrics state",
         after_validate = "Log loss and metric values on the validation set",
         after_cancel_train = "Ignore training metrics for this epoch",
         after_cancel_validate = "Ignore validation metrics for this epoch",
         plot_loss = "Plot the losses from `skip_start` and onward")
# Callbacks every new Learner gets by default.
defaults.callbacks = [TrainEvalCallback, Recorder]
# Cell
class FetchPreds(Callback):
    "A callback to fetch predictions during the training loop"
    def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False):
        store_attr(self, 'ds_idx,dl,with_input,with_decoded')
    def after_validate(self):
        "Grab predictions once validation has finished, stashing them in `self.preds`."
        learn = self.learn
        rec = learn.recorder
        # Temporarily detach this callback and the recorder so `get_preds`
        # neither recurses into us nor pollutes the recorder's logs.
        learn.remove_cbs([self, rec])
        self.preds = learn.get_preds(
            ds_idx=self.ds_idx, dl=self.dl,
            with_input=self.with_input, with_decoded=self.with_decoded, inner=True)
        learn.add_cbs([self, rec])
# Cell
@patch
def freeze_to(self:Learner, n):
    # Create the optimizer lazily so freezing works before the first fit.
    if self.opt is None: self.create_opt()
    self.opt.freeze_to(n)
    # Drop optimizer state: it refers to the old trainable-parameter layout.
    self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)  # freeze every parameter group except the last
@patch
def unfreeze(self:Learner): self.freeze_to(0)  # make every parameter group trainable
# Attach docstrings to the freeze/unfreeze patches defined above.
add_docs(Learner,
         freeze_to="Freeze parameter groups up to `n`",
         freeze="Freeze up to last parameter group",
         unfreeze="Unfreeze the entire model")
# Cell
@patch
def export(self:Learner, fname='export.pkl'):
    "Export the content of `self` without the items and the optimizer state for inference"
    if rank_distrib(): return  # don't export if slave proc
    old_dbunch = self.dls
    self.dls = self.dls.new_empty()
    # Guard: a freshly created Learner may not have an optimizer yet, in which
    # case self.opt.state_dict() would raise AttributeError.
    if self.opt is None: self.create_opt()
    state = self.opt.state_dict()
    self.opt = None
    try:
        with warnings.catch_warnings():
            # To avoid the warning that comes from PyTorch about model not being checked
            warnings.simplefilter("ignore")
            torch.save(self, self.path/fname)
    finally:
        # Restore the optimizer and data even if pickling fails, so the
        # Learner stays usable after a failed export.
        self.create_opt()
        self.opt.load_state_dict(state)
        self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True):
    "Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
    # WARNING: torch.load unpickles arbitrary code -- only load trusted files.
    res = torch.load(fname, map_location='cpu' if cpu else None)
    # Learners exported in mixed precision are converted back to full precision.
    if hasattr(res, 'to_fp32'): res = res.to_fp32()
    if cpu: res.dls.cpu()
    return res
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
    "Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
    if dl is None: dl = self.dls[ds_idx]
    if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
    # split_idx=0 applies the training-time transforms, so each of the n
    # passes sees a differently augmented version of the data.
    with dl.dataset.set_split_idx(0), self.no_mbar():
        if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
        aug_preds = []
        for i in self.progress.mbar if hasattr(self,'progress') else range(n):
            self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
            aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
    aug_preds = torch.cat(aug_preds)
    # Combine the n augmented passes by element-wise max or by mean.
    aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
    self.epoch = n
    # One ordinary pass (validation transforms) for the base predictions.
    with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(ds_idx, inner=True)
    if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
    # beta weights the ordinary predictions; beta=None returns both sets.
    preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
    return preds,targs
Average the values of `func` taking into account potential different batch sizes
Smooth average of the losses (exponentially weighted with `beta`)
A callback to fetch predictions during the training loop
Blueprint for defining a metric
Callback that registers statistics (lr, loss and metrics) during training
Callback that prints the name of each event called
Update all metrics and records lr and smooth loss in training
Store and log the loss/metric values
Set timer if `self.add_time=True`
Prepare state for training
Export the content of `self` without the items and the optimizer state for inference
Load a `Learner` object in `fname`, optionally putting it on the `cpu`
Load `model` from `file` along with `opt` (if available, and if `with_opt`)
Convert `m` to an `AvgMetric`, unless it's already a `Metric`
Context manager to temporarily replace an attribute
Save `model` to `file` along with `opt` (if available, and if `with_opt`)
Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation
AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified). Cell Cell Cell Cell Cell Cell Cell Cell Cell Cellwith self.no_logging(), self.added_cbs(cb), self.loss_not_reduced(), self.no_mbar(): don't save if slave proc Cell Cell Cell Cell Cell Cell Cell Cell Cell Cell Cell Cell Cell don't export if slave procTo avoid the warning that come from PyTorch about model not being checked Cell CellTo keep track of progress on mbar since the progress callback will use self.epoch | 1,602 | en | 0.777059 |
class dotnetPointList_t(object):
    """ dotnetPointList_t(Size: int) """
    # Auto-generated .NET interop stub (Tekla Structures); bodies are
    # intentionally empty and only document the managed signatures.
    def FromStruct(self, PointList):
        """ FromStruct(self: dotnetPointList_t,PointList: PointList) """
        pass

    def ToStruct(self, PointList):
        """ ToStruct(self: dotnetPointList_t,PointList: PointList) """
        pass

    @staticmethod
    def __new__(self, Size):
        """
        __new__[dotnetPointList_t]() -> dotnetPointList_t
        __new__(cls: type,Size: int)
        """
        pass

    # Attribute placeholders; populated by the real .NET type at runtime.
    aPointList = None
    ClientId = None
    IndexCurrentItem = None
    NumberItems = None
    NumberItemsInSet = None
| release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | 634 | dotnetPointList_t(Size: int)
FromStruct(self: dotnetPointList_t,PointList: PointList)
ToStruct(self: dotnetPointList_t,PointList: PointList)
__new__[dotnetPointList_t]() -> dotnetPointList_t
__new__(cls: type,Size: int) | 225 | en | 0.657642 |
"""Test case that checks the working of the utils/command/gen_uml.py module."""
from utils.model.gen_uml import generate
import importlib_metadata
class PseudoFile:
    """Minimal in-memory stand-in for a writable text file."""

    def __init__(self):
        # Everything written so far, as a single string.
        self.data = ""

    def write(self, data):
        """Accumulate `data` onto the internal buffer."""
        self.data = self.data + data

    def close(self):
        """Nothing to release; present only to satisfy the file API."""
        pass
def test_loading():
    """Generate UML code from the bundled test model and compare verbatim."""
    dist = importlib_metadata.distribution("gaphor")
    model_file = dist.locate_file("tests/test-model.gaphor")
    outfile = PseudoFile()
    generate(model_file, outfile)
    # The f-string in the message shows the full generated text on failure,
    # making it easy to paste back into GENERATED after intentional changes.
    assert outfile.data == GENERATED, f'"""{outfile.data}"""'
# Expected output of utils.model.gen_uml.generate for tests/test-model.gaphor;
# compared byte-for-byte by test_loading.
GENERATED = """# This file is generated by build_uml.py. DO NOT EDIT!
from gaphor.UML.properties import association, attribute, enumeration, derived, derivedunion, redefine
# class 'ValSpec' has been stereotyped as 'SimpleAttribute'
# class 'ShouldNotShowUp' has been stereotyped as 'SimpleAttribute' too
class Element: pass
class SubClass(Element): pass
class C: pass
class D(C): pass
C.attr = attribute('attr', str)
C.name1 = association('name1', SubClass, opposite='name2')
SubClass.name2 = association('name2', C, opposite='name1')
C.base = association('base', SubClass, opposite='abstract')
D.subbase = association('subbase', SubClass, opposite='concrete')
SubClass.concrete = association('concrete', D, opposite='subbase')
D.name3 = association('name3', SubClass, opposite='name4')
# 'SubClass.value' is a simple attribute
SubClass.value = attribute('value', str)
SubClass.abstract = derivedunion('abstract', C, 0, '*', SubClass.concrete)
SubClass.name4 = redefine(SubClass, 'name4', D, name2)
"""
| tests/test_gen_uml.py | 1,572 | Test case that checks the working of the utils/command/gen_uml.py module. | 73 | en | 0.659834 |
"""
This file offers the methods to automatically retrieve the graph Acidocella sp. MX-AZ02.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AcidocellaSpMxAz02(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Acidocella sp. MX-AZ02 graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True,
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Acidocella sp. MX-AZ02 graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # The trailing () builds/retrieves the graph immediately rather than
    # returning the retriever object.
    return AutomaticallyRetrievedGraph(
        graph_name="AcidocellaSpMxAz02",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| bindings/python/ensmallen/datasets/string/acidocellaspmxaz02.py | 3,480 | Return new instance of the Acidocella sp. MX-AZ02 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Acidocella sp. MX-AZ02 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
This file offers the methods to automatically retrieve the graph Acidocella sp. MX-AZ02.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
pylint: disable=import-error | 2,693 | en | 0.706483 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright ยฉ 2018 Michael J. Hayford
""" Support creation of an iPython console, with rayoptics environment
.. Created on Wed Nov 21 21:48:02 2018
.. codeauthor: Michael J. Hayford
"""
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
from rayoptics.gui.appmanager import ModelInfo
def create_ipython_console(app, title, view_width, view_ht):
    """Create an iPython console sub-window preloaded with the rayoptics environment."""
    opt_model = app.app_manager.model
    # Expose the application and (when present) the optical model's main
    # sub-objects under their conventional short names.
    ro_env = {'app': app, 'opm': opt_model}
    if opt_model:
        ro_env['sm'] = opt_model.seq_model
        ro_env['osp'] = opt_model.optical_spec
        ro_env['pm'] = opt_model.parax_model

    # Build the console widget, load the rayoptics environment, then push
    # the model variables into the kernel namespace.
    ipy_console = ConsoleWidget()
    ipy_console.execute_command('from rayoptics.environment import *')
    ipy_console.push_vars(ro_env)

    sub_window = app.add_subwindow(ipy_console, ModelInfo(opt_model))
    sub_window.setWindowTitle(title)
    orig_x, orig_y = app.initial_window_offset()
    sub_window.setGeometry(orig_x, orig_y, view_width, view_ht)
    sub_window.show()
class ConsoleWidget(RichJupyterWidget):
    """Qt rich Jupyter console backed by an in-process kernel."""

    def __init__(self, customBanner=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if customBanner is not None:
            self.banner = customBanner

        self.font_size = 6
        # In-process kernel: console code runs in this application's
        # interpreter, so pushed variables are shared with the app.
        self.kernel_manager = kernel_manager = QtInProcessKernelManager()
        kernel_manager.start_kernel(show_banner=False)
        kernel_manager.kernel.gui = 'qt'
        self.kernel_client = kernel_client = self.kernel_manager.client()
        kernel_client.start_channels()

        def stop():
            # Tear down channels/kernel and quit the Qt app on console exit.
            kernel_client.stop_channels()
            kernel_manager.shutdown_kernel()
            guisupport.get_app_qt().exit()

        self.exit_requested.connect(stop)

    def push_vars(self, variableDict):
        """
        Given a dictionary containing name / value pairs, push those variables
        to the Jupyter console widget
        """
        self.kernel_manager.kernel.shell.push(variableDict)

    def clear(self):
        """
        Clears the terminal
        """
        self._control.clear()

        # self.kernel_manager

    def print_text(self, text):
        """
        Prints some plain text to the console
        """
        self._append_plain_text(text)

    def execute_command(self, command):
        """
        Execute a command in the frame of the console widget
        """
        self._execute(command, False)
| src/rayoptics/qtgui/ipyconsole.py | 2,845 | Clears the terminal
create a iPython console with a rayoptics environment
Execute a command in the frame of the console widget
Prints some plain text to the console
Given a dictionary containing name / value pairs, push those variables
to the Jupyter console widget
Support creation of an iPython console, with rayoptics environment
.. Created on Wed Nov 21 21:48:02 2018
.. codeauthor: Michael J. Hayford
!/usr/bin/env python3 -*- coding: utf-8 -*- Copyright ยฉ 2018 Michael J. Hayford construct the top level widget load the environment self.kernel_manager | 561 | en | 0.701783 |
"""Phonopy QHA module."""
# Copyright (C) 2012 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.units import Avogadro, EvTokJmol, EVAngstromToGPa
from phonopy.qha.eos import get_eos, fit_to_eos
class BulkModulus(object):
    """Bulk modulus class.

    This class is used to calculate bulk modulus only from temperature
    independent energy input.

    """

    def __init__(self,
                 volumes,
                 energies,
                 eos='vinet'):
        """Init method.

        volumes : array_like
            Unit cell volumes where energies are obtained.
            shape=(volumes, ), dtype='double'.
        energies : array_like
            Energies obtained at volumes.
            shape=(volumes, ), dtype='double'.
        eos : str
            Identifier of equation of states function.

        """
        self._volumes = volumes
        # energies may come per-temperature (2D); only the first row is used
        # here since this class is temperature independent.
        if np.array(energies).ndim == 1:
            self._energies = energies
        else:
            self._energies = energies[0]
        self._eos = get_eos(eos)

        self._energy = None
        self._bulk_modulus = None
        self._b_prime = None
        try:
            (self._energy,
             self._bulk_modulus,
             self._b_prime,
             self._volume) = fit_to_eos(volumes,
                                        self._energies,
                                        self._eos)
        except TypeError:
            # fit_to_eos raises TypeError when the least-squares fit fails.
            msg = ["Failed to fit to \"%s\" equation of states." % eos]
            if len(volumes) < 4:
                msg += ["At least 4 volume points are needed for the fitting."]
            msg += ["Careful choice of volume points is recommended."]
            raise RuntimeError("\n".join(msg))

    @property
    def bulk_modulus(self):
        """Return bulk modulus."""
        return self._bulk_modulus

    def get_bulk_modulus(self):
        """Return bulk modulus."""
        warnings.warn("BulkModulus.get_bulk_modulus() is deprecated."
                      "Use BulkModulus.bulk_modulus attribute.",
                      DeprecationWarning)
        return self.bulk_modulus

    @property
    def equilibrium_volume(self):
        """Return volume at equilibrium."""
        return self._volume

    def get_equilibrium_volume(self):
        """Return volume at equilibrium."""
        warnings.warn("BulkModulus.get_equilibrium_volume() is deprecated."
                      "Use BulkModulus.equilibrium_volume attribute.",
                      DeprecationWarning)
        return self.equilibrium_volume

    @property
    def b_prime(self):
        """Return fitted parameter B'."""
        return self._b_prime

    def get_b_prime(self):
        """Return fitted parameter B'."""
        warnings.warn("BulkModulus.get_b_prime() is deprecated."
                      "Use BulkModulus.b_prime attribute.",
                      DeprecationWarning)
        return self._b_prime

    @property
    def energy(self):
        """Return fitted parameter of energy."""
        return self._energy

    def get_energy(self):
        """Return fitted parameter of energy."""
        warnings.warn("BulkModulus.get_energy() is deprecated."
                      "Use BulkModulus.energy attribute.",
                      DeprecationWarning)
        return self._energy

    def get_parameters(self):
        """Return fitted parameters (energy, bulk modulus, B', volume)."""
        return (self._energy,
                self._bulk_modulus,
                self._b_prime,
                self._volume)

    def get_eos(self):
        """Return EOS function as a python method."""
        warnings.warn("BulkModulus.get_eos() is deprecated.",
                      DeprecationWarning)
        return self._eos

    def plot(self):
        """Plot fitted EOS curve over the raw energy-volume points."""
        import matplotlib.pyplot as plt

        ep = self.get_parameters()
        vols = self._volumes
        volume_points = np.linspace(min(vols), max(vols), 201)
        fig, ax = plt.subplots()
        ax.plot(volume_points, self._eos(volume_points, *ep), 'r-')
        ax.plot(vols, self._energies, 'bo', markersize=4)
        return plt
class QHA(object):
"""Quasi harmonic approximation class."""
def __init__(self,
             volumes,  # angstrom^3
             electronic_energies,  # eV
             temperatures,  # K
             cv,  # J/K/mol
             entropy,  # J/K/mol
             fe_phonon,  # kJ/mol
             eos='vinet',
             t_max=None,
             energy_plot_factor=None):
    """Init method.

    Parameters
    ----------
    volumes: array_like
        Unit cell volumes (V) in angstrom^3.
        dtype='double'
        shape=(volumes,)
    electronic_energies: array_like
        Electronic energies (U_el) or electronic free energies (F_el) in eV.
        It is assumed as the former if ndim==1 and the latter if ndim==2.
        dtype='double'
        shape=(volumes,) or (temperatures, volumes)
    temperatures: array_like
        Temperatures in ascending order (T) in K.
        dtype='double'
        shape=(temperatures,)
    cv: array_like
        Phonon heat capacity at constant volume in J/K/mol.
        dtype='double'
        shape=(temperatures, volumes)
    entropy: array_like
        Phonon entropy at constant volume (S_ph) in J/K/mol.
        dtype='double'
        shape=(temperatures, volumes)
    fe_phonon: array_like
        Phonon Helmholtz free energy (F_ph) in kJ/mol.
        dtype='double'
        shape=(temperatures, volumes)
    eos: str
        Equation of state used for fitting F vs V.
        'vinet', 'murnaghan' or 'birch_murnaghan'.
    t_max: float
        Maximum temperature to be calculated. This has to be not
        greater than the temperature of the third element from the
        end of 'temperatures' elements. If t_max=None, the temperature
        of the third element from the end is used.
    energy_plot_factor: float
        This value is multiplied to energy like values only in plotting.

    """
    self._volumes = np.array(volumes)
    self._electronic_energies = np.array(electronic_energies)
    self._all_temperatures = np.array(temperatures)
    self._cv = np.array(cv)
    self._entropy = np.array(entropy)
    # Convert phonon free energy from kJ/mol to eV so it can be summed
    # with the electronic energies.
    self._fe_phonon = np.array(fe_phonon) / EvTokJmol
    self._eos = get_eos(eos)
    self._t_max = t_max
    self._energy_plot_factor = energy_plot_factor

    # Fit results; populated by run().
    self._temperatures = None
    self._equiv_volumes = None
    self._equiv_energies = None
    self._equiv_bulk_modulus = None
    self._equiv_parameters = None
    self._free_energies = None
    self._num_elems = None

    # Derived quantities; populated by the _set_* helpers called in run().
    self._thermal_expansions = None
    self._cp_numerical = None
    self._volume_entropy_parameters = None
    self._volume_cv_parameters = None
    self._volume_entropy = None
    self._volume_cv = None
    self._cp_polyfit = None
    self._dsdv = None
    self._gruneisen_parameters = None
    self._len = None
@property
def thermal_expansion(self):
    """Return volumetric thermal expansion coefficients at temperatures."""
    # Truncated to self._len, the range where finite differences exist.
    return self._thermal_expansions[:self._len]
@property
def helmholtz_volume(self):
    """Return Helmholtz free energies at temperatures and volumes."""
    # One row per temperature, one column per volume point.
    return self._free_energies[:self._len]
@property
def volume_temperature(self):
    """Return equilibrium volumes at temperatures."""
    # Equilibrium volume is EOS fit parameter V_0 at each temperature.
    return self._equiv_volumes[:self._len]
@property
def gibbs_temperature(self):
    """Return Gibbs free energies at temperatures."""
    # At zero external pressure this equals the EOS minimum energy E_0.
    return self._equiv_energies[:self._len]
@property
def bulk_modulus_temperature(self):
    """Return bulk modulus vs temperature data."""
    # Values were converted to GPa when stored in run().
    return self._equiv_bulk_modulus[:self._len]
@property
def heat_capacity_P_numerical(self):
    """Return heat capacities at constant pressure at temperatures.

    Values are computed by numerical derivative of Gibbs free energy.

    """
    return self._cp_numerical[:self._len]
@property
def heat_capacity_P_polyfit(self):
    """Return heat capacities at constant pressure at temperatures.

    Volumes are computed in another way to heat_capacity_P_numerical
    for the better numerical behaviour. But this does not work
    when temperature dependent electronic_energies is supplied.

    """
    # ndim > 1 means temperature-dependent electronic free energies, for
    # which the polyfit approach is undefined; return None in that case.
    if self._electronic_energies.ndim == 1:
        return self._cp_polyfit[:self._len]
    else:
        return None
@property
def gruneisen_temperature(self):
    """Return Gruneisen parameters at temperatures."""
    return self._gruneisen_parameters[:self._len]
def run(self, verbose=False):
    """Fit parameters to EOS at temperatures.

    Even if fitting failed, simply omit the volume point. In this case,
    the failed temperature point doesn't exist in the returned arrays.

    """
    if verbose:
        print(("#%11s" + "%14s" * 4) % ("T", "E_0", "B_0", "B'_0", "V_0"))

    # Plus one temperature point is necessary for computing e.g. beta.
    num_elems = self._get_num_elems(self._all_temperatures) + 1
    if num_elems > len(self._all_temperatures):
        num_elems -= 1

    temperatures = []
    parameters = []
    free_energies = []

    for i in range(num_elems):  # loop over temperatures
        if self._electronic_energies.ndim == 1:
            el_energy = self._electronic_energies
        else:
            el_energy = self._electronic_energies[i]
        fe = [ph_e + el_e
              for ph_e, el_e in zip(self._fe_phonon[i], el_energy)]

        # Reset to None each iteration: previously `ep` kept its old value
        # after a failed fit, which re-appended the previous temperature's
        # parameters (and raised NameError if the very first fit failed).
        ep = None
        try:
            ep = fit_to_eos(self._volumes, fe, self._eos)
        except TypeError:
            print("Fitting failure at T=%.1f" % self._all_temperatures[i])

        if ep is None:
            # Simply omit volume point where the fitting failed.
            continue

        t = self._all_temperatures[i]
        temperatures.append(t)
        parameters.append(ep)
        free_energies.append(fe)

        if verbose:
            print(("%14.6f" * 5) %
                  (t, ep[0], ep[1] * EVAngstromToGPa, ep[2], ep[3]))

    self._free_energies = np.array(free_energies)
    self._temperatures = np.array(temperatures)
    self._equiv_parameters = np.array(parameters)
    self._equiv_volumes = np.array(self._equiv_parameters[:, 3])
    self._equiv_energies = np.array(self._equiv_parameters[:, 0])
    self._equiv_bulk_modulus = np.array(
        self._equiv_parameters[:, 1] * EVAngstromToGPa)

    self._num_elems = len(self._temperatures)

    # For computing following values at temperatures, finite difference
    # method is used. Therefore number of temperature points are needed
    # larger than self._num_elems that nearly equals to the temperature
    # point we expect.
    self._set_thermal_expansion()
    self._set_heat_capacity_P_numerical()
    self._set_heat_capacity_P_polyfit()
    self._set_gruneisen_parameter()  # To be run after thermal expansion.

    self._len = len(self._thermal_expansions)
    assert self._len + 1 == self._num_elems
def plot(self, thin_number=10, volume_temp_exp=None):
    """Return pyplot with the three QHA summary panels plotted."""
    import matplotlib.pyplot as plt

    plt.rcParams['pdf.fonttype'] = 42
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['text.usetex'] = True

    fig, axs = plt.subplots(1, 3, figsize=(7, 3.5))
    # Identical tick styling for every panel.
    for ax in axs:
        ax.xaxis.set_ticks_position('both')
        ax.yaxis.set_ticks_position('both')
        ax.xaxis.set_tick_params(which='both', direction='in')
        ax.yaxis.set_tick_params(which='both', direction='in')

    self._plot_helmholtz_volume(axs[0], thin_number=thin_number)
    self._plot_volume_temperature(axs[1], exp_data=volume_temp_exp)
    self._plot_thermal_expansion(axs[2])
    plt.tight_layout()
    return plt
def get_helmholtz_volume(self):
    """Deprecated alias of the `helmholtz_volume` attribute."""
    warnings.warn("QHA.get_helmholtz_volume() is deprecated."
                  "Use helmholtz_volume attribute.",
                  DeprecationWarning)
    return self.helmholtz_volume
def plot_helmholtz_volume(self,
                          thin_number=10,
                          xlabel=r'Volume $(\AA^3)$',
                          ylabel='Free energy'):
    """Return pyplot with Helmholtz free energy vs volume plotted."""
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    self._plot_helmholtz_volume(ax,
                                thin_number=thin_number,
                                xlabel=xlabel,
                                ylabel=ylabel)
    return plt
def plot_pdf_helmholtz_volume(self,
                              thin_number=10,
                              filename='helmholtz-volume.pdf'):
    """Save the Helmholtz free energy vs volume plot to a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    fig, ax = plt.subplots()
    ax.xaxis.set_ticks_position('both')
    ax.yaxis.set_ticks_position('both')
    ax.xaxis.set_tick_params(which='both', direction='in')
    ax.yaxis.set_tick_params(which='both', direction='in')

    self._plot_helmholtz_volume(ax, thin_number=thin_number)
    plt.savefig(filename)
    plt.close()
def write_helmholtz_volume(self, filename='helmholtz-volume.dat'):
w = open(filename, 'w')
for i, (t, ep, fe) in enumerate(zip(self._temperatures,
self._equiv_parameters,
self._free_energies)):
if i == self._len:
break
w.write("# Temperature: %f\n" % t)
w.write("# Parameters: %f %f %f %f\n" % tuple(ep))
for j, v in enumerate(self._volumes):
w.write("%20.15f %25.15f\n" % (v, fe[j]))
w.write("\n\n")
w.close()
def write_helmholtz_volume_fitted(self,
thin_number,
filename='helholtz-volume_fitted.dat'):
if self._energy_plot_factor is None:
_energy_plot_factor = 1
else:
_energy_plot_factor = self._energy_plot_factor
volume_points = np.linspace(
min(self._volumes), max(self._volumes), 201)
selected_volumes = []
selected_energies = []
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
selected_volumes.append(self._equiv_volumes[i])
selected_energies.append(self._equiv_energies[i])
for i, t in enumerate(self._temperatures[:self._len]):
if t >= 298:
if i > 0:
de = self._equiv_energies[i] - self._equiv_energies[i - 1]
dt = t - self._temperatures[i - 1]
e0 = ((298 - self._temperatures[i - 1]) / dt * de +
self._equiv_energies[i - 1])
else:
e0 = 0
break
e0 *= _energy_plot_factor
_data_vol_points = []
_data_eos = []
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
_data_vol_points.append(
np.array(self._free_energies[i]) * _energy_plot_factor - e0)
_data_eos.append(
self._eos(volume_points, * self._equiv_parameters[i])
* _energy_plot_factor - e0)
data_eos = np.array(_data_eos).T
data_vol_points = np.array(_data_vol_points).T
data_min = (np.array(selected_energies) * _energy_plot_factor - e0)
with open(filename, 'w') as w:
w.write("# Volume points\n")
for (j, k) in zip(self._volumes, data_vol_points):
w.write("%10.5f " % j)
for l in k:
w.write("%10.5f" % l)
w.write("\n")
w.write("\n# Fitted data\n")
for (m, n) in zip(volume_points, data_eos):
w.write("%10.5f " % m)
for ll in n:
w.write("%10.5f" % ll)
w.write("\n")
w.write("\n# Minimas\n")
for (a, b) in zip(selected_volumes, data_min):
w.write("%10.5f %10.5f %s" % (a, b, '\n'))
w.write('\n')
def get_volume_temperature(self):
    """Deprecated alias of the `volume_temperature` attribute."""
    warnings.warn("QHA.get_volume_temperature() is deprecated."
                  "Use volume_temperature attribute.",
                  DeprecationWarning)
    return self.volume_temperature
def plot_volume_temperature(self, exp_data=None):
    """Return pyplot with equilibrium volume vs temperature plotted."""
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    self._plot_volume_temperature(ax, exp_data=exp_data)
    return plt
def plot_pdf_volume_temperature(self,
                                exp_data=None,
                                filename='volume-temperature.pdf'):
    """Save the volume vs temperature plot to a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    fig, ax = plt.subplots()
    ax.xaxis.set_ticks_position('both')
    ax.yaxis.set_ticks_position('both')
    ax.xaxis.set_tick_params(which='both', direction='in')
    ax.yaxis.set_tick_params(which='both', direction='in')

    self._plot_volume_temperature(ax, exp_data=exp_data)
    plt.savefig(filename)
    plt.close()
def write_volume_temperature(self, filename='volume-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%25.15f %25.15f\n" % (self._temperatures[i],
self._equiv_volumes[i]))
w.close()
def get_thermal_expansion(self):
    """Deprecated alias of the `thermal_expansion` attribute."""
    warnings.warn("QHA.get_thermal_expansion() is deprecated."
                  "Use thermal_expansion attribute.",
                  DeprecationWarning)
    return self.thermal_expansion
def plot_thermal_expansion(self):
    """Return pyplot with thermal expansion coefficient vs temperature."""
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    self._plot_thermal_expansion(ax)
    return plt
def plot_pdf_thermal_expansion(self, filename='thermal_expansion.pdf'):
    """Save the thermal expansion plot to a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    fig, ax = plt.subplots()
    ax.xaxis.set_ticks_position('both')
    ax.yaxis.set_ticks_position('both')
    ax.xaxis.set_tick_params(which='both', direction='in')
    ax.yaxis.set_tick_params(which='both', direction='in')

    self._plot_thermal_expansion(ax)
    plt.savefig(filename)
    plt.close()
def write_thermal_expansion(self, filename='thermal_expansion.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%25.15f %25.15f\n" % (self._temperatures[i],
self._thermal_expansions[i]))
w.close()
def get_gibbs_temperature(self):
return self.gibbs_temperature
def plot_gibbs_temperature(self,
                           xlabel='Temperature (K)',
                           ylabel='Gibbs free energy'):
    """Return pyplot with Gibbs free energy vs temperature plotted."""
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    self._plot_gibbs_temperature(ax, xlabel=xlabel, ylabel=ylabel)
    return plt
def plot_pdf_gibbs_temperature(self, filename='gibbs-temperature.pdf'):
    """Save the Gibbs free energy vs temperature plot to a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    fig, ax = plt.subplots()
    ax.xaxis.set_ticks_position('both')
    ax.yaxis.set_ticks_position('both')
    ax.xaxis.set_tick_params(which='both', direction='in')
    ax.yaxis.set_tick_params(which='both', direction='in')

    self._plot_gibbs_temperature(ax)
    plt.savefig(filename)
    plt.close()
def write_gibbs_temperature(self, filename='gibbs-temperature.dat'):
    """Write temperature (K) and Gibbs free energy pairs to a file.

    filename : output file name.
    """
    # "with" guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as w:
        for i in range(self._len):
            w.write("%20.15f %25.15f\n" % (self._temperatures[i],
                                           self._equiv_energies[i]))
def get_bulk_modulus_temperature(self):
    """Return bulk moduli at temperatures."""
    return self.bulk_modulus_temperature
def plot_bulk_modulus_temperature(self,
                                  xlabel='Temperature (K)',
                                  ylabel='Bulk modulus'):
    """Plot bulk modulus vs temperature; return the pyplot module."""
    import matplotlib.pyplot as plt

    _, ax = plt.subplots()
    self._plot_bulk_modulus_temperature(ax, xlabel=xlabel, ylabel=ylabel)
    return plt
def plot_pdf_bulk_modulus_temperature(
        self,
        filename='bulk_modulus-temperature.pdf'):
    """Render the bulk modulus curve into a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    _, ax = plt.subplots()
    # Inward-pointing ticks on all four edges of the frame.
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_ticks_position('both')
        axis.set_tick_params(which='both', direction='in')
    self._plot_bulk_modulus_temperature(ax)
    plt.savefig(filename)
    plt.close()
def write_bulk_modulus_temperature(
        self,
        filename='bulk_modulus-temperature.dat'):
    """Write temperature (K) and bulk modulus pairs to a file.

    filename : output file name.
    """
    # "with" guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as w:
        for i in range(self._len):
            w.write("%20.15f %25.15f\n" % (self._temperatures[i],
                                           self._equiv_bulk_modulus[i]))
def get_heat_capacity_P_numerical(self):
    """Return Cp at temperatures computed by numerical differentiation."""
    return self.heat_capacity_P_numerical
def plot_heat_capacity_P_numerical(self, Z=1, exp_data=None):
    """Plot numerical Cp (scaled by 1/Z) vs temperature; return pyplot."""
    import matplotlib.pyplot as plt

    _, ax = plt.subplots()
    self._plot_heat_capacity_P_numerical(ax, Z=Z, exp_data=exp_data)
    return plt
def plot_pdf_heat_capacity_P_numerical(self,
                                       exp_data=None,
                                       filename='Cp-temperature.pdf'):
    """Render the numerical Cp curve into a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    _, ax = plt.subplots()
    # Inward-pointing ticks on all four edges of the frame.
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_ticks_position('both')
        axis.set_tick_params(which='both', direction='in')
    self._plot_heat_capacity_P_numerical(ax, exp_data=exp_data)
    plt.savefig(filename)
    plt.close()
def write_heat_capacity_P_numerical(self, filename='Cp-temperature.dat'):
    """Write temperature (K) and numerical Cp pairs to a file.

    filename : output file name.
    """
    # "with" guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as w:
        for i in range(self._len):
            w.write("%20.15f %20.15f\n" % (self._temperatures[i],
                                           self._cp_numerical[i]))
def get_heat_capacity_P_polyfit(self):
    """Return Cp at temperatures computed via polynomial fits."""
    return self.heat_capacity_P_polyfit
def plot_heat_capacity_P_polyfit(self, Z=1, exp_data=None):
    """Plot polyfit Cp (scaled by 1/Z) vs temperature; return pyplot."""
    import matplotlib.pyplot as plt

    _, ax = plt.subplots()
    self._plot_heat_capacity_P_polyfit(ax, Z=Z, exp_data=exp_data)
    return plt
def plot_pdf_heat_capacity_P_polyfit(
        self,
        exp_data=None,
        filename='Cp-temperature_polyfit.pdf'):
    """Render the polyfit Cp curve into a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    _, ax = plt.subplots()
    # Inward-pointing ticks on all four edges of the frame.
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_ticks_position('both')
        axis.set_tick_params(which='both', direction='in')
    self._plot_heat_capacity_P_polyfit(ax, exp_data=exp_data)
    plt.savefig(filename)
    plt.close()
def write_heat_capacity_P_polyfit(self,
                                  filename='Cp-temperature_polyfit.dat',
                                  filename_ev='entropy-volume.dat',
                                  filename_cvv='Cv-volume.dat',
                                  filename_dsdvt='dsdv-temperature.dat'):
    """Write polyfit Cp data and its intermediate fits to four files.

    filename       : Cp vs temperature.
    filename_ev    : entropy vs volume data and fit parameters per T.
    filename_cvv   : Cv vs volume data and fit parameters per T.
    filename_dsdvt : dS/dV vs temperature (converted to GPa units).
    """
    # "with" guarantees every handle is closed even if a write raises.
    with open(filename_ev, 'w') as wve, open(filename_cvv, 'w') as wvcv:
        for i in range(1, self._len):
            t = self._temperatures[i]
            # Header: temperature and the degree-4 fit coefficients.
            wve.write("# temperature %20.15f\n" % t)
            wve.write("# %20.15f %20.15f %20.15f %20.15f %20.15f\n" %
                      tuple(self._volume_entropy_parameters[i - 1]))
            wvcv.write("# temperature %20.15f\n" % t)
            wvcv.write("# %20.15f %20.15f %20.15f %20.15f %20.15f\n" %
                       tuple(self._volume_cv_parameters[i - 1]))
            for ve, vcv in zip(self._volume_entropy[i - 1],
                               self._volume_cv[i - 1]):
                wve.write("%20.15f %20.15f\n" % tuple(ve))
                wvcv.write("%20.15f %20.15f\n" % tuple(vcv))
            # Blank separator between temperature blocks (gnuplot style).
            wve.write("\n\n")
            wvcv.write("\n\n")
    with open(filename, 'w') as w:
        for i in range(self._len):
            w.write("%20.15f %20.15f\n" % (self._temperatures[i],
                                           self._cp_polyfit[i]))
    with open(filename_dsdvt, 'w') as w:  # GPa
        for i in range(self._len):
            w.write("%20.15f %20.15f\n" % (self._temperatures[i],
                                           self._dsdv[i] * 1e21 / Avogadro))
def get_gruneisen_temperature(self):
    """Return Gruneisen parameters at temperatures."""
    return self.gruneisen_temperature
def plot_gruneisen_temperature(self):
    """Plot Gruneisen parameter vs temperature; return the pyplot module."""
    import matplotlib.pyplot as plt

    _, ax = plt.subplots()
    self._plot_gruneisen_temperature(ax)
    return plt
def plot_pdf_gruneisen_temperature(self,
                                   filename='gruneisen-temperature.pdf'):
    """Render the Gruneisen parameter curve into a PDF file."""
    import matplotlib.pyplot as plt

    self._set_rcParams(plt)
    _, ax = plt.subplots()
    # Inward-pointing ticks on all four edges of the frame.
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_ticks_position('both')
        axis.set_tick_params(which='both', direction='in')
    self._plot_gruneisen_temperature(ax)
    plt.savefig(filename)
    plt.close()
def write_gruneisen_temperature(self,
                                filename='gruneisen-temperature.dat'):
    """Write temperature (K) and Gruneisen parameter pairs to a file.

    filename : output file name.
    """
    # "with" guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as w:
        for i in range(self._len):
            w.write("%20.15f %25.15f\n" % (self._temperatures[i],
                                           self._gruneisen_parameters[i]))
def _plot_helmholtz_volume(self,
                           ax,
                           thin_number=10,
                           xlabel=r'Volume $(\AA^3)$',
                           ylabel='Free energy'):
    """Draw Helmholtz free energy vs volume with fitted EOS curves.

    Every ``thin_number``-th temperature is drawn as raw points plus
    the fitted EOS curve; the red line connects the equilibrium
    (volume, energy) points. Energies are shifted so the (interpolated)
    energy near 298 K becomes the zero reference.
    """
    # Without an explicit plot factor, energies stay in eV.
    if self._energy_plot_factor is None:
        _energy_plot_factor = 1
        _ylabel = ylabel + ' (eV)'
    else:
        _energy_plot_factor = self._energy_plot_factor
        _ylabel = ylabel
    # Dense volume grid for drawing smooth EOS curves.
    volume_points = np.linspace(min(self._volumes),
                                max(self._volumes),
                                201)
    selected_volumes = []
    selected_energies = []
    thin_index = 0
    # Collect equilibrium points at the thinned temperatures.
    for i, t in enumerate(self._temperatures[:self._len]):
        if i % thin_number == 0:
            selected_volumes.append(self._equiv_volumes[i])
            selected_energies.append(self._equiv_energies[i])
    # Linearly interpolate the equilibrium energy at 298 K to use as
    # the energy zero.
    # NOTE(review): if no temperature reaches 298 K this loop never
    # assigns e0 -- confirm the temperature range always spans 298 K.
    for i, t in enumerate(self._temperatures[:self._len]):
        if t >= 298:
            if i > 0:
                de = self._equiv_energies[i] - self._equiv_energies[i - 1]
                dt = t - self._temperatures[i - 1]
                e0 = ((298 - self._temperatures[i - 1]) / dt * de +
                      self._equiv_energies[i - 1])
            else:
                e0 = 0
            break
    e0 *= _energy_plot_factor
    # Raw free-energy points (blue dots) and fitted EOS curves (blue).
    for i, t in enumerate(self._temperatures[:self._len]):
        if i % thin_number == 0:
            ax.plot(self._volumes,
                    np.array(self._free_energies[i]) * _energy_plot_factor
                    - e0,
                    'bo', markeredgecolor='b', markersize=3)
            ax.plot(volume_points,
                    self._eos(volume_points, * self._equiv_parameters[i])
                    * _energy_plot_factor - e0, 'b-')
            thin_index = i
    # Label the first and the last drawn curves with their temperature.
    for i, j in enumerate((0, thin_index)):
        ax.text(self._volumes[-2],
                (self._free_energies[j, -1] + (1 - i * 2) * 0.1 - 0.05) *
                _energy_plot_factor - e0,
                "%dK" % int(self._temperatures[j]),
                fontsize=8)
    # Red curve through the equilibrium points.
    ax.plot(selected_volumes,
            np.array(selected_energies) * _energy_plot_factor - e0,
            'ro-', markeredgecolor='r', markersize=3)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(_ylabel)
def _plot_volume_temperature(self,
ax,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'Volume $(\AA^3)$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_volumes[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
def _plot_thermal_expansion(
        self,
        ax,
        xlabel='Temperature (K)',
        ylabel=r'Thermal expansion $(\mathrm{K}^{-1})$'):
    """Draw thermal expansion vs temperature on the given axes.

    The y axis uses scientific notation with the order of magnitude
    pinned to 1e-6 via a custom formatter.
    """
    from matplotlib.ticker import ScalarFormatter

    # Formatter that forces the y-axis scale factor to 10^-6 regardless
    # of the data range (overrides the private hook of ScalarFormatter).
    # NOTE(review): relies on the non-public _set_orderOfMagnitude API;
    # verify against the installed matplotlib version.
    class FixedScaledFormatter(ScalarFormatter):
        def __init__(self):
            ScalarFormatter.__init__(self, useMathText=True)

        def _set_orderOfMagnitude(self, range):
            self.orderOfMagnitude = -6

    ax.yaxis.set_major_formatter(FixedScaledFormatter())
    ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
    beta = np.array(self._thermal_expansions)
    ax.plot(self._temperatures[:self._len],
            beta[:self._len],
            'r-')
    ax.set_xlim(self._temperatures[0],
                self._temperatures[self._len - 1])
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
def _plot_gibbs_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Gibbs free energy (eV)'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_energies[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_bulk_modulus_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Bulk modulus (GPa)'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_bulk_modulus[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_heat_capacity_P_numerical(
self,
ax,
Z=1,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'$C\mathrm{_P}$ $\mathrm{(J/mol\cdot K)}$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
np.array(self._cp_numerical[:self._len]) / Z,
'r-')
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_heat_capacity_P_polyfit(
self,
ax,
Z=1,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'$C\mathrm{_P}$ $\mathrm{(J/mol\cdot K)}$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
np.array(self._cp_polyfit[:self._len]) / Z,
'r-')
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_gruneisen_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Gruneisen parameter'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._gruneisen_parameters[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _set_thermal_expansion(self):
beta = [0.]
for i in range(1, self._num_elems - 1):
dt = self._temperatures[i + 1] - self._temperatures[i - 1]
dv = self._equiv_volumes[i + 1] - self._equiv_volumes[i - 1]
beta.append(dv / dt / self._equiv_volumes[i])
self._thermal_expansions = beta
def _set_heat_capacity_P_numerical(self):
    """Compute Cp(T) = -T * d2G/dT2 from local quadratic fits of G(T).

    Gibbs energies are converted from eV to J/mol before fitting; the
    first entry is 0 because the three-point fit needs both neighbours.
    """
    gibbs_jmol = np.array(self._equiv_energies) * EvTokJmol * 1000
    cp = [0.0]
    for k in range(1, self._num_elems - 1):
        t = self._temperatures[k]
        # Quadratic through (T_{k-1}, T_k, T_{k+1}); 2*coeffs[0] is the
        # second derivative of G with respect to T.
        coeffs = np.polyfit(self._temperatures[k - 1:k + 2],
                            gibbs_jmol[k - 1:k + 2], 2)
        cp.append(-(2 * coeffs[0]) * t)
    self._cp_numerical = cp
def _set_heat_capacity_P_polyfit(self):
    """Compute Cp(T) = Cv + T * (dV/dT) * (dS/dV) via polynomial fits.

    For each interior temperature, Cv(V) and S(V) are fitted with
    degree-4 polynomials over the sampled volumes, and V(T) locally
    with a degree-2 polynomial, then evaluated at the equilibrium
    volume. Also stores dS/dV and the raw (V, Cv) / (V, S) data.

    NOTE(review): np.polyfit emits RankWarning as a *warning*, so the
    ``except`` clauses only trigger if warnings are escalated to
    errors elsewhere; also ``np.lib.polynomial.RankWarning`` was
    removed in NumPy 2.0 -- confirm the supported NumPy range.
    """
    # First entries are 0: no central fit possible at the boundary T.
    cp = [0.0]
    dsdv = [0.0]
    self._volume_entropy_parameters = []
    self._volume_cv_parameters = []
    self._volume_entropy = []
    self._volume_cv = []
    for j in range(1, self._num_elems - 1):
        t = self._temperatures[j]
        # Equilibrium volume at this temperature.
        x = self._equiv_volumes[j]
        try:
            parameters = np.polyfit(self._volumes, self._cv[j], 4)
        except np.lib.polynomial.RankWarning:
            msg = [
                "Failed to fit heat capacities to polynomial of degree 4."]
            if len(self._volumes) < 5:
                msg += [
                    "At least 5 volume points are needed for the fitting."]
            raise RuntimeError("\n".join(msg))
        # Cv evaluated at the equilibrium volume.
        cv_p = np.dot(parameters, np.array([x**4, x**3, x**2, x, 1]))
        self._volume_cv_parameters.append(parameters)
        try:
            parameters = np.polyfit(self._volumes, self._entropy[j], 4)
        except np.lib.polynomial.RankWarning:
            msg = [
                "Failed to fit entropies to polynomial of degree 4."]
            if len(self._volumes) < 5:
                msg += [
                    "At least 5 volume points are needed for the fitting."]
            raise RuntimeError("\n".join(msg))
        # dS/dV: derivative of the entropy fit at the equilibrium volume.
        dsdv_t = np.dot(parameters[:4], np.array(
            [4 * x**3, 3 * x**2, 2 * x, 1]))
        self._volume_entropy_parameters.append(parameters)
        try:
            parameters = np.polyfit(self._temperatures[j - 1:j + 2],
                                    self._equiv_volumes[j - 1: j + 2], 2)
        except np.lib.polynomial.RankWarning:
            msg = ("Failed to fit equilibrium volumes vs T to "
                   "polynomial of degree 2.")
            raise RuntimeError(msg)
        # dV/dT from the local quadratic fit of V(T).
        dvdt = parameters[0] * 2 * t + parameters[1]
        cp.append(cv_p + t * dvdt * dsdv_t)
        dsdv.append(dsdv_t)
        self._volume_cv.append(np.array([self._volumes, self._cv[j]]).T)
        self._volume_entropy.append(np.array([self._volumes,
                                              self._entropy[j]]).T)
    self._cp_polyfit = cp
    self._dsdv = dsdv
def _set_gruneisen_parameter(self):
    """Compute the thermodynamic Gruneisen parameter at temperatures.

    gamma = beta * K_T / (Cv / V), where Cv is evaluated at the
    equilibrium volume from a degree-4 fit over the sampled volumes
    and converted to the units of beta * K_T via the module constants.
    The first entry is 0 (beta is undefined at the boundary point).
    """
    gamma = [0]
    for i in range(1, self._num_elems - 1):
        v = self._equiv_volumes[i]
        kt = self._equiv_bulk_modulus[i]
        beta = self._thermal_expansions[i]
        try:
            parameters = np.polyfit(self._volumes, self._cv[i], 4)
        except np.lib.polynomial.RankWarning:
            msg = [
                "Failed to fit heat capacities to polynomial of degree 4."]
            if len(self._volumes) < 5:
                msg += [
                    "At least 5 volume points are needed for the fitting."]
            raise RuntimeError("\n".join(msg))
        # Cv at the equilibrium volume, converted to per-volume units
        # matching beta * kt (via EvTokJmol and EVAngstromToGPa).
        cv = (np.dot(parameters, [v**4, v**3, v**2, v, 1]) /
              v / 1000 / EvTokJmol * EVAngstromToGPa)
        # Avoid division by (near) zero Cv at very low temperatures.
        if cv < 1e-10:
            gamma.append(0.0)
        else:
            gamma.append(beta * kt / cv)
    self._gruneisen_parameters = gamma
def _get_num_elems(self, temperatures):
if self._t_max is None:
return len(temperatures)
else:
i = np.argmin(np.abs(temperatures - self._t_max))
return i + 1
def _set_rcParams(self, plt):
plt.rcParams['backend'] = 'PDF'
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['figure.subplot.left'] = 0.25
plt.rcParams['figure.subplot.bottom'] = 0.15
plt.rcParams['figure.figsize'] = 4, 6
plt.rcParams['text.usetex'] = True
| phonopy/qha/core.py | 40,335 | Bulk modulus class.
This class is used to calculate bulk modulus only from temperature
independent energy input.
Quasi harmonic approximation class.
Init method.
volumes : array_like
Unit cell volumes where energies are obtained.
shape=(volumes, ), dtype='double'.
energies : array_like
Energies obtained at volumes.
shape=(volumes, ), dtype='double'.
eos : str
Identifier of equation of states function.
Init method.
Parameters
----------
volumes: array_like
Unit cell volumes (V) in angstrom^3.
dtype='double'
shape=(volumes,)
electronic_energies: array_like
Electronic energies (U_el) or electronic free energies (F_el) in eV.
It is assumed to be the former if ndim==1 and the latter if ndim==2.
dtype='double'
shape=(volumes,) or (temperatures, volumes)
temperatures: array_like
Temperatures in ascending order (T) in K.
dtype='double'
shape=(temperatures,)
cv: array_like
Phonon Heat capacity at constant volume in J/K/mol.
dtype='double'
shape=(temperatures, volumes)
entropy: array_like
Phonon entropy at constant volume (S_ph) in J/K/mol.
dtype='double'
shape=(temperatures, volumes)
fe_phonon: array_like
Phonon Helmholtz free energy (F_ph) in kJ/mol.
dtype='double'
shape=(temperatures, volumes)
eos: str
Equation of state used for fitting F vs V.
'vinet', 'murnaghan' or 'birch_murnaghan'.
t_max: float
Maximum temperature to be calculated. This has to be not
greater than the temperature of the third element from the
end of the 'temperatures' elements. If t_max=None, the
temperature of the third element from the end is used.
energy_plot_factor: float
This value is multiplied to energy like values only in plotting.
Return fitted parameter B'.
Return bulk modulus.
Return bulk modulus vs temperature data.
Return fitted parameter of energy.
Return volume at equilibrium.
Return fitted parameter B'.
Return bulk modulus.
Return fitted parameter of energy.
Return EOS function as a python method.
Return volume at equilibrium.
Return fitted parameters.
Return Gibbs free energies at temperatures.
Return Gruneisen parameters at temperatures.
Return heat capacities at constant pressure at temperatures.
Values are computed by numerical derivative of Gibbs free energy.
Return heat capacities at constant pressure at temperatures.
Volumes are computed in another way to heat_capacity_P_numerical
for the better numerical behaviour. But this does not work
when temperature dependent electronic_energies is supplied.
Return Helmholtz free energies at temperatures and volumes.
Plot fitted EOS curve.
Fit parameters to EOS at temperatures.
Even if fitting failed, simply omit the volume point. In this case,
the failed temperature point doesn't exist in the returned arrays.
Return volumetric thermal expansion coefficients at temperatures.
Return equilibrium volumes at temperatures.
Phonopy QHA module.
Copyright (C) 2012 Atsushi Togo All rights reserved. This file is part of phonopy. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the phonopy project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. angstrom^3 eV K J/K/mol J/K/mol kJ/mol Plus one temperature point is necessary for computing e.g. beta. loop over temperaturs Simply omit volume point where the fitting failed. For computing following values at temperatures, finite difference method is used. Therefore number of temperature points are needed larger than self._num_elems that nearly equals to the temparature point we expect. To be run after thermal expansion. GPa exp exp exp | 4,883 | en | 0.736608 |
import os
from argparse import ArgumentParser
from time import time
import yaml
import numpy as np
from fx_replicator import (
build_model, load_wave, save_wave, sliding_window, LossFunc
)
import nnabla as nn
#import nnabla_ext.cudnn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.utils.save
import tqdm
def main():
    """Run offline inference: load a wave file, feed it through the
    trained model in fixed-size batches, and save the predicted wave.

    NOTE(review): args.model_file is parsed by parse_args() but never
    used here -- parameters are always loaded from "best_result.h5";
    presumably it should be args.model_file. Confirm before changing.
    """
    args = parse_args()
    with open(args.config_file) as fp:
        config = yaml.safe_load(fp)
    input_timesteps = config["input_timesteps"]
    output_timesteps = config["output_timesteps"]
    batch_size = config["batch_size"]
    data = load_wave(args.input_file)
    print("data.shape is:", data.shape)
    print("data.len is:", len(data))
    # Disabled GPU-context setup kept as an inert string literal.
    """
    from nnabla.ext_utils import get_extension_context
    cuda_device_id = 0
    ctx = get_extension_context('cudnn', device_id=cuda_device_id)
    print("Context: {}".format(ctx))
    nn.set_default_context(ctx) # Set CUDA as a default context.
    """
    # padding and rounded up to the batch multiple
    block_size = output_timesteps * batch_size
    prepad = input_timesteps - output_timesteps
    postpad = len(data) % block_size
    # NOTE(review): when len(data) is an exact multiple of block_size,
    # postpad == 0 and a full extra zero block is appended; harmless for
    # the output (y is truncated to len(data) below) but worth confirming.
    print("postpad", block_size - postpad)
    padded = np.concatenate((
        np.zeros(prepad, np.float32),
        data,
        np.zeros(block_size - postpad, np.float32)))
    # Slice the padded signal into overlapping windows, one per sample.
    x = sliding_window(padded, input_timesteps, output_timesteps)
    x = x[:, :, np.newaxis]
    y = np.zeros_like(x)
    batchlen = x.shape[0]
    print("x.length is:",batchlen)
    xx = nn.Variable((batch_size , input_timesteps, 1))
    nn.load_parameters("best_result.h5")
    print("xx.shape is:", xx.shape)
    yy = build_model(xx)
    print("yy.shape is:", yy.shape)
    # NOTE(review): debug print of an arbitrary slice starting at 32 --
    # presumably leftover instrumentation; confirm before removing.
    print("x.shape in the loop is:", x[32:32 + batch_size , : , : ].shape)
    start1 = time()
    # Feed one batch at a time through the network.
    for step in range(0, batchlen , batch_size):
        xx.d = x[step:step + batch_size , : , : ]
        yy.forward()
        y[step:step + batch_size , : , : ] = yy.d
        # Progress output: elapsed seconds and current batch offset.
        # NOTE(review): indentation of these three lines was
        # reconstructed from a flattened source -- confirm placement.
        proc_time = time() - start1
        print(proc_time)
        print(step)
    # Keep only the trailing output_timesteps of each window and trim
    # the zero padding back to the original signal length.
    y = y[:, -output_timesteps:, :].reshape(-1)[:len(data)]
    save_wave(y, args.output_file)
    print("finished\n")
    proc_time = time() - start1
    print(proc_time)
def parse_args():
    """Build the command-line interface and parse sys.argv.

    Returns a namespace with config_file, input_file, output_file and
    model_file attributes.
    """
    parser = ArgumentParser()
    # (flags, add_argument keyword arguments) for every option.
    options = [
        (("--config_file", "-c"),
         dict(default="./config.yml", help="configuration file (*.yml)")),
        (("--input_file", "-i"),
         dict(help="input wave file (48kHz/mono, *.wav)")),
        (("--output_file", "-o"),
         dict(default="./predicted.wav",
              help="output wave file (48kHz/mono, *.wav)")),
        (("--model_file", "-m"),
         dict(help="input model file (*.h5)")),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
# Entry point: run the prediction pipeline when executed as a script.
if __name__ == '__main__':
    main()
| predict.py | 2,870 | import nnabla_ext.cudnn padding and rounded up to the batch multiple | 68 | en | 0.330351 |
"""
This module contains utility functions for fields which are used by both the
:mod:`~sphinxcontrib_django2.docstrings.attributes` and
:mod:`~sphinxcontrib_django2.docstrings.classes` modules.
"""
from django.apps import apps
from django.contrib import contenttypes
from django.db import models
from django.utils.encoding import force_str
def get_field_type(field, include_role=True):
    """
    Get the type of a field including the correct intersphinx mappings.

    :param field: The field
    :type field: ~django.db.models.Field

    :param include_role: Whether or not the role :any:`py:class` should be included
    :type include_role: bool

    :return: The type of the field
    :rtype: str
    """
    if isinstance(field, models.fields.related.RelatedField):
        if isinstance(field.remote_field.model, str):
            # This happens with foreign keys of abstract models
            to = field.remote_field.model
        else:
            to = f"{field.remote_field.model.__module__}.{field.remote_field.model.__name__}"
        return f":class:`~{type(field).__module__}.{type(field).__name__}` to :class:`~{to}`"
    elif isinstance(field, models.fields.reverse_related.ForeignObjectRel):
        to = field.remote_field.model
        return (
            f"Reverse :class:`~{type(field.remote_field).__module__}."
            f"{type(field.remote_field).__name__}` from :class:`~{to.__module__}.{to.__name__}`"
        )
    else:
        if include_role:
            # For the docstrings of attributes, the :class: role is required
            return f":class:`~{type(field).__module__}.{type(field).__name__}`"
        else:
            # For the :param: role in class docstrings, the :class: role is not required
            return f"~{type(field).__module__}.{type(field).__name__}"
def get_field_verbose_name(field):
    """
    Get the verbose name of the field.

    If the field has a ``help_text``, it is also included.

    In case the field is a related field, the ``related_name`` is used to link to the remote model.
    For reverse related fields, the originating field is linked.

    :param field: The field
    :type field: ~django.db.models.Field

    :return: The verbose name, possibly extended by help text and cross-references
    :rtype: str
    """
    help_text = ""
    # Check whether the field is a reverse related field
    if isinstance(field, models.fields.reverse_related.ForeignObjectRel):
        # Convert related name to a readable name if ``snake_case`` is used
        related_name = (
            field.related_name.replace("_", " ") if field.related_name else None
        )
        if isinstance(field, models.fields.reverse_related.OneToOneRel):
            # If a related name is given, use it, else use the verbose name of the remote model
            related_name = related_name or field.remote_field.model._meta.verbose_name
            # If field is a OneToOne field, use the prefix "The"
            verbose_name = (
                f"The {related_name} of this {field.model._meta.verbose_name}"
            )
        else:
            # This means field is an instance of ManyToOneRel or ManyToManyRel
            # If a related name is given, use it, else use the verbose name of the remote model
            related_name = (
                related_name or field.remote_field.model._meta.verbose_name_plural
            )
            # If field is a foreign key or a ManyToMany field, use the prefix "All"
            verbose_name = (
                f"All {related_name} of this {field.model._meta.verbose_name}"
            )
        # Always link to the origin of the reverse related field
        verbose_name += (
            f" (related name of :attr:`~{field.remote_field.model.__module__}"
            f".{field.remote_field.model.__name__}.{field.remote_field.name}`)"
        )
    elif isinstance(field, contenttypes.fields.GenericForeignKey):
        # GenericForeignKey does not inherit from django.db.models.Field and has no verbose_name
        return (
            f"Generic foreign key to the :class:`~django.contrib.contenttypes.models.ContentType` "
            f"specified in "
            f":attr:`~{field.model.__module__}.{field.model.__name__}.{field.ct_field}`"
        )
    else:
        # This means the field is either a normal field or a forward related field
        # If the field is a primary key, include a notice
        primary_key = "Primary key: " if field.primary_key else ""
        field_verbose_name = force_str(field.verbose_name)
        # Make the first letter upper case while leaving the rest unchanged
        # (str.capitalize() would make the rest lower case, e.g. ID => Id)
        verbose_name = (
            primary_key + field_verbose_name[:1].upper() + field_verbose_name[1:]
        )
        help_text = force_str(field.help_text)
    # Add help text if field has one
    if help_text:
        # Separate verbose name and help text by a dot
        if not verbose_name.endswith("."):
            verbose_name += ". "
        verbose_name += help_text
    if isinstance(field, models.fields.related.RelatedField):
        # If field is a forward related field, reference the remote model
        to = field.remote_field.model
        if isinstance(to, str):
            # This happens with foreign keys of abstract models
            if "." in to:
                to = apps.get_model(to)
            elif to == "self":
                to = field.model
            else:
                to = apps.get_model(field.model._meta.app_label, to)
        # If a related name is defined
        if hasattr(field.remote_field, "related_name"):
            related_name = (
                field.remote_field.related_name or field.model.__name__.lower()
            )
            verbose_name += (
                f" (related name: :attr:`~{to.__module__}.{to.__name__}.{related_name}`)"
            )
    return verbose_name
| sphinxcontrib_django2/docstrings/field_utils.py | 5,848 | Get the type of a field including the correct intersphinx mappings.
:param field: The field
:type field: ~django.db.models.Field
:param include_directive: Whether or not the role :any:`py:class` should be included
:type include_directive: bool
:return: The type of the field
:rtype: str
Get the verbose name of the field.
If the field has a ``help_text``, it is also included.
In case the field is a related field, the ``related_name`` is used to link to the remote model.
For reverse related fields, the originating field is linked.
:param field: The field
:type field: ~django.db.models.Field
This module contains utiliy functions for fields which are used by both the
:mod:`~sphinxcontrib_django2.docstrings.attributes` and
:mod:`~sphinxcontrib_django2.docstrings.classes` modules.
This happens with foreign keys of abstract models For the docstrings of attributes, the :class: role is required For the :param: role in class docstrings, the :class: role is not required Check whether the field is a reverse related field Convert related name to a readable name if ``snake_case`` is used If a related name is given, use it, else use the verbose name of the remote model If field is a OneToOne field, use the prefix "The" This means field is an instance of ManyToOneRel or ManyToManyRel If a related name is given, use it, else use the verbose name of the remote model If field is a foreign key or a ManyToMany field, use the prefix "All" Always link to the origin of the reverse related field GenericForeignKey does not inherit from django.db.models.Field and has no verbose_name This means the field is either a normal field or a forward related field If the field is a primary key, include a notice Make the first letter upper case while leave the rest unchanged (str.capitalize() would make the rest lower case, e.g. ID => Id) Add help text if field has one Separate verbose name and help text by a dot If field is a forward related field, reference the remote model This happens with foreign keys of abstract models If a related name is defined | 2,057 | en | 0.833739 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.