max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
compiler-rt/unittests/lit_unittest_cfg_utils.py
|
medismailben/llvm-project
| 2,338
|
6628751
|
<filename>compiler-rt/unittests/lit_unittest_cfg_utils.py<gh_stars>1000+
# Put all 64-bit tests in the shadow-memory parallelism group. We throttle those
# in our common lit config (lit.common.unit.cfg.py).
def darwin_sanitizer_parallelism_group_func(test):
    """Assign 64-bit (x86_64) tests to the throttled "shadow-memory" group.

    Returns the parallelism-group name, or None for tests that are not
    throttled (lit treats None as "no group").
    """
    if "x86_64" in test.file_path:
        return "shadow-memory"
    return None
|
<filename>compiler-rt/unittests/lit_unittest_cfg_utils.py<gh_stars>1000+
# Put all 64-bit tests in the shadow-memory parallelism group. We throttle those
# in our common lit config (lit.common.unit.cfg.py).
def darwin_sanitizer_parallelism_group_func(test):
    """Map any test whose path mentions x86_64 into the "shadow-memory" group.

    Non-matching tests get None, i.e. no parallelism group.
    """
    return "shadow-memory" if test.file_path.find("x86_64") != -1 else None
|
en
| 0.704809
|
# Put all 64-bit tests in the shadow-memory parallelism group. We throttle those # in our common lit config (lit.common.unit.cfg.py).
| 1.635064
| 2
|
employee/views.py
|
AnnabelNkir/MVC-CrudCapability
| 0
|
6628752
|
<gh_stars>0
from django.shortcuts import render,redirect
import employee
from .models import Employee
from .forms import *
from .forms import EmployeeForm
def employees_list(request):
    """Render all employees, newest (highest id) first."""
    ordered_employees = Employee.objects.order_by('-id')
    return render(request, 'list.html', {'employees': ordered_employees})
def create_employee(request):
    """Show an empty EmployeeForm; on a valid POST, save and redirect."""
    if request.method == 'POST':
        form = EmployeeForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('employees-list')
    else:
        form = EmployeeForm()
    # GET, or POST with validation errors: re-render the form.
    return render(request, 'create.html', {'form': form})
def edit_employee(request, id):
    """Edit the employee with primary key ``id`` via EditEmployeeForm."""
    employee = Employee.objects.get(id=id)
    if request.method == 'POST':
        form = EditEmployeeForm(request.POST, instance=employee)
        if form.is_valid():
            form.save()
            return redirect('employees-list')
    else:
        form = EditEmployeeForm(instance=employee)
    # GET, or POST with validation errors: show the (possibly bound) form.
    return render(request, 'edit.html', {'employee': employee, 'form': form})
def delete_employee(request, id):
    """Confirm deletion on GET; delete and redirect on POST."""
    employee = Employee.objects.get(id=id)
    if request.method == 'POST':
        employee.delete()
        return redirect('employees-list')
    return render(request, 'delete.html', {'employee': employee})
|
from django.shortcuts import render,redirect
import employee
from .models import Employee
from .forms import *
from .forms import EmployeeForm
def employees_list(request):
    """List every employee ordered by descending id (newest first)."""
    context = {'employees': Employee.objects.order_by('-id')}
    return render(request, 'list.html', context)
def create_employee(request):
    """Create a new employee from a POSTed EmployeeForm."""
    form = EmployeeForm(request.POST) if request.method == 'POST' else EmployeeForm()
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('employees-list')
    # Fall through for GET requests and invalid submissions.
    return render(request, 'create.html', {'form': form})
def edit_employee(request, id):
    """Update an existing employee identified by ``id``."""
    employee = Employee.objects.get(id=id)
    form = EditEmployeeForm(instance=employee)
    if request.method == 'POST':
        form = EditEmployeeForm(request.POST, instance=employee)
        if form.is_valid():
            form.save()
            return redirect('employees-list')
    # Re-render with the bound form so validation errors are shown.
    context = {'employee': employee, 'form': form}
    return render(request, 'edit.html', context)
def delete_employee(request, id):
    """Delete the employee on POST; otherwise show the confirmation page."""
    employee = Employee.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'delete.html', {'employee': employee})
    employee.delete()
    return redirect('employees-list')
|
none
| 1
| 2.218359
| 2
|
|
HackerRank/number_strings.py
|
Haroldgm/Python
| 0
|
6628753
|
# https://www.hackerrank.com/challenges/python-print/problem
# The included code stub will read an integer, n, from STDIN.
# Without using any string methods, try to print the following: 123...n
# Note that "..." represents the consecutive values in between.
# Example: n = 5
# Print the string 12345
if __name__ == '__main__':
    # Read the upper bound n from STDIN.
    n = int(input())
    # Unpack 1..n as separate print arguments with an empty separator,
    # producing "123...n" without any string methods.
    print(*range(1,n+1), sep='')
|
# https://www.hackerrank.com/challenges/python-print/problem
# The included code stub will read an integer, n, from STDIN.
# Without using any string methods, try to print the following: 123...n
# Note that "..." represents the consecutive values in between.
# Example: n = 5
# Print the string 12345
if __name__ == '__main__':
    # n is the last value to print, read from STDIN.
    n = int(input())
    # Star-unpack the range so print receives each number as its own
    # argument; sep='' joins them with nothing in between.
    print(*range(1,n+1), sep='')
|
en
| 0.834123
|
# https://www.hackerrank.com/challenges/python-print/problem # The included code stub will read an integer, , from STDIN. # Without using any string methods, try to print the following: # Note that "" represents the consecutive values in between. # Example n = 5 #Print the string 12345
| 4.065676
| 4
|
src/neon/testing/__init__.py
|
MUTTERSCHIFF/ngraph-neon
| 13
|
6628754
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from neon.testing.decorators import with_error_settings, raise_all_numpy_errors
from neon.testing.error_check import assert_allclose
from neon.testing.random import RandomTensorGenerator
from neon.testing.execution import executor, ExecutorFactory, \
numeric_derivative, check_derivative, is_flex_factory
from neon.testing.conv_utils import ConvParams, reference_conv, reference_deconv_bprop, \
reference_deconv_fprop
# Public API of neon.testing: names re-exported for
# ``from neon.testing import *``. Keep in sync with the imports above.
__all__ = [
    'with_error_settings',
    'raise_all_numpy_errors',
    'assert_allclose',
    'RandomTensorGenerator',
    'executor',
    'ExecutorFactory',
    'numeric_derivative',
    'check_derivative',
    'ConvParams',
    'reference_conv',
    'reference_deconv_bprop',
    'reference_deconv_fprop',
    'is_flex_factory',
]
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from neon.testing.decorators import with_error_settings, raise_all_numpy_errors
from neon.testing.error_check import assert_allclose
from neon.testing.random import RandomTensorGenerator
from neon.testing.execution import executor, ExecutorFactory, \
numeric_derivative, check_derivative, is_flex_factory
from neon.testing.conv_utils import ConvParams, reference_conv, reference_deconv_bprop, \
reference_deconv_fprop
# Explicit export list for the neon.testing package; every name below is
# re-imported above from a submodule.
__all__ = [
    'with_error_settings',
    'raise_all_numpy_errors',
    'assert_allclose',
    'RandomTensorGenerator',
    'executor',
    'ExecutorFactory',
    'numeric_derivative',
    'check_derivative',
    'ConvParams',
    'reference_conv',
    'reference_deconv_bprop',
    'reference_deconv_fprop',
    'is_flex_factory',
]
|
en
| 0.727908
|
# ****************************************************************************** # Copyright 2017-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ******************************************************************************
| 1.787065
| 2
|
apps/opsdocs/apps.py
|
ykyk1229/TurtleDove
| 1
|
6628755
|
<gh_stars>1-10
from django.apps import AppConfig
class OpsdocsConfig(AppConfig):
    """Django AppConfig registering the ``opsdocs`` application."""
    name = 'opsdocs'
|
from django.apps import AppConfig
class OpsdocsConfig(AppConfig):
    """App configuration for the ``opsdocs`` Django app."""
    name = 'opsdocs'
|
none
| 1
| 1.028528
| 1
|
|
Python/Discord/olds/serverPrefix.py
|
programmer-666/Codes
| 0
|
6628756
|
# actual checks
import discord
from discord.ext import commands, tasks
# Bot instance using "%" as the command prefix.
cl = commands.Bot(command_prefix='%')


@cl.event
async def on_ready():
    # Fired once the bot has connected and is ready to receive events.
    print('ready')


# '<KEY>' is a placeholder; the real bot token was redacted from this dump.
cl.run('<KEY>')
|
# actual checks
import discord
from discord.ext import commands, tasks
# Create the command bot; commands are invoked with the "%" prefix.
cl = commands.Bot(command_prefix='%')


@cl.event
async def on_ready():
    # Connection-established callback: just log readiness.
    print('ready')


# Token placeholder ('<KEY>'): the actual secret was scrubbed.
cl.run('<KEY>')
|
es
| 0.934346
|
# actual checks
| 2.317033
| 2
|
saleor/graphql/order/filters.py
|
Iahack/saleor
| 9
|
6628757
|
from django.db.models import Q
from django_filters import CharFilter, NumberFilter
from graphene_django.filter.filterset import GlobalIDMultipleChoiceFilter
from ...order import models
from ..core.filters import DistinctFilterSet
class OrderFilter(DistinctFilterSet):
    """Filter class for order query.
    Field id is a GraphQL type ID, while order_id represents database
    primary key.
    """
    # Accepts multiple global GraphQL IDs.
    id = GlobalIDMultipleChoiceFilter(name='id', label='GraphQL ID')
    # Numeric database primary key; resolved by order_id_lookup below.
    order_id = NumberFilter(method='order_id_lookup', label='Database ID')
    # Creation-date range bounds (values expected as ISO 8601 strings).
    created__gte = CharFilter(
        name='created', lookup_expr='gte', label='ISO 8601 standard')
    created__lte = CharFilter(
        name='created', lookup_expr='lte', label='ISO 8601 standard')
    # Free-text customer search; see filter_by_order_customer.
    user = CharFilter(method='filter_by_order_customer')

    class Meta:
        model = models.Order
        fields = {
            'status': ['exact'],
            'total_net': ['exact', 'lte', 'gte']}

    def order_id_lookup(self, queryset, name, value):
        # Exact match on the database primary key.
        return queryset.filter(pk__exact=value)

    def filter_by_order_customer(self, queryset, name, value):
        # Case-insensitive substring match on the related user's email,
        # billing first/last name, or the order's denormalized user_email.
        return queryset.filter(
            Q(user__email__icontains=value) |
            Q(user__default_billing_address__first_name__icontains=value) |
            Q(user__default_billing_address__last_name__icontains=value) |
            Q(user_email__icontains=value))
|
from django.db.models import Q
from django_filters import CharFilter, NumberFilter
from graphene_django.filter.filterset import GlobalIDMultipleChoiceFilter
from ...order import models
from ..core.filters import DistinctFilterSet
class OrderFilter(DistinctFilterSet):
    """Filter class for order query.
    Field id is a GraphQL type ID, while order_id represents database
    primary key.
    """
    # Multiple-choice filter over global GraphQL IDs.
    id = GlobalIDMultipleChoiceFilter(name='id', label='GraphQL ID')
    # Database primary key; handled by order_id_lookup.
    order_id = NumberFilter(method='order_id_lookup', label='Database ID')
    # Lower/upper bounds on the creation timestamp (ISO 8601 strings).
    created__gte = CharFilter(
        name='created', lookup_expr='gte', label='ISO 8601 standard')
    created__lte = CharFilter(
        name='created', lookup_expr='lte', label='ISO 8601 standard')
    # Text search across customer identity fields.
    user = CharFilter(method='filter_by_order_customer')

    class Meta:
        model = models.Order
        fields = {
            'status': ['exact'],
            'total_net': ['exact', 'lte', 'gte']}

    def order_id_lookup(self, queryset, name, value):
        # pk__exact: match on the primary key column directly.
        return queryset.filter(pk__exact=value)

    def filter_by_order_customer(self, queryset, name, value):
        # OR together icontains matches on user email, billing names, and
        # the denormalized order email field.
        return queryset.filter(
            Q(user__email__icontains=value) |
            Q(user__default_billing_address__first_name__icontains=value) |
            Q(user__default_billing_address__last_name__icontains=value) |
            Q(user_email__icontains=value))
|
en
| 0.822852
|
Filter class for order query. Field id is a GraphQL type ID, while order_id represents database primary key.
| 2.15558
| 2
|
core/python/kungfu/command/account/edit.py
|
lf-shaw/kungfu
| 2,209
|
6628758
|
import click
from kungfu.command.account import account, pass_ctx_from_parent, make_questions, encrypt
from PyInquirer import prompt
@account.command()
@click.option('--receive_md', is_flag=True, help='receive market data with this account')
@click.option('-i', '--id', type=str, required=True, help='id')
@click.pass_context
def edit(ctx, receive_md, id):
    """Interactively edit an existing account's stored configuration."""
    pass_ctx_from_parent(ctx)
    # Accounts are keyed as "<source>_<id>" in the database.
    account_id = ctx.source + '_' + id
    account_data = ctx.db.find_account(account_id)
    if account_data:
        # Prompt with the existing config pre-filled, then encrypt answers
        # before persisting.
        answers = encrypt(ctx.schema, prompt(make_questions(ctx.schema, account_data['config'])))
        if receive_md:
            # presumably only one account per source may receive market
            # data, so the previous flag holder is cleared first — confirm.
            ctx.db.reset_receive_md(ctx.source)
        # Keep the previous receive_md setting unless the flag was passed.
        receive_md = receive_md or account_data['receive_md']
        ctx.db.add_account(account_id=account_id, source_name=ctx.source, receive_md=receive_md, config=answers)
    else:
        click.echo('Account not found')
|
import click
from kungfu.command.account import account, pass_ctx_from_parent, make_questions, encrypt
from PyInquirer import prompt
@account.command()
@click.option('--receive_md', is_flag=True, help='receive market data with this account')
@click.option('-i', '--id', type=str, required=True, help='id')
@click.pass_context
def edit(ctx, receive_md, id):
    """Re-prompt for and save the configuration of account <source>_<id>."""
    pass_ctx_from_parent(ctx)
    # Composite DB key: source name plus user-supplied id.
    account_id = ctx.source + '_' + id
    account_data = ctx.db.find_account(account_id)
    if account_data:
        # Existing config seeds the interactive questionnaire; the answers
        # are encrypted with the schema before storage.
        answers = encrypt(ctx.schema, prompt(make_questions(ctx.schema, account_data['config'])))
        if receive_md:
            # Clear the market-data flag on other accounts of this source
            # before assigning it here.
            ctx.db.reset_receive_md(ctx.source)
        receive_md = receive_md or account_data['receive_md']
        ctx.db.add_account(account_id=account_id, source_name=ctx.source, receive_md=receive_md, config=answers)
    else:
        click.echo('Account not found')
|
none
| 1
| 2.510924
| 3
|
|
Leetcode/Best_Time_to_Buy_and_Sell_Stock_II.py
|
jiangtianyu2009/bop
| 1
|
6628759
|
class Solution:
    def maxProfit(self, prices: list[int]) -> int:
        """Return the maximum profit from unlimited buy/sell transactions.

        Greedy: every positive day-over-day price increase can be captured
        by buying the day before and selling the day after, so the answer
        is the sum of all positive consecutive differences.

        Fixes: the original annotated ``List[int]`` without importing
        ``typing.List`` (NameError when the class body executes) and used
        ``sum`` as a local, shadowing the builtin. Returns 0 for empty or
        single-element inputs.
        """
        profit = 0
        # zip pairs each day's price with the previous day's price.
        for yesterday, today in zip(prices, prices[1:]):
            if today > yesterday:
                profit += today - yesterday
        return profit
|
class Solution:
    def maxProfit(self, prices: list[int]) -> int:
        """Max profit with unlimited transactions (LeetCode 122).

        Sums every positive consecutive price difference — equivalent to
        trading on each rising day.

        Fixes: ``List[int]`` was annotated without ``from typing import
        List`` (a NameError at class-definition time); the builtin generic
        ``list[int]`` needs no import. The accumulator is renamed so it no
        longer shadows the builtin ``sum``.
        """
        total = 0
        p = 1
        while p < len(prices):
            if prices[p] > prices[p - 1]:
                total += prices[p] - prices[p - 1]
            p += 1
        return total
|
none
| 1
| 2.841203
| 3
|
|
src/legacy/python/legacy_gui/rlbot_legacy_gui/preset_editors.py
|
VirxEC/RLBot
| 408
|
6628760
|
import os
import json
from PyQt5 import QtWidgets, QtCore
import configparser
import pathlib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidgetItem
from rlbot_legacy_gui.design.car_customisation import Ui_LoadoutPresetCustomiser
from rlbot_legacy_gui.design.agent_customisation import Ui_AgentPresetCustomiser
from rlbot_legacy_gui.presets import AgentPreset, LoadoutPreset
from rlbot.utils.file_util import get_python_root
def index_of_config_path_in_combobox(combobox, config_path):
    """Return the combobox row whose preset has ``config_path``, else None."""
    for idx in range(combobox.count()):
        if combobox.itemData(idx).config_path == config_path:
            return idx
    return None
def index_of_config_path_in_listwidget(listwidget, config_path):
    """Return the list row whose stored preset has ``config_path``, else None."""
    for row in range(listwidget.count()):
        preset = listwidget.item(row).data(Qt.UserRole)
        if preset.config_path == config_path:
            return row
    return None
class BasePresetEditor(QtWidgets.QMainWindow):
    """
    The Base of the popup windows to modify a Preset, handles the basic method of editing the preset
    """

    def __init__(self, qt_gui, presets: dict, root_combobox: QtWidgets.QComboBox, preset_class):
        # qt_gui: the parent GUI object; presets: mapping of config path
        # (or a "temp:<name>" key for unsaved presets) to preset objects;
        # root_combobox: the main-window combobox mirroring the preset list;
        # preset_class: constructor used for new/loaded presets.
        super().__init__()
        self.setupUi(self)
        self.setWindowIcon(qt_gui.windowIcon())
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.qt_gui = qt_gui
        self.presets = presets
        self.root_combobox = root_combobox
        self.preset_class = preset_class
        self.load_variables()
        self.connect_functions()
        self.update_presets_widgets()

    def popup(self):
        """Show the editor window and select the first preset."""
        self.show()
        self.presets_listwidget.setCurrentRow(0)

    def load_variables(self):
        # Hook for subclasses to set up extra state before signal wiring.
        pass

    def get_current_preset(self):
        """Return the preset stored on the currently selected list item."""
        return self.presets_listwidget.currentItem().data(Qt.UserRole)

    def connect_functions(self):
        # Wire widget signals to their handlers; subclasses extend this.
        self.presets_listwidget.itemSelectionChanged.connect(self.load_selected_preset)
        self.preset_name_lineedit.editingFinished.connect(self.preset_name_changed)
        self.preset_new_pushbutton.clicked.connect(self.add_new_preset)
        self.preset_load_pushbutton.clicked.connect(self.load_preset_cfg)
        self.preset_save_pushbutton.clicked.connect(self.save_preset_cfg)

    def load_selected_preset(self):
        """Populate the right-hand editing pane from the selected preset."""
        if len(self.presets_listwidget.selectedItems()) == 0:
            # Nothing selected: disable editing controls.
            self.right_frame.setEnabled(False)
            return
        self.right_frame.setEnabled(True)
        preset = self.get_current_preset()
        self.preset_name_lineedit.setText(preset.get_name())
        self.preset_path_lineedit.setText(preset.config_path)

    def update_presets_widgets(self):
        """Rebuild the list widget and the root combobox from self.presets."""
        self.presets_listwidget.clear()
        for config_path, preset in self.presets.items():
            item = QListWidgetItem(preset.get_name(), self.presets_listwidget)
            # Attach the preset object itself so handlers can fetch it.
            item.setData(Qt.UserRole, preset)
            self.presets_listwidget.addItem(item)
        # Also updates the combobox which you can select the agent preset for the bot through
        current_text = self.root_combobox.currentText()
        # Block signals while repopulating so no selection events fire.
        self.root_combobox.blockSignals(True)
        self.root_combobox.clear()
        for config_path, preset in self.presets.items():
            self.root_combobox.addItem(preset.get_name(), preset)
        self.root_combobox.setCurrentText(current_text)
        self.root_combobox.blockSignals(False)

    def preset_name_changed(self):
        """Apply the name typed into the line edit to the current preset."""
        new_name = self.preset_name_lineedit.text()
        self.update_preset_name(self.get_current_preset(), new_name)

    def update_preset_name(self, preset, new_name):
        """Rename a preset and its list entry (if it is shown)."""
        preset_index = index_of_config_path_in_listwidget(self.presets_listwidget, preset.config_path)
        if preset_index is not None:
            self.presets_listwidget.item(preset_index).setText(new_name)
        preset.name = new_name

    def add_new_preset(self):
        """Create a fresh preset with a unique name and select it."""
        name = self.validate_name("new preset", None)
        preset = self.preset_class(name)
        # Unsaved presets are keyed with a "temp:" prefix until saved.
        preset_key = "temp:" + name
        self.presets[preset_key] = preset
        self.update_presets_widgets()
        for i in range(self.presets_listwidget.count()):
            list_item = self.presets_listwidget.item(i)
            if list_item.data(Qt.UserRole) == preset:
                self.presets_listwidget.setCurrentRow(i)

    def load_preset_cfg(self):
        """Load a preset from a user-chosen .cfg file; return it, or None."""
        file_path = QtWidgets.QFileDialog.getOpenFileName(self, 'Load Config', '', 'Config Files (*.cfg)')[0]
        if not os.path.isfile(file_path):
            # Dialog cancelled or path vanished.
            return
        if pathlib.Path(file_path).suffix != '.cfg':
            self.popup_message("This file is not a config file!", "Invalid File Extension",
                               QtWidgets.QMessageBox.Warning)
            return
        try:
            preset = self.preset_class(self.validate_name(pathlib.Path(file_path).stem, None), file_path)
        except configparser.NoSectionError:
            self.popup_message("This file does not have the right sections!", "Invalid Config File",
                               QtWidgets.QMessageBox.Warning)
            return
        self.presets[preset.get_name()] = preset
        self.update_presets_widgets()
        self.presets_listwidget.setCurrentItem(
            self.presets_listwidget.findItems(preset.get_name(), QtCore.Qt.MatchExactly)[0])
        return preset

    def save_preset_cfg(self, time_out=0):
        """Save the current preset; ask for a file path if it has none."""
        preset = self.get_current_preset()
        if preset.config_path is None:
            file_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Config', '', 'Config Files (*.cfg)')[0]
            if file_path is None or not file_path:
                return
            preset.config_path = os.path.realpath(file_path)
            # Remove this preset from the dict since it is not currently keyed by config path
            self.presets = {k: v for k, v in self.presets.items() if v != preset}
            # NOTE(review): the loop below is redundant — the dict
            # comprehension above already removed every entry equal to
            # ``preset`` — confirm before simplifying.
            for key in list(self.presets.keys()):
                if self.presets[key] == preset:
                    del self.presets[key]
            # Add it back keyed by config path
            self.presets[preset.config_path] = preset
            new_name = self.validate_name(pathlib.Path(preset.config_path).stem, preset)
            self.update_preset_name(preset, new_name)
            preset.save_config(time_out=time_out, message_out=self.statusbar.showMessage)
            self.load_selected_preset()
            self.update_presets_widgets()
        else:
            preset.save_config(time_out=time_out, message_out=self.statusbar.showMessage)

    def validate_name(self, name, preset, copy_index=None):
        """Return ``name`` made unique among presets by appending " (n)".

        ``preset`` may keep its own current name; recursion bumps the
        numeric suffix until no other preset uses the candidate value.
        """
        value = name
        if copy_index is not None:
            value += " (" + str(copy_index) + ")"
        for key, p in self.presets.items():
            if p.get_name() == value:
                if p is preset:
                    return value
                return self.validate_name(name, preset, (copy_index or 1) + 1)
        return value

    def popup_message(self, message: str, title: str, icon=QtWidgets.QMessageBox.Warning):
        """Show a modal message box with an OK button."""
        popup = QtWidgets.QMessageBox(self)
        popup.setIcon(icon)
        popup.setWindowTitle(title)
        popup.setText(message)
        popup.setStandardButtons(QtWidgets.QMessageBox.Ok)
        popup.exec_()
        return
class CarCustomisationDialog(BasePresetEditor, Ui_LoadoutPresetCustomiser):
    """
    The class extending BasePresetEditor to allow some loadout preset specific preset editing, like handling item names
    """

    def __init__(self, qt_gui):
        super().__init__(qt_gui, qt_gui.loadout_presets, qt_gui.loadout_preset_combobox, LoadoutPreset)

    def load_variables(self):
        # Build the widget<->config lookup tables and the item-name dicts,
        # then fill the comboboxes with the known item labels.
        super().load_variables()
        self.create_config_headers_dicts()
        self.longlabel_to_id, self.id_to_longlabel = self.get_item_dicts()
        self.prefill_comboboxes()

    def load_preset_cfg(self) -> LoadoutPreset:
        # Narrows the base-class return annotation to LoadoutPreset.
        return super().load_preset_cfg()

    def connect_functions(self):
        # Every spinbox/combobox pair routes through one handler.
        super().connect_functions()
        for config_widget in self.config_widgets_to_headers:
            if isinstance(config_widget, QtWidgets.QComboBox):
                config_widget.currentIndexChanged.connect(self.update_spinbox_and_combobox)
            elif isinstance(config_widget, QtWidgets.QAbstractSpinBox):
                config_widget.valueChanged.connect(self.update_spinbox_and_combobox)

    def update_spinbox_and_combobox(self):
        """Keep an item's spinbox and combobox in sync and persist the value."""
        # Updates the corresponding widget (ie update spinbox if combobox edited)
        updated_widget = self.sender()
        # config_headers contains the config_header (team) and the config_item (ie decal)
        config_headers = self.config_widgets_to_headers[updated_widget]
        # widget_list contains the spinbox and combobox (if it exists) associated with that item
        widget_list = self.config_headers_to_widgets[config_headers[0]][config_headers[1]]
        item_id = 0
        if len(widget_list) != 1:
            # there is a widget to update
            for widget_to_update in widget_list:
                if widget_to_update is updated_widget:
                    # no need to update itself, therefore continue
                    continue
                config_headers = self.config_widgets_to_headers[widget_to_update]
                category = self.config_headers_to_categories[config_headers[1]]
                if isinstance(widget_to_update, QtWidgets.QComboBox):
                    # update combobox by selecting decal_labels.index(self.categorised_items[updated_widget.value()])
                    item_id = updated_widget.value()
                    try:
                        # try to get item name from id in self.id_to_longlabel
                        item_name = self.id_to_longlabel[category][item_id]
                        try:
                            # try to select item in combobox
                            widget_to_update.setCurrentText(item_name)
                        except ValueError:
                            # print('Error: Item ID entered does not belong in this category. (%s)' % item_name)
                            widget_to_update.setCurrentText('Unknown')
                    except KeyError:
                        # unknown item selected, the id exists in no category
                        # print('Unknown item ID entered (%s) in %s' % (item_id, widget_to_update.objectName()))
                        widget_to_update.setCurrentText('Unknown')
                elif isinstance(widget_to_update, QtWidgets.QAbstractSpinBox):
                    item_longlabel = updated_widget.currentText()
                    if item_longlabel == "Unknown":
                        item_id = 0
                        continue
                    # get the id of the item, if this throws an error the dict got somehow messed up,
                    # since the combobox was originally loaded from the same dict
                    item_id = self.longlabel_to_id[category][item_longlabel]
                    # set the spinbox to the new value
                    widget_to_update.setValue(item_id)
        else:
            # Spinbox-only entries (the color ids) have no paired combobox.
            item_id = updated_widget.value()
        preset = self.get_current_preset()
        preset.config.set_value(config_headers[0], config_headers[1], item_id)
        # Autosave only when enabled and the preset already lives on disk.
        if self.preset_autosave_checkbox.isChecked() and preset.config_path is not None and os.path.isfile(
                preset.config_path):
            self.save_preset_cfg(10)

    def load_selected_preset(self):
        """Push the selected preset's config values into the spinboxes."""
        super().load_selected_preset()
        for config_header_key, config_header in self.get_current_preset().config.headers.items():
            # for config_header in config, update widget value
            for config_value_key in config_header.values:
                if config_value_key == "name":
                    continue
                try:
                    widgets = self.config_headers_to_widgets[config_header_key][config_value_key]
                    try:
                        for widget in widgets:
                            # only update spinboxes - let comboboxes update through spinbox update detection.
                            if isinstance(widget, QtWidgets.QAbstractSpinBox):
                                # widget.setValue(config_header_value.value)
                                widget.setValue(config_header.get(config_value_key))
                    except:
                        # print("An error occurred")
                        pass
                except KeyError:
                    # print('Unknown loadout config header entry: %s' % config_value_key)
                    pass

    def create_config_headers_dicts(self):
        """
        Creates the config_headers_to_widgets and config_widgets_to_headers and config_headers_to_categories dicts
        """
        # Maps (config section -> config key) to the widget tuple editing it:
        # always a spinbox, plus a combobox for item-type entries.
        self.config_headers_to_widgets = {
            # blue stuff
            'Bot Loadout': {
                'team_color_id': (self.blue_primary_spinbox,),
                'custom_color_id': (self.blue_secondary_spinbox,),
                'car_id': (self.blue_car_spinbox, self.blue_car_combobox),
                'decal_id': (self.blue_decal_spinbox, self.blue_decal_combobox),
                'wheels_id': (self.blue_wheels_spinbox, self.blue_wheels_combobox),
                'boost_id': (self.blue_boost_spinbox, self.blue_boost_combobox),
                'antenna_id': (self.blue_antenna_spinbox, self.blue_antenna_combobox),
                'hat_id': (self.blue_hat_spinbox, self.blue_hat_combobox),
                'paint_finish_id': (self.blue_paint_finish_spinbox, self.blue_paint_finish_combobox),
                'custom_finish_id': (self.blue_custom_finish_spinbox, self.blue_custom_finish_combobox),
                'engine_audio_id': (self.blue_engine_spinbox, self.blue_engine_combobox),
                'trails_id': (self.blue_trails_spinbox, self.blue_trails_combobox),
                'goal_explosion_id': (self.blue_goal_explosion_spinbox, self.blue_goal_explosion_combobox)
            },
            'Bot Loadout Orange': {
                'team_color_id': (self.orange_primary_spinbox,),
                'custom_color_id': (self.orange_secondary_spinbox,),
                'car_id': (self.orange_car_spinbox, self.orange_car_combobox),
                'decal_id': (self.orange_decal_spinbox, self.orange_decal_combobox),
                'wheels_id': (self.orange_wheels_spinbox, self.orange_wheels_combobox),
                'boost_id': (self.orange_boost_spinbox, self.orange_boost_combobox),
                'antenna_id': (self.orange_antenna_spinbox, self.orange_antenna_combobox),
                'hat_id': (self.orange_hat_spinbox, self.orange_hat_combobox),
                'paint_finish_id': (self.orange_paint_finish_spinbox, self.orange_paint_finish_combobox),
                'custom_finish_id': (self.orange_custom_finish_spinbox, self.orange_custom_finish_combobox),
                'engine_audio_id': (self.orange_engine_spinbox, self.orange_engine_combobox),
                'trails_id': (self.orange_trails_spinbox, self.orange_trails_combobox),
                'goal_explosion_id': (self.orange_goal_explosion_spinbox, self.orange_goal_explosion_combobox)
            },
        }
        # Inverse mapping: widget -> (section, key).
        self.config_widgets_to_headers = {}
        for header_1, _field_dict in self.config_headers_to_widgets.items():
            for header_2, _widgets in _field_dict.items():
                for _widget in _widgets:
                    self.config_widgets_to_headers[_widget] = (header_1, header_2)
        # Config key -> item-catalog category ("Slot") name.
        self.config_headers_to_categories = {
            'car_id': 'Body',
            'decal_id': 'Decal',
            'wheels_id': 'Wheels',
            'boost_id': 'Rocket Boost',
            'antenna_id': 'Antenna',
            'hat_id': 'Topper',
            'paint_finish_id': 'Paint Finish',
            'custom_finish_id': 'Paint Finish',
            'engine_audio_id': 'Engine Audio',
            'trails_id': 'Trail',
            'goal_explosion_id': 'Goal Explosion'
        }

    def prefill_comboboxes(self):
        """Fill every item combobox with its category's labels plus 'Unknown'."""
        for widget, config_headers in self.config_widgets_to_headers.items():
            if isinstance(widget, QtWidgets.QComboBox):
                config_headers = self.config_widgets_to_headers[widget]
                config_category = self.config_headers_to_categories[config_headers[1]]
                sorted_list = list(self.longlabel_to_id[config_category].keys())
                sorted_list.sort()
                widget.addItems(sorted_list + ['Unknown'])

    @staticmethod
    def get_item_dicts():
        """
        Creates two item dicts and returns them, both are sorted by the Slot type
        :return: First dict is LongLabel to ID, second is ID to LongLabel
        """
        json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rocket_league_items.json')
        with open(json_path, 'r', encoding='utf8') as f:
            # NOTE(review): json.load's parse_int expects a callable, not
            # True; this only works if the JSON never contains bare
            # integers (IDs appear to be strings, given int(item['ID'])
            # below) — confirm before touching.
            sorted_items = json.load(f, parse_int=True).items()
        longlabel_to_id = {}
        id_to_longlabel = {}
        for slot, items in sorted_items:
            type_longlabel_to_id = {item['LongLabel']: int(item['ID']) for item in items}
            type_id_to_longlabel = {int(item['ID']): item['LongLabel'] for item in items}
            longlabel_to_id[slot] = type_longlabel_to_id
            id_to_longlabel[slot] = type_id_to_longlabel
        return longlabel_to_id, id_to_longlabel
class AgentCustomisationDialog(BasePresetEditor, Ui_AgentPresetCustomiser):
    """
    The class extending BasePresetEditor for some agent config specific edits, e.g. selecting the agent file
    """

    def __init__(self, qt_gui):
        super().__init__(qt_gui, qt_gui.agent_presets, qt_gui.agent_preset_combobox, AgentPreset)

    def load_variables(self):
        # Layout holding the dynamically generated parameter widgets.
        super().load_variables()
        self.grid_layout = QtWidgets.QGridLayout(self.agent_parameters_groupbox)
        self.extra_parameter_widgets = []

    def load_preset_cfg(self) -> AgentPreset:
        # Narrows the base-class return annotation to AgentPreset.
        return super().load_preset_cfg()

    def connect_functions(self):
        super().connect_functions()
        self.python_file_select_button.clicked.connect(self.load_python_file)

    def load_selected_preset(self):
        """Show the preset's python file and rebuild its parameter widgets."""
        super().load_selected_preset()
        preset = self.get_current_preset()
        bot_parameters = preset.config["Bot Parameters"]
        self.preset_python_file_lineedit.setText(preset.config.get("Locations", "python_file"))
        self.add_parameters_to_gui(bot_parameters)

    def add_parameters_to_gui(self, bot_parameters):
        """Rebuild the grid of label/editor rows from the Bot Parameters."""
        # clear layout
        while self.grid_layout.count():
            child_widget = self.grid_layout.takeAt(0).widget()
            self.grid_layout.removeWidget(child_widget)
            child_widget.setParent(None)
            child_widget.deleteLater()
        config_values = bot_parameters.values
        parent = self.agent_parameters_groupbox
        for row_no, (key, config_value) in enumerate(config_values.items()):
            label = QtWidgets.QLabel(str(key) + ':', parent)
            label.setObjectName("label_%s" % key)
            self.grid_layout.addWidget(label, row_no, 0)
            self.extra_parameter_widgets.append(label)

            # config_item is bound as a default to avoid the late-binding
            # closure pitfall (each row keeps its own config value).
            def update_event(new_value, config_item=config_value):
                config_item.set_value(new_value)
                preset = self.get_current_preset()
                if self.preset_autosave_checkbox.isChecked() and preset.config_path is not None and os.path.isfile(
                        preset.config_path):
                    self.save_preset_cfg(10)

            # Pick the editor widget by the declared parameter type.
            if config_value.type is int:
                value_widget = QtWidgets.QSpinBox(parent)
                value_widget.setValue(int(config_value.get_value()))
                value_widget.valueChanged.connect(update_event)
            elif config_value.type is bool:
                value_widget = QtWidgets.QCheckBox(parent)
                value_widget.setChecked(bool(config_value.get_value()))
                value_widget.clicked.connect(update_event)
            elif config_value.type is float:
                value_widget = QtWidgets.QDoubleSpinBox(parent)
                value_widget.setValue(float(config_value.get_value()))
                value_widget.valueChanged.connect(update_event)
            else:
                # handle everything else as a string
                value_widget = QtWidgets.QLineEdit(parent)
                value_widget.setText(config_value.get_value())
                value_widget.textChanged.connect(update_event)
            value_widget.setObjectName('value_%s' % key)
            label.setToolTip(config_value.description)
            value_widget.setToolTip(config_value.description)
            self.grid_layout.addWidget(value_widget, row_no, 1)
        self.grid_layout.setColumnStretch(0, 1)
        self.grid_layout.setColumnStretch(1, 2)
        self.resize(self.minimumSizeHint())

    def load_python_file(self):
        """Let the user pick the agent's .py file and store a relative path."""
        file_path = QtWidgets.QFileDialog.getOpenFileName(self, 'Load Agent Class', '', 'Python Files (*.py)')[0]
        if not file_path or not os.path.exists(file_path):
            return
        preset = self.get_current_preset()
        try:
            preset.load_agent_class(file_path)
        except (FileNotFoundError, ModuleNotFoundError) as e:
            self.popup_message(str(e), "Invalid Python File", QtWidgets.QMessageBox.Information)
            return
        # Make the stored path relative to the config file's directory, or
        # to the python root when the preset has not been saved yet.
        if preset.config_path is None or not os.path.isfile(preset.config_path):
            start = get_python_root()
        else:
            start = os.path.dirname(preset.config_path)
        try:
            rel_path = os.path.relpath(file_path, start)
        except ValueError:
            # relpath fails across drives on Windows; fall back to absolute.
            rel_path = file_path
        preset.config.set_value("Locations", "python_file", rel_path)
        self.preset_python_file_lineedit.setText(rel_path)
        bot_parameters = preset.config["Bot Parameters"]
        self.add_parameters_to_gui(bot_parameters)
|
import os
import json
from PyQt5 import QtWidgets, QtCore
import configparser
import pathlib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidgetItem
from rlbot_legacy_gui.design.car_customisation import Ui_LoadoutPresetCustomiser
from rlbot_legacy_gui.design.agent_customisation import Ui_AgentPresetCustomiser
from rlbot_legacy_gui.presets import AgentPreset, LoadoutPreset
from rlbot.utils.file_util import get_python_root
def index_of_config_path_in_combobox(combobox, config_path):
    """Return the index of the first combobox entry whose preset has *config_path*.

    Each combobox item stores a preset object as its item data. Returns
    ``None`` when no entry matches.
    """
    return next(
        (index for index in range(combobox.count())
         if combobox.itemData(index).config_path == config_path),
        None,
    )
def index_of_config_path_in_listwidget(listwidget, config_path):
    """Return the row of the first list item whose preset has *config_path*.

    Each list item stores a preset object under ``Qt.UserRole``. Returns
    ``None`` when no row matches.
    """
    return next(
        (row for row in range(listwidget.count())
         if listwidget.item(row).data(Qt.UserRole).config_path == config_path),
        None,
    )
class BasePresetEditor(QtWidgets.QMainWindow):
    """
    The Base of the popup windows to modify a Preset; handles the basic methods of editing a preset.

    Subclasses mix in a Qt-Designer-generated ``Ui_*`` class (providing
    ``setupUi`` and the widgets referenced here, e.g. ``presets_listwidget``)
    and extend the load/save hooks.
    """

    def __init__(self, qt_gui, presets: dict, root_combobox: QtWidgets.QComboBox, preset_class):
        """
        :param qt_gui: the main window; supplies the window icon and owns the preset dicts.
        :param presets: maps a config path (or a temporary key) to a preset instance.
        :param root_combobox: main-window combobox kept in sync with the preset list.
        :param preset_class: concrete preset type instantiated for new presets.
        """
        super().__init__()
        self.setupUi(self)  # provided by the Ui_* mixin of the subclass
        self.setWindowIcon(qt_gui.windowIcon())
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.qt_gui = qt_gui
        self.presets = presets
        self.root_combobox = root_combobox
        self.preset_class = preset_class
        self.load_variables()
        self.connect_functions()
        self.update_presets_widgets()

    def popup(self):
        """Show the editor window and select the first preset in the list."""
        self.show()
        self.presets_listwidget.setCurrentRow(0)

    def load_variables(self):
        """Hook for subclasses to initialise extra state before signals are wired."""
        pass

    def get_current_preset(self):
        """Return the preset stored on the currently selected list item."""
        return self.presets_listwidget.currentItem().data(Qt.UserRole)

    def connect_functions(self):
        """Wire the common widget signals; subclasses extend this."""
        self.presets_listwidget.itemSelectionChanged.connect(self.load_selected_preset)
        self.preset_name_lineedit.editingFinished.connect(self.preset_name_changed)
        self.preset_new_pushbutton.clicked.connect(self.add_new_preset)
        self.preset_load_pushbutton.clicked.connect(self.load_preset_cfg)
        self.preset_save_pushbutton.clicked.connect(self.save_preset_cfg)

    def load_selected_preset(self):
        """Fill the right-hand pane from the selected preset; disable it when nothing is selected."""
        if len(self.presets_listwidget.selectedItems()) == 0:
            self.right_frame.setEnabled(False)
            return
        self.right_frame.setEnabled(True)
        preset = self.get_current_preset()
        self.preset_name_lineedit.setText(preset.get_name())
        self.preset_path_lineedit.setText(preset.config_path)

    def update_presets_widgets(self):
        """Rebuild the preset list widget and the main-window combobox from ``self.presets``."""
        self.presets_listwidget.clear()
        for config_path, preset in self.presets.items():
            item = QListWidgetItem(preset.get_name(), self.presets_listwidget)
            item.setData(Qt.UserRole, preset)
            self.presets_listwidget.addItem(item)

        # Also updates the combobox which you can select the agent preset for the bot through
        current_text = self.root_combobox.currentText()
        self.root_combobox.blockSignals(True)  # don't fire selection handlers while rebuilding
        self.root_combobox.clear()
        for config_path, preset in self.presets.items():
            self.root_combobox.addItem(preset.get_name(), preset)
        self.root_combobox.setCurrentText(current_text)
        self.root_combobox.blockSignals(False)

    def preset_name_changed(self):
        """Apply the name typed in the line edit to the current preset."""
        new_name = self.preset_name_lineedit.text()
        self.update_preset_name(self.get_current_preset(), new_name)

    def update_preset_name(self, preset, new_name):
        """Rename *preset* and mirror the change in its list item, if it has one."""
        preset_index = index_of_config_path_in_listwidget(self.presets_listwidget, preset.config_path)
        if preset_index is not None:
            self.presets_listwidget.item(preset_index).setText(new_name)
        preset.name = new_name

    def add_new_preset(self):
        """Create a fresh, unsaved preset under a unique name and select it."""
        name = self.validate_name("new preset", None)
        preset = self.preset_class(name)
        # Unsaved presets have no config path yet, so key them with a temp prefix.
        preset_key = "temp:" + name
        self.presets[preset_key] = preset
        self.update_presets_widgets()
        for i in range(self.presets_listwidget.count()):
            list_item = self.presets_listwidget.item(i)
            if list_item.data(Qt.UserRole) == preset:
                self.presets_listwidget.setCurrentRow(i)

    def load_preset_cfg(self):
        """Load a preset from a user-chosen ``.cfg`` file.

        :return: the loaded preset, or ``None`` when the dialog is cancelled
            or the file is invalid.
        """
        file_path = QtWidgets.QFileDialog.getOpenFileName(self, 'Load Config', '', 'Config Files (*.cfg)')[0]
        if not os.path.isfile(file_path):
            return
        if pathlib.Path(file_path).suffix != '.cfg':
            self.popup_message("This file is not a config file!", "Invalid File Extension",
                               QtWidgets.QMessageBox.Warning)
            return
        try:
            preset = self.preset_class(self.validate_name(pathlib.Path(file_path).stem, None), file_path)
        except configparser.NoSectionError:
            self.popup_message("This file does not have the right sections!", "Invalid Config File",
                               QtWidgets.QMessageBox.Warning)
            return
        # NOTE(review): keyed by display name here while saves key by config
        # path. Lookups elsewhere use preset.config_path (not the dict key),
        # so this works, but keying by path would be more consistent.
        self.presets[preset.get_name()] = preset
        self.update_presets_widgets()
        self.presets_listwidget.setCurrentItem(
            self.presets_listwidget.findItems(preset.get_name(), QtCore.Qt.MatchExactly)[0])
        return preset

    def save_preset_cfg(self, time_out=0):
        """Save the current preset, prompting for a file location when it has none.

        :param time_out: forwarded to ``preset.save_config`` (status-bar message timeout).
        """
        preset = self.get_current_preset()
        if preset.config_path is None:
            file_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Config', '', 'Config Files (*.cfg)')[0]
            # getSaveFileName returns '' (never None) when cancelled.
            if not file_path:
                return
            preset.config_path = os.path.realpath(file_path)

            # Re-key this preset by its new config path: drop every old key
            # pointing at it, then add it back under the path. (A single dict
            # comprehension suffices; the old follow-up deletion loop was dead
            # code since the comprehension had already removed those keys.)
            self.presets = {k: v for k, v in self.presets.items() if v != preset}
            self.presets[preset.config_path] = preset

            new_name = self.validate_name(pathlib.Path(preset.config_path).stem, preset)
            self.update_preset_name(preset, new_name)

            preset.save_config(time_out=time_out, message_out=self.statusbar.showMessage)
            self.load_selected_preset()
            self.update_presets_widgets()
        else:
            preset.save_config(time_out=time_out, message_out=self.statusbar.showMessage)

    def validate_name(self, name, preset, copy_index=None):
        """Return *name*, suffixed with ``" (n)"`` when needed, so it is unique among presets.

        *preset* may already own the candidate name; in that case it is kept.
        """
        value = name
        if copy_index is not None:
            value += " (" + str(copy_index) + ")"
        for key, p in self.presets.items():
            if p.get_name() == value:
                if p is preset:
                    return value
                # Name taken by another preset: retry with the next copy index.
                return self.validate_name(name, preset, (copy_index or 1) + 1)
        return value

    def popup_message(self, message: str, title: str, icon=QtWidgets.QMessageBox.Warning):
        """Show a modal message box with a single OK button."""
        popup = QtWidgets.QMessageBox(self)
        popup.setIcon(icon)
        popup.setWindowTitle(title)
        popup.setText(message)
        popup.setStandardButtons(QtWidgets.QMessageBox.Ok)
        popup.exec_()
        return
class CarCustomisationDialog(BasePresetEditor, Ui_LoadoutPresetCustomiser):
    """
    The class extending BasePresetEditor to allow some loadout-preset-specific editing,
    like keeping item-ID spinboxes and item-name comboboxes in sync.
    """

    def __init__(self, qt_gui):
        super().__init__(qt_gui, qt_gui.loadout_presets, qt_gui.loadout_preset_combobox, LoadoutPreset)

    def load_variables(self):
        """Build the widget/config lookup tables and prefill the item comboboxes."""
        super().load_variables()
        self.create_config_headers_dicts()
        self.longlabel_to_id, self.id_to_longlabel = self.get_item_dicts()
        self.prefill_comboboxes()

    def load_preset_cfg(self) -> LoadoutPreset:
        return super().load_preset_cfg()

    def connect_functions(self):
        """Additionally wire every item spinbox/combobox to the sync handler."""
        super().connect_functions()
        for config_widget in self.config_widgets_to_headers:
            if isinstance(config_widget, QtWidgets.QComboBox):
                config_widget.currentIndexChanged.connect(self.update_spinbox_and_combobox)
            elif isinstance(config_widget, QtWidgets.QAbstractSpinBox):
                config_widget.valueChanged.connect(self.update_spinbox_and_combobox)

    def update_spinbox_and_combobox(self):
        """Sync the counterpart widget when an item widget changes, then write the config.

        Updates the corresponding widget (i.e. update spinbox if combobox edited)
        and stores the resulting item id in the current preset's config.
        """
        updated_widget = self.sender()
        # config_headers contains the config_header (team) and the config_item (ie decal)
        config_headers = self.config_widgets_to_headers[updated_widget]
        # widget_list contains the spinbox and combobox (if it exists) associated with that item
        widget_list = self.config_headers_to_widgets[config_headers[0]][config_headers[1]]
        item_id = 0
        if len(widget_list) != 1:
            # there is a counterpart widget to update
            for widget_to_update in widget_list:
                if widget_to_update is updated_widget:
                    # no need to update itself, therefore continue
                    continue
                config_headers = self.config_widgets_to_headers[widget_to_update]
                category = self.config_headers_to_categories[config_headers[1]]
                if isinstance(widget_to_update, QtWidgets.QComboBox):
                    # spinbox edited -> select the matching label in the combobox
                    item_id = updated_widget.value()
                    try:
                        # try to get item name from id in self.id_to_longlabel
                        item_name = self.id_to_longlabel[category][item_id]
                        try:
                            # try to select item in combobox
                            widget_to_update.setCurrentText(item_name)
                        except ValueError:
                            # item ID entered does not belong in this category
                            widget_to_update.setCurrentText('Unknown')
                    except KeyError:
                        # unknown item selected, the id exists in no category
                        widget_to_update.setCurrentText('Unknown')
                elif isinstance(widget_to_update, QtWidgets.QAbstractSpinBox):
                    # combobox edited -> write the matching id into the spinbox
                    item_longlabel = updated_widget.currentText()
                    if item_longlabel == "Unknown":
                        item_id = 0
                        continue
                    # get the id of the item; if this throws, the dict got somehow messed up,
                    # since the combobox was originally loaded from the same dict
                    item_id = self.longlabel_to_id[category][item_longlabel]
                    # set the spinbox to the new value
                    widget_to_update.setValue(item_id)
        else:
            item_id = updated_widget.value()
        preset = self.get_current_preset()
        preset.config.set_value(config_headers[0], config_headers[1], item_id)
        if self.preset_autosave_checkbox.isChecked() and preset.config_path is not None and os.path.isfile(
                preset.config_path):
            self.save_preset_cfg(10)

    def load_selected_preset(self):
        """Push every config value of the selected preset into its spinbox widget."""
        super().load_selected_preset()
        for config_header_key, config_header in self.get_current_preset().config.headers.items():
            # for each config header, update the associated widget values
            for config_value_key in config_header.values:
                if config_value_key == "name":
                    continue
                try:
                    widgets = self.config_headers_to_widgets[config_header_key][config_value_key]
                    try:
                        for widget in widgets:
                            # only update spinboxes - let comboboxes update through spinbox update detection.
                            if isinstance(widget, QtWidgets.QAbstractSpinBox):
                                widget.setValue(config_header.get(config_value_key))
                    except Exception:
                        # best-effort: a malformed config value must not break loading
                        # the rest of the preset (was a bare except; narrowed so
                        # KeyboardInterrupt/SystemExit are no longer swallowed)
                        pass
                except KeyError:
                    # unknown loadout config header entry; no widget exists for it
                    pass

    def create_config_headers_dicts(self):
        """
        Creates the config_headers_to_widgets and config_widgets_to_headers and config_headers_to_categories dicts
        """
        self.config_headers_to_widgets = {
            # blue stuff
            'Bot Loadout': {
                'team_color_id': (self.blue_primary_spinbox,),
                'custom_color_id': (self.blue_secondary_spinbox,),
                'car_id': (self.blue_car_spinbox, self.blue_car_combobox),
                'decal_id': (self.blue_decal_spinbox, self.blue_decal_combobox),
                'wheels_id': (self.blue_wheels_spinbox, self.blue_wheels_combobox),
                'boost_id': (self.blue_boost_spinbox, self.blue_boost_combobox),
                'antenna_id': (self.blue_antenna_spinbox, self.blue_antenna_combobox),
                'hat_id': (self.blue_hat_spinbox, self.blue_hat_combobox),
                'paint_finish_id': (self.blue_paint_finish_spinbox, self.blue_paint_finish_combobox),
                'custom_finish_id': (self.blue_custom_finish_spinbox, self.blue_custom_finish_combobox),
                'engine_audio_id': (self.blue_engine_spinbox, self.blue_engine_combobox),
                'trails_id': (self.blue_trails_spinbox, self.blue_trails_combobox),
                'goal_explosion_id': (self.blue_goal_explosion_spinbox, self.blue_goal_explosion_combobox)
            },
            'Bot Loadout Orange': {
                'team_color_id': (self.orange_primary_spinbox,),
                'custom_color_id': (self.orange_secondary_spinbox,),
                'car_id': (self.orange_car_spinbox, self.orange_car_combobox),
                'decal_id': (self.orange_decal_spinbox, self.orange_decal_combobox),
                'wheels_id': (self.orange_wheels_spinbox, self.orange_wheels_combobox),
                'boost_id': (self.orange_boost_spinbox, self.orange_boost_combobox),
                'antenna_id': (self.orange_antenna_spinbox, self.orange_antenna_combobox),
                'hat_id': (self.orange_hat_spinbox, self.orange_hat_combobox),
                'paint_finish_id': (self.orange_paint_finish_spinbox, self.orange_paint_finish_combobox),
                'custom_finish_id': (self.orange_custom_finish_spinbox, self.orange_custom_finish_combobox),
                'engine_audio_id': (self.orange_engine_spinbox, self.orange_engine_combobox),
                'trails_id': (self.orange_trails_spinbox, self.orange_trails_combobox),
                'goal_explosion_id': (self.orange_goal_explosion_spinbox, self.orange_goal_explosion_combobox)
            },
        }

        # Reverse lookup: widget -> (config header, config item key).
        self.config_widgets_to_headers = {}
        for header_1, _field_dict in self.config_headers_to_widgets.items():
            for header_2, _widgets in _field_dict.items():
                for _widget in _widgets:
                    self.config_widgets_to_headers[_widget] = (header_1, header_2)

        # Maps a config item key to the item-category (Slot) name used in the item dicts.
        self.config_headers_to_categories = {
            'car_id': 'Body',
            'decal_id': 'Decal',
            'wheels_id': 'Wheels',
            'boost_id': 'Rocket Boost',
            'antenna_id': 'Antenna',
            'hat_id': 'Topper',
            'paint_finish_id': 'Paint Finish',
            'custom_finish_id': 'Paint Finish',
            'engine_audio_id': 'Engine Audio',
            'trails_id': 'Trail',
            'goal_explosion_id': 'Goal Explosion'
        }

    def prefill_comboboxes(self):
        """Populate every item combobox with its category's labels (sorted) plus 'Unknown'."""
        for widget, config_headers in self.config_widgets_to_headers.items():
            if isinstance(widget, QtWidgets.QComboBox):
                # (removed a redundant re-lookup of config_headers here; the
                # dict iteration above already yields it)
                config_category = self.config_headers_to_categories[config_headers[1]]
                sorted_list = list(self.longlabel_to_id[config_category].keys())
                sorted_list.sort()
                widget.addItems(sorted_list + ['Unknown'])

    @staticmethod
    def get_item_dicts():
        """
        Creates two item dicts and returns them, both keyed by the Slot type
        :return: First dict is LongLabel to ID, second is ID to LongLabel
        """
        json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rocket_league_items.json')
        with open(json_path, 'r', encoding='utf8') as f:
            # BUGFIX: json.load's parse_int must be a callable, not a bool;
            # the previous parse_int=True would raise TypeError on any bare
            # integer in the JSON. IDs are converted explicitly with int() below.
            items_by_slot = json.load(f).items()
        longlabel_to_id = {}
        id_to_longlabel = {}
        for slot, items in items_by_slot:
            longlabel_to_id[slot] = {item['LongLabel']: int(item['ID']) for item in items}
            id_to_longlabel[slot] = {int(item['ID']): item['LongLabel'] for item in items}
        return longlabel_to_id, id_to_longlabel
class AgentCustomisationDialog(BasePresetEditor, Ui_AgentPresetCustomiser):
    """
    The class extending BasePresetEditor for agent-config-specific edits,
    e.g. selecting the agent python file and editing its bot parameters.
    """

    def __init__(self, qt_gui):
        super().__init__(qt_gui, qt_gui.agent_presets, qt_gui.agent_preset_combobox, AgentPreset)

    def load_variables(self):
        """Create the layout that holds one label/value-widget row per bot parameter."""
        super().load_variables()
        self.grid_layout = QtWidgets.QGridLayout(self.agent_parameters_groupbox)
        self.extra_parameter_widgets = []

    def load_preset_cfg(self) -> AgentPreset:
        return super().load_preset_cfg()

    def connect_functions(self):
        """Additionally wire the agent-file picker button."""
        super().connect_functions()
        self.python_file_select_button.clicked.connect(self.load_python_file)

    def load_selected_preset(self):
        """Show the selected preset's python file path and rebuild its parameter widgets."""
        super().load_selected_preset()
        preset = self.get_current_preset()
        bot_parameters = preset.config["Bot Parameters"]
        self.preset_python_file_lineedit.setText(preset.config.get("Locations", "python_file"))
        self.add_parameters_to_gui(bot_parameters)

    def add_parameters_to_gui(self, bot_parameters):
        """Rebuild the parameter grid: one row (label + typed editor widget) per config value.

        The editor widget type is chosen from the config value's declared type
        (int/bool/float, anything else as a string), and each widget writes
        back to its config value on change.
        """
        # clear layout
        while self.grid_layout.count():
            child_widget = self.grid_layout.takeAt(0).widget()
            self.grid_layout.removeWidget(child_widget)
            child_widget.setParent(None)
            child_widget.deleteLater()
        config_values = bot_parameters.values
        parent = self.agent_parameters_groupbox
        for row_no, (key, config_value) in enumerate(config_values.items()):
            label = QtWidgets.QLabel(str(key) + ':', parent)
            label.setObjectName("label_%s" % key)
            self.grid_layout.addWidget(label, row_no, 0)
            self.extra_parameter_widgets.append(label)

            # config_item=config_value binds the loop variable at definition
            # time, avoiding the late-binding-closure pitfall; autosave kicks
            # in only when the preset already has a config file on disk.
            def update_event(new_value, config_item=config_value):
                config_item.set_value(new_value)
                preset = self.get_current_preset()
                if self.preset_autosave_checkbox.isChecked() and preset.config_path is not None and os.path.isfile(
                        preset.config_path):
                    self.save_preset_cfg(10)

            if config_value.type is int:
                value_widget = QtWidgets.QSpinBox(parent)
                value_widget.setValue(int(config_value.get_value()))
                value_widget.valueChanged.connect(update_event)
            elif config_value.type is bool:
                value_widget = QtWidgets.QCheckBox(parent)
                value_widget.setChecked(bool(config_value.get_value()))
                value_widget.clicked.connect(update_event)
            elif config_value.type is float:
                value_widget = QtWidgets.QDoubleSpinBox(parent)
                value_widget.setValue(float(config_value.get_value()))
                value_widget.valueChanged.connect(update_event)
            else:
                # handle everything else as a string
                value_widget = QtWidgets.QLineEdit(parent)
                value_widget.setText(config_value.get_value())
                value_widget.textChanged.connect(update_event)
            value_widget.setObjectName('value_%s' % key)
            label.setToolTip(config_value.description)
            value_widget.setToolTip(config_value.description)
            self.grid_layout.addWidget(value_widget, row_no, 1)
        self.grid_layout.setColumnStretch(0, 1)
        self.grid_layout.setColumnStretch(1, 2)
        self.resize(self.minimumSizeHint())

    def load_python_file(self):
        """Ask the user for an agent ``.py`` file, validate it, and store its path in the preset.

        The path is written relative to the config file's directory (or the
        python root for unsaved presets), and the parameter widgets are
        rebuilt from the newly loaded agent class.
        """
        file_path = QtWidgets.QFileDialog.getOpenFileName(self, 'Load Agent Class', '', 'Python Files (*.py)')[0]
        if not file_path or not os.path.exists(file_path):
            return
        preset = self.get_current_preset()
        try:
            preset.load_agent_class(file_path)
        except (FileNotFoundError, ModuleNotFoundError) as e:
            self.popup_message(str(e), "Invalid Python File", QtWidgets.QMessageBox.Information)
            return
        # Prefer a path relative to the preset's config file so configs stay portable.
        if preset.config_path is None or not os.path.isfile(preset.config_path):
            start = get_python_root()
        else:
            start = os.path.dirname(preset.config_path)
        try:
            rel_path = os.path.relpath(file_path, start)
        except ValueError:
            # On Windows, relpath raises ValueError across drives; keep the absolute path.
            rel_path = file_path
        preset.config.set_value("Locations", "python_file", rel_path)
        self.preset_python_file_lineedit.setText(rel_path)
        bot_parameters = preset.config["Bot Parameters"]
        self.add_parameters_to_gui(bot_parameters)
|
en
| 0.722088
|
The Base of the popup windows to modify a Preset, handles the basic method of editing the preset # Also updates the combobox which you can select the agent preset for the bot through # Remove this preset from the dict since it is not currently keyed by config path # Add it back keyed by config path The class extending BasePresetEditor to allow some loadout preset specific preset editing, like handling item names # Updates the corresponding widget (ie update spinbox if combobox edited) # config_headers contains the config_header (team) and the config_item (ie decal) # widget_list contains the spinbox and combobox (if it exists) associated with that item # there is a widget to update # no need to update itself, therefore continue # update combobox by selecting decal_labels.index(self.categorised_items[updated_widget.value()]) # try to get item name from id in self.id_to_longlabel # try to select item in combobox # print('Error: Item ID entered does not belong in this category. (%s)' % item_name) # unknown item selected, the id exists in no category # print('Unknown item ID entered (%s) in %s' % (item_id, widget_to_update.objectName())) # get the id of the item, if this throws an error the dict got somehow messed up, # since the combobox was originally loaded from the same dict # set the spinbox to the new value # for config_header in config, update widget value # only update spinboxes - let comboboxes update through spinbox update detection. # widget.setValue(config_header_value.value) # print("An error occurred") # print('Unknown loadout config header entry: %s' % config_value_key) Creates the config_headers_to_widgets and config_widgets_to_headers and config_headers_to_categories dicts # blue stuff Creates two item dicts and returns them, both are sorted by the Slot type :return: First dict is LongLabel to ID, second is ID to LongLabel The class extending BasePresetEditor for some agent config specific edits, e.g. 
selecting the agent file # clear layout # handle everything else as a string
| 2.241856
| 2
|
superset/views/datasource/schemas.py
|
razzius/superset
| 18,621
|
6628761
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from marshmallow import fields, post_load, Schema
from typing_extensions import TypedDict
class ExternalMetadataParams(TypedDict):
    """Normalized parameters identifying the table whose external metadata is requested."""

    datasource_type: str
    database_name: str
    schema_name: str
    table_name: str
# Query-parameter schema for the external-metadata endpoint:
# every parameter is declared as a plain string.
get_external_metadata_schema = dict.fromkeys(
    ("datasource_type", "database_name", "schema_name", "table_name"),
    "string",
)
class ExternalMetadataSchema(Schema):
    """Marshmallow schema validating external-metadata requests."""

    datasource_type = fields.Str(required=True)
    database_name = fields.Str(required=True)
    schema_name = fields.Str(allow_none=True)
    table_name = fields.Str(required=True)

    # pylint: disable=no-self-use,unused-argument
    @post_load
    def normalize(
        self, data: ExternalMetadataParams, **kwargs: Any,
    ) -> ExternalMetadataParams:
        """Coerce the validated payload into ExternalMetadataParams.

        NOTE(review): schema_name allows None (allow_none=True), and
        ``data.get("schema_name", "")`` only defaults when the key is absent —
        an explicit ``schema_name: null`` passes through as None; confirm
        downstream handles that.
        """
        return ExternalMetadataParams(
            datasource_type=data["datasource_type"],
            database_name=data["database_name"],
            schema_name=data.get("schema_name", ""),
            table_name=data["table_name"],
        )
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from marshmallow import fields, post_load, Schema
from typing_extensions import TypedDict
class ExternalMetadataParams(TypedDict):
    """Normalized parameters identifying the table whose external metadata is requested."""

    datasource_type: str
    database_name: str
    schema_name: str
    table_name: str
# Query-parameter schema for the external-metadata endpoint:
# every parameter is declared as a plain string.
get_external_metadata_schema = dict.fromkeys(
    ("datasource_type", "database_name", "schema_name", "table_name"),
    "string",
)
class ExternalMetadataSchema(Schema):
    """Marshmallow schema validating external-metadata requests."""

    datasource_type = fields.Str(required=True)
    database_name = fields.Str(required=True)
    schema_name = fields.Str(allow_none=True)
    table_name = fields.Str(required=True)

    # pylint: disable=no-self-use,unused-argument
    @post_load
    def normalize(
        self, data: ExternalMetadataParams, **kwargs: Any,
    ) -> ExternalMetadataParams:
        """Coerce the validated payload into ExternalMetadataParams.

        NOTE(review): schema_name allows None (allow_none=True), and
        ``data.get("schema_name", "")`` only defaults when the key is absent —
        an explicit ``schema_name: null`` passes through as None; confirm
        downstream handles that.
        """
        return ExternalMetadataParams(
            datasource_type=data["datasource_type"],
            database_name=data["database_name"],
            schema_name=data.get("schema_name", ""),
            table_name=data["table_name"],
        )
|
en
| 0.852985
|
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-self-use,unused-argument
| 1.892687
| 2
|
django-rgd-geometry/tests/test_project/urls.py
|
ResonantGeoData/ResonantGeoData
| 40
|
6628762
|
<reponame>ResonantGeoData/ResonantGeoData<gh_stars>10-100
from django.urls import include, path
# Test-project URL routes: mount the core RGD app and the geometry app
# under test-only prefixes.
urlpatterns = [
    # Make this distinct from typical production values, to ensure it works dynamically
    path('rgd_test/', include('rgd.urls')),
    path('rgd_geometry_test/', include('rgd_geometry.urls')),
]
|
from django.urls import include, path
# Test-project URL routes: mount the core RGD app and the geometry app
# under test-only prefixes.
urlpatterns = [
    # Make this distinct from typical production values, to ensure it works dynamically
    path('rgd_test/', include('rgd.urls')),
    path('rgd_geometry_test/', include('rgd_geometry.urls')),
]
|
en
| 0.868473
|
# Make this distinct from typical production values, to ensure it works dynamically
| 1.503417
| 2
|
nexus/meta_api/proto/search_service_pb2.py
|
RobbiNespu/hyperboria
| 54
|
6628763
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nexus/meta_api/proto/search_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nexus.models.proto import \
typed_document_pb2 as nexus_dot_models_dot_proto_dot_typed__document__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nexus/meta_api/proto/search_service.proto',
package='nexus.meta_api.proto',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n)nexus/meta_api/proto/search_service.proto\x12\x14nexus.meta_api.proto\x1a\'nexus/models/proto/typed_document.proto\"l\n\x0eScoredDocument\x12\x39\n\x0etyped_document\x18\x01 \x01(\x0b\x32!.nexus.models.proto.TypedDocument\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x10\n\x08position\x18\x03 \x01(\r\"b\n\x0eSearchResponse\x12>\n\x10scored_documents\x18\x01 \x03(\x0b\x32$.nexus.meta_api.proto.ScoredDocument\x12\x10\n\x08has_next\x18\x02 \x01(\x08\"b\n\rSearchRequest\x12\x0f\n\x07schemas\x18\x01 \x03(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0c\n\x04page\x18\x03 \x01(\r\x12\x11\n\tpage_size\x18\x04 \x01(\r\x12\x10\n\x08language\x18\x05 \x01(\t2_\n\x06Search\x12U\n\x06search\x12#.nexus.meta_api.proto.SearchRequest\x1a$.nexus.meta_api.proto.SearchResponse\"\x00\x62\x06proto3'
,
dependencies=[nexus_dot_models_dot_proto_dot_typed__document__pb2.DESCRIPTOR,])
_SCOREDDOCUMENT = _descriptor.Descriptor(
name='ScoredDocument',
full_name='nexus.meta_api.proto.ScoredDocument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='typed_document', full_name='nexus.meta_api.proto.ScoredDocument.typed_document', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score', full_name='nexus.meta_api.proto.ScoredDocument.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position', full_name='nexus.meta_api.proto.ScoredDocument.position', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=216,
)
_SEARCHRESPONSE = _descriptor.Descriptor(
name='SearchResponse',
full_name='nexus.meta_api.proto.SearchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='scored_documents', full_name='nexus.meta_api.proto.SearchResponse.scored_documents', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_next', full_name='nexus.meta_api.proto.SearchResponse.has_next', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=316,
)
_SEARCHREQUEST = _descriptor.Descriptor(
name='SearchRequest',
full_name='nexus.meta_api.proto.SearchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schemas', full_name='nexus.meta_api.proto.SearchRequest.schemas', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='query', full_name='nexus.meta_api.proto.SearchRequest.query', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='nexus.meta_api.proto.SearchRequest.page', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='nexus.meta_api.proto.SearchRequest.page_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='language', full_name='nexus.meta_api.proto.SearchRequest.language', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=416,
)
_SCOREDDOCUMENT.fields_by_name['typed_document'].message_type = nexus_dot_models_dot_proto_dot_typed__document__pb2._TYPEDDOCUMENT
_SEARCHRESPONSE.fields_by_name['scored_documents'].message_type = _SCOREDDOCUMENT
DESCRIPTOR.message_types_by_name['ScoredDocument'] = _SCOREDDOCUMENT
DESCRIPTOR.message_types_by_name['SearchResponse'] = _SEARCHRESPONSE
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ScoredDocument = _reflection.GeneratedProtocolMessageType('ScoredDocument', (_message.Message,), {
'DESCRIPTOR' : _SCOREDDOCUMENT,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.ScoredDocument)
})
_sym_db.RegisterMessage(ScoredDocument)
SearchResponse = _reflection.GeneratedProtocolMessageType('SearchResponse', (_message.Message,), {
'DESCRIPTOR' : _SEARCHRESPONSE,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchResponse)
})
_sym_db.RegisterMessage(SearchResponse)
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHREQUEST,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchRequest)
})
_sym_db.RegisterMessage(SearchRequest)
_SEARCH = _descriptor.ServiceDescriptor(
name='Search',
full_name='nexus.meta_api.proto.Search',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=418,
serialized_end=513,
methods=[
_descriptor.MethodDescriptor(
name='search',
full_name='nexus.meta_api.proto.Search.search',
index=0,
containing_service=None,
input_type=_SEARCHREQUEST,
output_type=_SEARCHRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SEARCH)
DESCRIPTOR.services_by_name['Search'] = _SEARCH
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nexus/meta_api/proto/search_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nexus.models.proto import \
typed_document_pb2 as nexus_dot_models_dot_proto_dot_typed__document__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nexus/meta_api/proto/search_service.proto',
package='nexus.meta_api.proto',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n)nexus/meta_api/proto/search_service.proto\x12\x14nexus.meta_api.proto\x1a\'nexus/models/proto/typed_document.proto\"l\n\x0eScoredDocument\x12\x39\n\x0etyped_document\x18\x01 \x01(\x0b\x32!.nexus.models.proto.TypedDocument\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x10\n\x08position\x18\x03 \x01(\r\"b\n\x0eSearchResponse\x12>\n\x10scored_documents\x18\x01 \x03(\x0b\x32$.nexus.meta_api.proto.ScoredDocument\x12\x10\n\x08has_next\x18\x02 \x01(\x08\"b\n\rSearchRequest\x12\x0f\n\x07schemas\x18\x01 \x03(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0c\n\x04page\x18\x03 \x01(\r\x12\x11\n\tpage_size\x18\x04 \x01(\r\x12\x10\n\x08language\x18\x05 \x01(\t2_\n\x06Search\x12U\n\x06search\x12#.nexus.meta_api.proto.SearchRequest\x1a$.nexus.meta_api.proto.SearchResponse\"\x00\x62\x06proto3'
,
dependencies=[nexus_dot_models_dot_proto_dot_typed__document__pb2.DESCRIPTOR,])
_SCOREDDOCUMENT = _descriptor.Descriptor(
name='ScoredDocument',
full_name='nexus.meta_api.proto.ScoredDocument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='typed_document', full_name='nexus.meta_api.proto.ScoredDocument.typed_document', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score', full_name='nexus.meta_api.proto.ScoredDocument.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position', full_name='nexus.meta_api.proto.ScoredDocument.position', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=216,
)
_SEARCHRESPONSE = _descriptor.Descriptor(
name='SearchResponse',
full_name='nexus.meta_api.proto.SearchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='scored_documents', full_name='nexus.meta_api.proto.SearchResponse.scored_documents', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_next', full_name='nexus.meta_api.proto.SearchResponse.has_next', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=316,
)
_SEARCHREQUEST = _descriptor.Descriptor(
name='SearchRequest',
full_name='nexus.meta_api.proto.SearchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schemas', full_name='nexus.meta_api.proto.SearchRequest.schemas', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='query', full_name='nexus.meta_api.proto.SearchRequest.query', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='nexus.meta_api.proto.SearchRequest.page', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='nexus.meta_api.proto.SearchRequest.page_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='language', full_name='nexus.meta_api.proto.SearchRequest.language', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=416,
)
_SCOREDDOCUMENT.fields_by_name['typed_document'].message_type = nexus_dot_models_dot_proto_dot_typed__document__pb2._TYPEDDOCUMENT
_SEARCHRESPONSE.fields_by_name['scored_documents'].message_type = _SCOREDDOCUMENT
DESCRIPTOR.message_types_by_name['ScoredDocument'] = _SCOREDDOCUMENT
DESCRIPTOR.message_types_by_name['SearchResponse'] = _SEARCHRESPONSE
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ScoredDocument = _reflection.GeneratedProtocolMessageType('ScoredDocument', (_message.Message,), {
'DESCRIPTOR' : _SCOREDDOCUMENT,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.ScoredDocument)
})
_sym_db.RegisterMessage(ScoredDocument)
SearchResponse = _reflection.GeneratedProtocolMessageType('SearchResponse', (_message.Message,), {
'DESCRIPTOR' : _SEARCHRESPONSE,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchResponse)
})
_sym_db.RegisterMessage(SearchResponse)
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHREQUEST,
'__module__' : 'nexus.meta_api.proto.search_service_pb2'
# @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchRequest)
})
_sym_db.RegisterMessage(SearchRequest)
_SEARCH = _descriptor.ServiceDescriptor(
name='Search',
full_name='nexus.meta_api.proto.Search',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=418,
serialized_end=513,
methods=[
_descriptor.MethodDescriptor(
name='search',
full_name='nexus.meta_api.proto.Search.search',
index=0,
containing_service=None,
input_type=_SEARCHREQUEST,
output_type=_SEARCHRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SEARCH)
DESCRIPTOR.services_by_name['Search'] = _SEARCH
# @@protoc_insertion_point(module_scope)
|
en
| 0.255381
|
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: nexus/meta_api/proto/search_service.proto Generated protocol buffer code. # @@protoc_insertion_point(imports) #.nexus.meta_api.proto.SearchRequest\x1a$.nexus.meta_api.proto.SearchResponse\"\x00\x62\x06proto3' # @@protoc_insertion_point(class_scope:nexus.meta_api.proto.ScoredDocument) # @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchResponse) # @@protoc_insertion_point(class_scope:nexus.meta_api.proto.SearchRequest) # @@protoc_insertion_point(module_scope)
| 1.023087
| 1
|
The dawn of Otrozhny/gameIO.py
|
SeoFernando25/The-dawn-of-Otrozhny
| 0
|
6628764
|
<gh_stars>0
# Used this for serialization
# and deserialization of files in the game
import pickle
import levelData
import entities
import os
from functools import lru_cache
import pygame
PATH_ROOT = os.getcwd()
MAPS_PATH = os.path.join(PATH_ROOT, "Maps")
def list_maps():
    """Return (full_path, map_name) tuples for every file in the Maps folder.

    Creates the Maps directory first if it does not exist yet (returning
    an empty list in that case).
    """
    map_names = []
    if not os.path.exists(MAPS_PATH):
        os.mkdir(MAPS_PATH)
        return map_names
    for files in os.listdir(MAPS_PATH):
        # [:-4] strips the 4-character ".map" extension to get the bare
        # map name. (The original `files[-4]` indexed a single character
        # instead of slicing, so the name was always one letter.)
        map_names.append((os.path.join(MAPS_PATH, files), files[:-4]))
    return map_names
def get_files_paths_from_folder(folder, *folders):
    """Return the full paths of every entry under PATH_ROOT/folder/*folders.

    Returns None when the directory does not exist.
    """
    target_dir = os.path.join(PATH_ROOT, folder, *folders)
    if not os.path.exists(target_dir):
        return None
    return [os.path.join(target_dir, entry) for entry in os.listdir(target_dir)]
def load_level(level_path):
    """Deserialize a pickled level file and hand it to the Level loader.

    NOTE(review): pickle.load on untrusted files can execute arbitrary
    code — map files should only come from trusted sources.
    """
    with open(level_path, "rb") as level:
        level_data = pickle.load(level)
    levelData.Level.load(level_data)
def load_level_object(level_path):
    """Read a pickled level file and return the deserialized object."""
    with open(level_path, "rb") as handle:
        return pickle.load(handle)
def save_level(level_name, level):
    """Pickle *level* into Maps/<level_name>.map.

    Uses a context manager so the file handle is always closed — the
    original passed a bare open() to pickle.dump and never closed it.
    """
    with open(os.path.join(MAPS_PATH, str(level_name) + ".map"), "wb") as out:
        pickle.dump(level, out)
# The lru cache memoizes loaded sprites so the same surface object is
# reused instead of being re-created on every call.
@lru_cache(maxsize=256)
def get_sprite(sprite_pack: str, sprite_position: int):
    """Load sprite number *sprite_position* from *sprite_pack*'s Sprites folder.

    Falls back to the pack's first file when the position is out of range;
    returns None when the Sprites folder does not exist.
    """
    filenames_list = get_files_paths_from_folder("Assets", sprite_pack, "Sprites")
    if filenames_list is None:
        return None

    def _load(path):
        # (152, 0, 136) is the magenta key colour marking transparency.
        img = pygame.image.load(path).convert_alpha()
        img.set_colorkey((152, 0, 136))
        return img

    ordered = sorted(filenames_list)
    if 0 <= sprite_position < len(ordered):
        return _load(ordered[sprite_position])
    # Out-of-range position: keep the original fallback of the first
    # (unsorted) file in the folder.
    return _load(filenames_list[0])
def get_audio(sprite_pack, state):
    """Return a random pygame Sound from Assets/<sprite_pack>/<state>, or None.

    Returns None when the folder does not exist. Not cached on purpose:
    a different random sound should be picked each call.
    """
    import random  # local import kept to match the original module layout
    filenames_list = get_files_paths_from_folder("Assets", sprite_pack, str(state))
    if filenames_list is None:
        return None
    # random.choice is clearer than indexing with randint(0, len - 1).
    return pygame.mixer.Sound(random.choice(filenames_list))
@lru_cache(maxsize=8)
def get_cached_audio(folder_name, sub_folder):
    """Load (and memoize) the first sound file in Assets/<folder_name>/<sub_folder>.

    Returns None when the folder does not exist.
    """
    filenames_list = get_files_paths_from_folder("Assets", folder_name, sub_folder)
    if filenames_list is None:
        return None
    return pygame.mixer.Sound(filenames_list[0])
|
# Used this for serialization
# and deserialization of files in the game
import pickle
import levelData
import entities
import os
from functools import lru_cache
import pygame
PATH_ROOT = os.getcwd()
MAPS_PATH = os.path.join(PATH_ROOT, "Maps")
def list_maps():
map_names = []
if not os.path.exists(MAPS_PATH):
os.mkdir(MAPS_PATH)
return map_names
for files in os.listdir(MAPS_PATH):
#[-4] to remove extension and just get the map name
map_names.append( (os.path.join(MAPS_PATH, files), files[-4] ))
return map_names
def get_files_paths_from_folder(folder, *folders):
file_paths = []
curPath = os.path.join(PATH_ROOT, folder, *folders)
if not os.path.exists(curPath):
return None
for file in os.listdir(curPath):
file_paths.append(os.path.join(curPath, file))
return file_paths
def load_level(level_path):
with open(level_path, "rb") as level:
level_data = pickle.load(level)
levelData.Level.load(level_data)
def load_level_object(level_path):
with open(level_path, "rb") as level:
level_data = pickle.load(level)
return level_data
def save_level(level_name, level):
pickle.dump(level, open( os.path.join(MAPS_PATH, str(level_name) + ".map"), "wb"))
# The lru cache caches the results of the function
# so I dont need to create a new surface object
# everytime
@lru_cache(maxsize=256)
def get_sprite(sprite_pack: str, sprite_position: int):
filenames_list = get_files_paths_from_folder("Assets", sprite_pack, "Sprites")
if filenames_list != None:
for pos, path in enumerate(sorted(filenames_list)):
if pos == sprite_position:
img = pygame.image.load(path).convert_alpha()
img.set_colorkey((152, 0, 136))
return img
img = pygame.image.load(filenames_list[0]).convert_alpha()
img.set_colorkey((152, 0, 136))
return img
return None
def get_audio(sprite_pack, state):
import random
filenames_list = get_files_paths_from_folder("Assets", sprite_pack, str(state))
if filenames_list == None:
return None
audioIndex = random.randint(0, len(filenames_list)-1)
audioObj = pygame.mixer.Sound(filenames_list[audioIndex])
return audioObj
@lru_cache(maxsize=8)
def get_cached_audio(folder_name, sub_folder):
filenames_list = get_files_paths_from_folder("Assets", folder_name, sub_folder)
if filenames_list == None:
return None
audioObj = pygame.mixer.Sound(filenames_list[0])
return audioObj
|
en
| 0.717045
|
# Used this for serialization # and deserialization of files in the game #[-4] to remove extension and just get the map name # The lru cache caches the results of the function # so I dont need to create a new surface object # everytime
| 2.84148
| 3
|
src/sources/url_manager.py
|
joshuaellinger/corona19-data-pipeline
| 17
|
6628765
|
<gh_stars>10-100
#
# UrlManager
#
# make sure we don't hit the same URL twice
#
from typing import Tuple
from loguru import logger
import time
from shared.util import fetch_with_requests
from capture.captive_browser import CaptiveBrowser
class UrlManager:
    """Fetches URLs while caching responses so the same URL is never hit twice."""

    def __init__(self, headless=True, browser="requests"):
        self.history = {}     # url -> (content, status) of completed fetches
        self.size = 0         # total bytes of content fetched so far
        self.browser = browser
        self.headless = headless
        self._captive = None  # lazily-created CaptiveBrowser instance

    def is_repeat(self, url: str) -> bool:
        """Return True if *url* has already been fetched."""
        return url in self.history

    def reset(self):
        """Forget all fetch history and the byte counter."""
        self.history = {}
        self.size = 0

    def shutdown(self):
        """Close the captive browser if one was started."""
        # `is not None` instead of `!= None`: identity test is the
        # idiomatic (and safer) None check.
        if self._captive is not None:
            self._captive.close()
            self._captive = None

    def fetch_with_captive(self, url: str) -> Tuple[bytes, int]:
        """Fetch *url* through a real (captive) browser session."""
        if self._captive is None:
            self._captive = CaptiveBrowser(self.headless, self.browser)
        self._captive.navigate(url)
        if self._captive.has_slow_elements():
            logger.debug(f" found slow elements, wait for 5 seconds")
            time.sleep(5)
        return self._captive.page_source(), self._captive.status_code()

    def fetch(self, url: str) -> Tuple[bytes, int]:
        """Fetch *url*, serving repeats from the in-memory cache."""
        if url in self.history:
            return self.history[url]

        if self.browser == "requests":
            content, status = fetch_with_requests(url)
        else:
            content, status = self.fetch_with_captive(url)

        self.history[url] = (content, status)
        if content is not None:
            self.size += len(content)
        return content, status
|
#
# UrlManager
#
# make sure we don't hit the same URL twice
#
from typing import Tuple
from loguru import logger
import time
from shared.util import fetch_with_requests
from capture.captive_browser import CaptiveBrowser
class UrlManager:
def __init__(self, headless=True, browser="requests"):
self.history = {}
self.size = 0
self.browser = browser
self.headless = headless
self._captive = None
def is_repeat(self, url: str) -> bool:
return url in self.history
def reset(self):
self.history = {}
self.size = 0
def shutdown(self):
if self._captive != None:
self._captive.close()
self._captive = None
def fetch_with_captive(self, url: str) -> Tuple[bytes, int]:
if self._captive == None:
self._captive = CaptiveBrowser(self.headless, self.browser)
self._captive.navigate(url)
if self._captive.has_slow_elements():
logger.debug(f" found slow elements, wait for 5 seconds")
time.sleep(5)
return self._captive.page_source(), self._captive.status_code()
def fetch(self, url: str) -> Tuple[bytes, int]:
if url in self.history:
return self.history[url]
if self.browser == "requests":
content, status = fetch_with_requests(url)
else:
content, status = self.fetch_with_captive(url)
self.history[url] = (content, status)
if content != None:
self.size += len(content)
return content, status
|
en
| 0.905276
|
# # UrlManager # # make sure we don't hit the same URL twice #
| 2.476546
| 2
|
boltons/iterutils.py
|
jpoehnelt/boltons
| 0
|
6628766
|
<reponame>jpoehnelt/boltons<gh_stars>0
# -*- coding: utf-8 -*-
""":mod:`itertools` is full of great examples of Python generator
usage. However, there are still some critical gaps. ``iterutils``
fills many of those gaps with featureful, tested, and Pythonic
solutions.
Many of the functions below have two versions, one which
returns an iterator (denoted by the ``*_iter`` naming pattern), and a
shorter-named convenience form that returns a list. Some of the
following are based on examples in itertools docs.
"""
import os
import math
import time
import codecs
import random
import socket
import hashlib
import itertools
try:
from collections.abc import Mapping, Sequence, Set, ItemsView, Iterable
except ImportError:
from collections import Mapping, Sequence, Set, ItemsView, Iterable
try:
from typeutils import make_sentinel
_UNSET = make_sentinel('_UNSET')
_REMAP_EXIT = make_sentinel('_REMAP_EXIT')
except ImportError:
_REMAP_EXIT = object()
_UNSET = object()
try:
from future_builtins import filter
from itertools import izip
_IS_PY3 = False
except ImportError:
# Python 3 compat
_IS_PY3 = True
basestring = (str, bytes)
izip, xrange = zip, range
def is_iterable(obj):
    """Similar in nature to :func:`callable`: ``True`` if *obj* is
    `iterable`_, ``False`` otherwise.

    >>> is_iterable([])
    True
    >>> is_iterable(object())
    False

    .. _iterable: https://docs.python.org/2/glossary.html#term-iterable
    """
    try:
        iter(obj)
        return True
    except TypeError:
        return False
def is_scalar(obj):
    """A near-mirror of :func:`is_iterable`: ``False`` for iterable
    container types. Strings count as scalar, because they are more
    often treated as whole values than as iterables of 1-character
    substrings.

    >>> is_scalar(object())
    True
    >>> is_scalar(range(10))
    False
    >>> is_scalar('hello')
    True
    """
    if isinstance(obj, basestring):
        return True
    return not is_iterable(obj)
def is_collection(obj):
    """The opposite of :func:`is_scalar`: ``True`` for any iterable
    that is not a string.

    >>> is_collection(object())
    False
    >>> is_collection(range(10))
    True
    >>> is_collection('hello')
    False
    """
    if isinstance(obj, basestring):
        return False
    return is_iterable(obj)
def split(src, sep=None, maxsplit=None):
    """Splits an iterable based on a separator — like :meth:`str.split`,
    but for any iterable. Returns a list of lists.

    >>> split(['hi', 'hello', None, None, 'sup', None, 'soap', None])
    [['hi', 'hello'], ['sup'], ['soap']]

    See :func:`split_iter` docs for more info.
    """
    groups = split_iter(src, sep, maxsplit)
    return list(groups)
def split_iter(src, sep=None, maxsplit=None):
    """Splits an iterable based on a separator, *sep*, a max of
    *maxsplit* times (no max by default). *sep* can be:
    * a single value
    * an iterable of separators
    * a single-argument callable that returns True when a separator is
      encountered
    ``split_iter()`` yields lists of non-separator values. A separator will
    never appear in the output.
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
    [['hi', 'hello'], ['sup'], ['soap']]
    Note that ``split_iter`` is based on :func:`str.split`, so if
    *sep* is ``None``, ``split()`` **groups** separators. If empty lists
    are desired between two contiguous ``None`` values, simply use
    ``sep=[None]``:
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
    [['hi', 'hello'], ['sup']]
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
    [['hi', 'hello'], [], ['sup'], []]
    Using a callable separator:
    >>> falsy_sep = lambda x: not x
    >>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
    [['hi', 'hello'], [], ['sup'], []]
    See :func:`split` for a list-returning version.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if maxsplit is not None:
        maxsplit = int(maxsplit)
        if maxsplit == 0:
            # maxsplit=0 mirrors str.split: no splitting, the whole
            # input comes back as a single group.
            yield [src]
            return
    # Normalize *sep* into a predicate: a callable is used as-is, an
    # iterable of separators becomes a set-membership test, and a
    # scalar becomes an equality test.
    if callable(sep):
        sep_func = sep
    elif not is_scalar(sep):
        sep = frozenset(sep)
        sep_func = lambda x: x in sep
    else:
        sep_func = lambda x: x == sep
    cur_group = []
    split_count = 0
    for s in src:
        if maxsplit is not None and split_count >= maxsplit:
            # Split budget exhausted: stop treating anything as a
            # separator so the remainder lands in one final group.
            sep_func = lambda x: False
        if sep_func(s):
            if sep is None and not cur_group:
                # If sep is none, str.split() "groups" separators
                # check the str.split() docs for more info
                continue
            split_count += 1
            yield cur_group
            cur_group = []
        else:
            cur_group.append(s)
    # Trailing group: suppressed when sep is None and empty, again
    # matching str.split's grouping behavior.
    if cur_group or sep is not None:
        yield cur_group
    return
def chunked(src, size, count=None, **kw):
    """Returns a list of chunks, each with *size* elements, generated
    from iterable *src*. With *count*, at most that many chunks are
    returned. If *src* is not evenly divisible by *size*, the final
    chunk has fewer than *size* elements unless a *fill* pad value is
    provided.

    >>> chunked(range(10), 3)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> chunked(range(10), 3, fill=None)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
    >>> chunked(range(10), 3, count=2)
    [[0, 1, 2], [3, 4, 5]]

    See :func:`chunked_iter` for more info.
    """
    chunk_stream = chunked_iter(src, size, **kw)
    if count is None:
        return list(chunk_stream)
    return list(itertools.islice(chunk_stream, count))
def chunked_iter(src, size, **kw):
    """Generates *size*-sized chunks from the *src* iterable. Unless the
    optional *fill* keyword argument is provided, a final short chunk is
    yielded as-is.

    >>> list(chunked_iter(range(10), 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> list(chunked_iter(range(10), 3, fill=None))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]

    Note that ``fill=None`` in fact uses ``None`` as the fill value.
    String inputs yield string chunks rather than lists of characters.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    size = int(size)
    if size <= 0:
        raise ValueError('expected a positive integer chunk size')
    if 'fill' in kw:
        do_pad, pad_val = True, kw.pop('fill')
    else:
        do_pad, pad_val = False, None
    if kw:
        raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
    if not src:
        return
    if isinstance(src, basestring):
        # Re-join character chunks so strings come back out as strings
        # (str in, str chunks out; bytes in, bytes chunks out).
        joiner = type(src)()
        finalize = lambda chunk: joiner.join(chunk)
    else:
        finalize = lambda chunk: chunk
    pending = iter(src)
    while True:
        chunk = list(itertools.islice(pending, size))
        if not chunk:
            return
        if do_pad and len(chunk) < size:
            chunk.extend([pad_val] * (size - len(chunk)))
        yield finalize(chunk)
def pairwise(src):
    """Convenience wrapper calling :func:`windowed` on *src* with a
    window *size* of 2.

    >>> pairwise(range(5))
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    >>> pairwise([])
    []

    The result always holds one fewer pair than *src* has elements
    (zero pairs for empty input).
    """
    return windowed(src, 2)
def pairwise_iter(src):
    """Convenience wrapper calling :func:`windowed_iter` on *src* with a
    window *size* of 2.

    >>> list(pairwise_iter(range(5)))
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    >>> list(pairwise_iter([]))
    []

    Yields one fewer pair than *src* has elements (nothing for empty
    input).
    """
    return windowed_iter(src, 2)
def windowed(src, size):
    """List-returning version of :func:`windowed_iter`: tuples of
    exactly length *size*, or an empty list when *src* is too short
    to make a single window.
    """
    return list(windowed_iter(src, size))
def windowed_iter(src, size):
    """Returns tuples with length *size* which represent a sliding
    window over iterable *src*.
    >>> list(windowed_iter(range(7), 3))
    [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]
    If the iterable is too short to make a window of length *size*,
    then no window tuples are returned.
    >>> list(windowed_iter(range(3), 5))
    []
    """
    # TODO: lists? (for consistency)
    # Make *size* independent iterators over src, then advance the i-th
    # one by i steps; zipping them afterwards yields consecutive
    # sliding windows.
    tees = itertools.tee(src, size)
    try:
        for i, t in enumerate(tees):
            for _ in xrange(i):
                next(t)
    except StopIteration:
        # src has fewer than *size* items: no complete window exists,
        # so return an empty zip.
        return izip([])
    return izip(*tees)
def xfrange(stop, start=None, step=1.0):
    """Same as :func:`frange`, but generator-based instead of returning
    a list.

    >>> tuple(xfrange(1, 3, step=0.75))
    (1.0, 1.75, 2.5)

    See :func:`frange` for more details.
    """
    if not step:
        raise ValueError('step must be non-zero')
    if start is None:
        cur, limit = 0.0, stop * 1.0
    else:
        # With both positional args given the call reads like range():
        # the first is the start, the second the (exclusive) stop.
        cur, limit = stop * 1.0, start * 1.0
    while cur < limit:
        yield cur
        cur += step
def frange(stop, start=None, step=1.0):
    """A :func:`range` clone for float-based ranges.

    >>> frange(5)
    [0.0, 1.0, 2.0, 3.0, 4.0]
    >>> frange(6, step=1.25)
    [0.0, 1.25, 2.5, 3.75, 5.0]
    >>> frange(100.5, 101.5, 0.25)
    [100.5, 100.75, 101.0, 101.25]
    >>> frange(5, 0)
    []
    >>> frange(5, 0, step=-1.25)
    [5.0, 3.75, 2.5, 1.25]
    """
    if not step:
        raise ValueError('step must be non-zero')
    if start is None:
        begin, end = 0.0, stop * 1.0
    else:
        # With both positional args given the call reads like range().
        begin, end = stop * 1.0, start * 1.0
    total = int(math.ceil((end - begin) / step))
    if total <= 0:
        return []
    # Accumulate by repeated addition (not begin + i*step) so that the
    # float results match the generator-based xfrange exactly.
    vals = [begin]
    while len(vals) < total:
        vals.append(vals[-1] + step)
    return vals
def backoff(start, stop, count=None, factor=2.0, jitter=False):
    """Returns a list of geometrically-increasing floats suitable for
    `exponential backoff`_. Exactly like :func:`backoff_iter`, except
    the ``'repeat'`` option for *count* is not supported. See
    :func:`backoff_iter` for details.

    .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff

    >>> backoff(1, 10)
    [1.0, 2.0, 4.0, 8.0, 10.0]
    """
    if count == 'repeat':
        raise ValueError("'repeat' supported in backoff_iter, not backoff")
    timeouts = backoff_iter(start, stop, count=count,
                            factor=factor, jitter=jitter)
    return list(timeouts)
def backoff_iter(start, stop, count=None, factor=2.0, jitter=False):
    """Yield geometrically-increasing floats suitable for `exponential
    backoff`_. Starts at *start*, multiplies by *factor* (default 2.0)
    until *stop* is reached, then repeats *stop* until *count* values
    have been yielded. *count* defaults to the number of steps between
    *start* and *stop*; the special string ``'repeat'`` yields forever.

    .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff

    >>> list(backoff_iter(1.0, 10.0, count=5))
    [1.0, 2.0, 4.0, 8.0, 10.0]
    >>> list(backoff_iter(1.0, 10.0, count=8))
    [1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0]
    >>> list(backoff_iter(0.25, 100.0, factor=10))
    [0.25, 2.5, 25.0, 100.0]

    *jitter* is a factor between ``-1.0`` and ``1.0`` (``True`` means
    ``1.0``) that uniformly randomizes each value downward from the
    base curve, spreading out retries in a distributed system to avoid
    a thundering herd.
    """
    start = float(start)
    stop = float(stop)
    factor = float(factor)
    if start < 0.0:
        raise ValueError('expected start >= 0, not %r' % start)
    if factor < 1.0:
        raise ValueError('expected factor >= 1.0, not %r' % factor)
    if stop == 0.0:
        raise ValueError('expected stop >= 0')
    if stop < start:
        raise ValueError('expected stop >= start, not %r' % stop)
    if count is None:
        # Default count: how many multiplications by *factor* it takes
        # to climb from start to stop (one extra step for start == 0).
        denom = start if start else 1
        count = 1 + math.ceil(math.log(stop/denom, factor))
        count = count if start else count + 1
    if count != 'repeat' and count < 0:
        raise ValueError('count must be positive or "repeat", not %r' % count)
    if jitter:
        jitter = float(jitter)
        if not (-1.0 <= jitter <= 1.0):
            raise ValueError('expected jitter -1 <= j <= 1, not: %r' % jitter)
    cur, yielded = start, 0
    while count == 'repeat' or yielded < count:
        if jitter:
            yield cur - (cur * jitter * random.random())
        else:
            yield cur
        yielded += 1
        if cur == 0:
            # A zero start would stay zero forever; bump to 1 so the
            # geometric growth can begin.
            cur = 1
        elif cur < stop:
            # Grow geometrically, clamped at *stop*.
            cur = min(cur * factor, stop)
def bucketize(src, key=None, value_transform=None, key_filter=None):
    """Group values in the *src* iterable by the value returned by
    *key*, which defaults to :class:`bool` (grouping by truthiness).

    >>> bucketize(range(5))
    {False: [0], True: [1, 2, 3, 4]}
    >>> is_odd = lambda x: x % 2 == 1
    >>> bucketize(range(5), is_odd)
    {False: [0, 2, 4], True: [1, 3]}

    Value lists are not deduplicated, and any number of buckets may
    result:

    >>> bucketize(range(10), lambda x: x % 3)
    {0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}

    *value_transform* is applied to each value as it is added to its
    bucket, and *key_filter* excludes buckets whose key it rejects:

    >>> bucketize(range(5), value_transform=lambda x: x*x)
    {False: [0], True: [1, 4, 9, 16]}
    >>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
    {0: [0, 3, 6, 9], 2: [2, 5, 8]}

    See :func:`partition` for a version specialized for the binary
    (truthy/falsy) case.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if key is None:
        key = bool
    if not callable(key):
        raise TypeError('expected callable key function')
    if value_transform is None:
        value_transform = lambda x: x
    if not callable(value_transform):
        raise TypeError('expected callable value transform function')
    buckets = {}
    for item in src:
        bucket_key = key(item)
        if key_filter is not None and not key_filter(bucket_key):
            continue
        buckets.setdefault(bucket_key, []).append(value_transform(item))
    return buckets
def partition(src, key=None):
    """No relation to :meth:`str.partition`; this is :func:`bucketize`
    specialized to return a ``(truthy_values, falsy_values)`` tuple.

    >>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
    >>> nonempty
    ['hi', 'bye']

    *key* defaults to :class:`bool` but may be any function returning
    ``True`` or ``False``.

    >>> import string
    >>> is_digit = lambda x: x in string.digits
    >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit)
    >>> ''.join(decimal_digits), ''.join(hexletters)
    ('0123456789', 'abcdefABCDEF')
    """
    grouped = bucketize(src, key)
    truthy = grouped.get(True, [])
    falsy = grouped.get(False, [])
    return truthy, falsy
def unique(src, key=None):
    """Return a list of the unique values in *src*, as determined by
    *key*, preserving the order in which they first appeared.

    >>> ones_n_zeros = '11010110001010010101010'
    >>> ''.join(unique(ones_n_zeros))
    '10'

    See :func:`unique_iter` docs for more details.
    """
    # Materialize the streaming version's output.
    return list(unique_iter(src, key=key))
def unique_iter(src, key=None):
    """Yield elements of the iterable *src* the first time they appear,
    skipping subsequent duplicates, as determined by *key*.

    >>> repetitious = [1, 2, 3] * 10
    >>> list(unique_iter(repetitious))
    [1, 2, 3]

    By default, *key* is the object itself, but *key* can either be a
    callable or, for convenience, a string name of the attribute on
    which to uniqueify objects, falling back on identity when the
    attribute is not present.

    >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes']
    >>> list(unique_iter(pleasantries, key=lambda x: len(x)))
    ['hi', 'hello', 'bye']
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable, not %r' % type(src))
    # Normalize *key* into a callable.
    if key is None:
        key_func = lambda obj: obj
    elif callable(key):
        key_func = key
    elif isinstance(key, basestring):
        # Attribute-name shorthand; identity fallback if attr is absent.
        key_func = lambda obj: getattr(obj, key, obj)
    else:
        raise TypeError('"key" expected a string or callable, not %r' % key)
    seen_keys = set()
    for item in src:
        item_key = key_func(item)
        if item_key in seen_keys:
            continue
        seen_keys.add(item_key)
        yield item
def redundant(src, key=None, groups=False):
    """The complement of :func:`unique()`.
    By default returns non-unique values as a list of the *first*
    redundant value in *src*. Pass ``groups=True`` to get groups of
    all values with redundancies, ordered by position of the first
    redundant value. This is useful in conjunction with some
    normalizing *key* function.
    >>> redundant([1, 2, 3, 4])
    []
    >>> redundant([1, 2, 3, 2, 3, 3, 4])
    [2, 3]
    >>> redundant([1, 2, 3, 2, 3, 3, 4], groups=True)
    [[2, 2], [3, 3, 3]]
    An example using a *key* function to do case-insensitive
    redundancy detection.
    >>> redundant(['hi', 'Hi', 'HI', 'hello'], key=str.lower)
    ['Hi']
    >>> redundant(['hi', 'Hi', 'HI', 'hello'], groups=True, key=str.lower)
    [['hi', 'Hi', 'HI']]
    *key* should also be used when the values in *src* are not hashable.
    .. note::
       This output of this function is designed for reporting
       duplicates in contexts when a unique input is desired. Due to
       the grouped return type, there is no streaming equivalent of
       this function for the time being.
    """
    # Normalize *key* into key_func; key=None uses the item itself
    # (handled inline in the loop below, so no key_func is bound here).
    if key is None:
        pass
    elif callable(key):
        key_func = key
    elif isinstance(key, basestring):
        # Attribute-name shorthand, falling back to identity.
        key_func = lambda x: getattr(x, key, x)
    else:
        raise TypeError('"key" expected a string or callable, not %r' % key)
    seen = {}  # key to first seen item
    redundant_order = []   # keys in order of their *first repeat*
    redundant_groups = {}  # key -> list of items sharing that key
    for i in src:
        k = key_func(i) if key else i
        if k not in seen:
            seen[k] = i
        else:
            if k in redundant_groups:
                # third-or-later occurrence; only collected in groups mode
                if groups:
                    redundant_groups[k].append(i)
            else:
                # second occurrence: record order, seed the group with
                # the first-seen item plus this one
                redundant_order.append(k)
                redundant_groups[k] = [seen[k], i]
    if not groups:
        # report only the first redundant item for each key
        ret = [redundant_groups[k][1] for k in redundant_order]
    else:
        ret = [redundant_groups[k] for k in redundant_order]
    return ret
def one(src, default=None, key=None):
    """Return the single element of *src* that satisfies *key* (which
    defaults to :class:`bool`), or *default* when zero or more than one
    element matches. Along the same lines as builtins :func:`all` and
    :func:`any`, and similar to :func:`first` -- like an `XOR`_ over an
    iterable.

    >>> one((True, False, False))
    True
    >>> one((True, False, True))
    >>> one((0, 0, 'a'))
    'a'
    >>> one((0, False, None))
    >>> one((True, True), default=False)
    False
    >>> bool(one(('', 1)))
    True
    >>> one((10, 20, 30, 42), key=lambda i: i > 40)
    42

    .. _XOR: https://en.wikipedia.org/wiki/Exclusive_or
    """
    # Only the first two matches matter: exactly one match is success;
    # zero or two-plus means the default is returned.
    matches = list(itertools.islice(filter(key, src), 2))
    if len(matches) == 1:
        return matches[0]
    return default
def first(iterable, default=None, key=None):
    """Return the first element of *iterable* that satisfies *key*
    (truthiness by default), or *default* if none does. Similar to
    :func:`one`.

    >>> first([0, False, None, [], (), 42])
    42
    >>> first([0, False, None, [], ()]) is None
    True
    >>> first([0, False, None, [], ()], default='ohai')
    'ohai'

    The optional *key* argument specifies a one-argument predicate, as
    for :func:`filter`, and should be passed by keyword:

    >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
    4

    Contributed by <NAME>, author of `the original standalone module`_.
    .. _the original standalone module: https://github.com/hynek/first
    """
    for match in filter(key, iterable):
        return match
    return default
def flatten_iter(iterable):
    """Yield all elements from *iterable*, collapsing any nested
    iterables along the way.

    >>> nested = [[1, 2], [[3], [4, 5]]]
    >>> list(flatten_iter(nested))
    [1, 2, 3, 4, 5]
    """
    for element in iterable:
        # Strings are treated as scalars: recursing into them would
        # emit single characters (and recurse forever on 1-char strings).
        if isinstance(element, Iterable) and not isinstance(element, basestring):
            for nested_element in flatten_iter(element):
                yield nested_element
        else:
            yield element
def flatten(iterable):
    """Return a list of all the elements from *iterable*, with any
    nested iterables collapsed.

    >>> nested = [[1, 2], [[3], [4, 5]]]
    >>> flatten(nested)
    [1, 2, 3, 4, 5]
    """
    flattened = list(flatten_iter(iterable))
    return flattened
def same(iterable, ref=_UNSET):
    """Return ``True`` when all values in *iterable* are equal to one
    another, or, if given, to the reference value *ref*. Like
    :func:`all`/:func:`any`, evaluates an iterable down to a
    :class:`bool`. Empty iterables are considered ``True``.

    >>> same([])
    True
    >>> same([1])
    True
    >>> same(['a', 'a', 'a'])
    True
    >>> same(range(20))
    False
    >>> same([[], []])
    True
    >>> same([[], []], ref='test')
    False
    """
    iterator = iter(iterable)
    if ref is _UNSET:
        # No explicit reference: compare everything to the first element.
        ref = next(iterator, _UNSET)
        if ref is _UNSET:
            return True  # empty iterable is vacuously all-equal
    for val in iterator:
        if not (val == ref):
            return False
    return True
def default_visit(path, key, value):
    # Identity visit: keep every (key, value) pair unmodified.
    # print('visit(%r, %r, %r)' % (path, key, value))
    return key, value
# Keep a reference to the original so remap() can detect whether the
# caller supplied a custom visit callable (and inline the identity
# operation when not) -- even in the extreme case of monkeypatching
# iterutils with a different default_visit.
_orig_default_visit = default_visit
def default_enter(path, key, value):
    """Default remap() *enter*: return a pair of (new empty container,
    iterable of child items), or ``(value, False)`` for values that
    should not be traversed.
    """
    # print('enter(%r, %r)' % (key, value))
    # Strings must be checked before Sequence: they are sequences, but
    # traversing them would decompose them into single characters.
    if isinstance(value, basestring):
        return value, False
    elif isinstance(value, Mapping):
        return value.__class__(), ItemsView(value)
    elif isinstance(value, Sequence):
        return value.__class__(), enumerate(value)
    elif isinstance(value, Set):
        # enumerate() gives sets the same (index, value) item shape as
        # sequences, which default_exit expects.
        return value.__class__(), enumerate(value)
    else:
        # files, strings, other iterables, and scalars are not
        # traversed
        return value, False
def default_exit(path, key, old_parent, new_parent, new_items):
    """Default remap() *exit*: fold the remapped *new_items* into
    *new_parent*, rebuilding immutable containers (tuples, frozensets)
    with the same type as needed, and return the finished parent.
    """
    # print('exit(%r, %r, %r, %r, %r)'
    #       % (path, key, old_parent, new_parent, new_items))
    result = new_parent
    if isinstance(new_parent, Mapping):
        new_parent.update(new_items)
    elif isinstance(new_parent, Sequence):
        values = [item for _, item in new_items]
        try:
            new_parent.extend(values)
        except AttributeError:
            # immutable sequences (e.g. tuples) lack extend(); rebuild
            result = new_parent.__class__(values)
    elif isinstance(new_parent, Set):
        values = [item for _, item in new_items]
        try:
            new_parent.update(values)
        except AttributeError:
            # frozensets lack update(); rebuild
            result = new_parent.__class__(values)
    else:
        raise RuntimeError('unexpected iterable type: %r' % type(new_parent))
    return result
def remap(root, visit=default_visit, enter=default_enter, exit=default_exit,
          **kwargs):
    """The remap ("recursive map") function is used to traverse and
    transform nested structures. Lists, tuples, sets, and dictionaries
    are just a few of the data structures nested into heterogenous
    tree-like structures that are so common in programming.
    Unfortunately, Python's built-in ways to manipulate collections
    are almost all flat. List comprehensions may be fast and succinct,
    but they do not recurse, making it tedious to apply quick changes
    or complex transforms to real-world data.
    remap goes where list comprehensions cannot.
    Here's an example of removing all Nones from some data:
    >>> from pprint import pprint
    >>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
    ...            'Babylon 5': 6, 'Dr. Who': None}
    >>> pprint(remap(reviews, lambda p, k, v: v is not None))
    {'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}
    Notice how both Nones have been removed despite the nesting in the
    dictionary. Not bad for a one-liner, and that's just the beginning.
    See `this remap cookbook`_ for more delicious recipes.
    .. _this remap cookbook: http://sedimental.org/remap.html
    remap takes four main arguments: the object to traverse and three
    optional callables which determine how the remapped object will be
    created.
    Args:
        root: The target object to traverse. By default, remap
            supports iterables like :class:`list`, :class:`tuple`,
            :class:`dict`, and :class:`set`, but any object traversable by
            *enter* will work.
        visit (callable): This function is called on every item in
            *root*. It must accept three positional arguments, *path*,
            *key*, and *value*. *path* is simply a tuple of parents'
            keys. *visit* should return the new key-value pair. It may
            also return ``True`` as shorthand to keep the old item
            unmodified, or ``False`` to drop the item from the new
            structure. *visit* is called after *enter*, on the new parent.
            The *visit* function is called for every item in root,
            including duplicate items. For traversable values, it is
            called on the new parent object, after all its children
            have been visited. The default visit behavior simply
            returns the key-value pair unmodified.
        enter (callable): This function controls which items in *root*
            are traversed. It accepts the same arguments as *visit*: the
            path, the key, and the value of the current item. It returns a
            pair of the blank new parent, and an iterator over the items
            which should be visited. If ``False`` is returned instead of
            an iterator, the value will not be traversed.
            The *enter* function is only called once per unique value. The
            default enter behavior support mappings, sequences, and
            sets. Strings and all other iterables will not be traversed.
        exit (callable): This function determines how to handle items
            once they have been visited. It gets the same three
            arguments as the other functions -- *path*, *key*, *value*
            -- plus two more: the blank new parent object returned
            from *enter*, and a list of the new items, as remapped by
            *visit*.
            Like *enter*, the *exit* function is only called once per
            unique value. The default exit behavior is to simply add
            all new items to the new parent, e.g., using
            :meth:`list.extend` and :meth:`dict.update` to add to the
            new parent. Immutable objects, such as a :class:`tuple` or
            :class:`namedtuple`, must be recreated from scratch, but
            use the same type as the new parent passed back from the
            *enter* function.
        reraise_visit (bool): A pragmatic convenience for the *visit*
            callable. When set to ``False``, remap ignores any errors
            raised by the *visit* callback. Items causing exceptions
            are kept. See examples for more details.
    remap is designed to cover the majority of cases with just the
    *visit* callable. While passing in multiple callables is very
    empowering, remap is designed so very few cases should require
    passing more than one function.
    When passing *enter* and *exit*, it's common and easiest to build
    on the default behavior. Simply add ``from boltons.iterutils import
    default_enter`` (or ``default_exit``), and have your enter/exit
    function call the default behavior before or after your custom
    logic. See `this example`_.
    Duplicate and self-referential objects (aka reference loops) are
    automatically handled internally, `as shown here`_.
    .. _this example: http://sedimental.org/remap.html#sort_all_lists
    .. _as shown here: http://sedimental.org/remap.html#corner_cases
    """
    # TODO: improve argument formatting in sphinx doc
    # TODO: enter() return (False, items) to continue traverse but cancel copy?
    if not callable(visit):
        raise TypeError('visit expected callable, not: %r' % visit)
    if not callable(enter):
        raise TypeError('enter expected callable, not: %r' % enter)
    if not callable(exit):
        raise TypeError('exit expected callable, not: %r' % exit)
    reraise_visit = kwargs.pop('reraise_visit', True)
    if kwargs:
        raise TypeError('unexpected keyword arguments: %r' % kwargs.keys())
    # Iterative depth-first traversal with an explicit stack:
    #   path     - tuple of keys from root down to the current parent
    #   registry - id(obj) -> remapped obj; handles duplicates and
    #              self-referential structures (reference loops)
    #   stack    - (key, value) work items; _REMAP_EXIT marks that all of
    #              a parent's children have been processed
    path, registry, stack = (), {}, [(None, root)]
    new_items_stack = []
    while stack:
        key, value = stack.pop()
        id_value = id(value)
        if key is _REMAP_EXIT:
            # All children of old_parent have been visited; finalize it.
            key, new_parent, old_parent = value
            id_value = id(old_parent)
            path, new_items = new_items_stack.pop()
            value = exit(path, key, old_parent, new_parent, new_items)
            registry[id_value] = value
            if not new_items_stack:
                continue
        elif id_value in registry:
            # Already traversed (duplicate or loop): reuse the result.
            value = registry[id_value]
        else:
            res = enter(path, key, value)
            try:
                new_parent, new_items = res
            except TypeError:
                # TODO: handle False?
                raise TypeError('enter should return a tuple of (new_parent,'
                                ' items_iterator), not: %r' % res)
            if new_items is not False:
                # traverse unless False is explicitly passed
                registry[id_value] = new_parent
                new_items_stack.append((path, []))
                if value is not root:
                    path += (key,)
                # Push the exit marker first so it pops only after every
                # child pushed below has been handled.
                stack.append((_REMAP_EXIT, (key, new_parent, value)))
                if new_items:
                    # reversed() preserves original child order on a LIFO stack
                    stack.extend(reversed(list(new_items)))
                continue
        if visit is _orig_default_visit:
            # avoid function call overhead by inlining identity operation
            visited_item = (key, value)
        else:
            try:
                visited_item = visit(path, key, value)
            except Exception:
                if reraise_visit:
                    raise
                visited_item = True
        if visited_item is False:
            continue  # drop
        elif visited_item is True:
            visited_item = (key, value)
        # TODO: typecheck?
        #    raise TypeError('expected (key, value) from visit(),'
        #                    ' not: %r' % visited_item)
        try:
            new_items_stack[-1][1].append(visited_item)
        except IndexError:
            raise TypeError('expected remappable root, not: %r' % root)
    return value
class PathAccessError(KeyError, IndexError, TypeError):
    """An amalgamation of KeyError, IndexError, and TypeError,
    representing what can occur when looking up a path in a nested
    object.
    """
    def __init__(self, exc, seg, path):
        # exc: the underlying exception; seg: the path segment that
        # failed; path: the full path being looked up.
        self.exc = exc
        self.seg = seg
        self.path = path

    def __repr__(self):
        return '%s(%r, %r, %r)' % (type(self).__name__,
                                   self.exc, self.seg, self.path)

    def __str__(self):
        msg = 'could not access %r from path %r, got error: %r'
        return msg % (self.seg, self.path, self.exc)
def get_path(root, path, default=_UNSET):
    """Retrieve a value from a nested object via a tuple representing the
    lookup path.
    >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
    >>> get_path(root, ('a', 'b', 'c', 2, 0))
    3
    The path format is intentionally consistent with that of
    :func:`remap`.
    One of get_path's chief aims is improved error messaging. EAFP is
    great, but the error messages are not.
    For instance, ``root['a']['b']['c'][2][1]`` gives back
    ``IndexError: list index out of range``
    What went out of range where? get_path currently raises
    ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
    1), got error: IndexError('list index out of range',)``, a
    subclass of IndexError and KeyError.
    You can also pass a default that covers the entire operation,
    should the lookup fail at any level.
    Args:
        root: The target nesting of dictionaries, lists, or other
            objects supporting ``__getitem__``.
        path (tuple): A list of strings and integers to be successively
            looked up within *root*.
        default: The value to be returned should any
            ``PathAccessError`` exceptions be raised.
    """
    if isinstance(path, basestring):
        # dotted-string convenience: 'a.b.c' -> ['a', 'b', 'c']
        path = path.split('.')
    cur = root
    try:
        for seg in path:
            try:
                cur = cur[seg]
            except (KeyError, IndexError) as exc:
                raise PathAccessError(exc, seg, path)
            except TypeError as exc:
                # either string index in a list, or a parent that
                # doesn't support indexing
                try:
                    # retry with the segment coerced to int, e.g. for a
                    # dotted-string path indexing into a list
                    seg = int(seg)
                    cur = cur[seg]
                except (ValueError, KeyError, IndexError, TypeError):
                    if not is_iterable(cur):
                        exc = TypeError('%r object is not indexable'
                                        % type(cur).__name__)
                    raise PathAccessError(exc, seg, path)
    except PathAccessError:
        # a single default covers failure at any depth
        if default is _UNSET:
            raise
        return default
    return cur
def research(root, query=lambda p, k, v: True, reraise=False):
    """The :func:`research` function uses :func:`remap` to recurse over
    any data nested in *root*, and find values which match a given
    criterion, specified by the *query* callable.
    Results are returned as a list of ``(path, value)`` pairs. The
    paths are tuples in the same format accepted by
    :func:`get_path`. This can be useful for comparing values nested
    in two or more different structures.
    Here's a simple example that finds all integers:
    >>> root = {'a': {'b': 1, 'c': (2, 'd', 3)}, 'e': None}
    >>> res = research(root, query=lambda p, k, v: isinstance(v, int))
    >>> print(sorted(res))
    [(('a', 'b'), 1), (('a', 'c', 0), 2), (('a', 'c', 2), 3)]
    Note how *query* follows the same, familiar ``path, key, value``
    signature as the ``visit`` and ``enter`` functions on
    :func:`remap`, and returns a :class:`bool`.
    Args:
        root: The target object to search. Supports the same types of
            objects as :func:`remap`, including :class:`list`,
            :class:`tuple`, :class:`dict`, and :class:`set`.
        query (callable): The function called on every object to
            determine whether to include it in the search results. The
            callable must accept three arguments, *path*, *key*, and
            *value*, commonly abbreviated *p*, *k*, and *v*, same as
            *enter* and *visit* from :func:`remap`.
        reraise (bool): Whether to reraise exceptions raised by *query*
            or to simply drop the result that caused the error.
    With :func:`research` it's easy to inspect the details of a data
    structure, like finding values that are at a certain depth (using
    ``len(p)``) and much more. If more advanced functionality is
    needed, check out the code and make your own :func:`remap`
    wrapper, and consider `submitting a patch`_!
    .. _submitting a patch: https://github.com/mahmoud/boltons/pulls
    """
    ret = []
    if not callable(query):
        raise TypeError('query expected callable, not: %r' % query)
    # Piggyback on remap's traversal: this custom enter records matches
    # as a side effect, then delegates to the default traversal behavior.
    def enter(path, key, value):
        try:
            if query(path, key, value):
                ret.append((path + (key,), value))
        except Exception:
            # query errors drop the candidate unless reraise is set
            if reraise:
                raise
        return default_enter(path, key, value)
    remap(root, enter=enter)
    return ret
# TODO: recollect()
# TODO: refilter()
# TODO: reiter()
# GUID iterators: 10x faster and somewhat more compact than uuid.
class GUIDerator(object):
    """The GUIDerator is an iterator that yields a globally-unique
    identifier (GUID) on every iteration. The GUIDs produced are
    hexadecimal strings.
    Testing shows it to be around 12x faster than the uuid module. By
    default it is also more compact, partly due to its default 96-bit
    (24-hexdigit) length. 96 bits of randomness means that there is a
    1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more
    or less uniqueness is desired, the *size* argument can be adjusted
    accordingly.
    Args:
        size (int): character length of the GUID, defaults to 24. Lengths
            between 20 and 36 are considered valid.
    The GUIDerator has built-in fork protection that causes it to
    detect a fork on next iteration and reseed accordingly.
    """
    def __init__(self, size=24):
        self.size = size
        # 20 and 36 are both accepted (size < 20 or size > 36 fails),
        # so the message uses inclusive bounds.
        if size < 20 or size > 36:
            raise ValueError('expected 20 <= size <= 36')
        self.count = itertools.count()
        self.reseed()

    def reseed(self):
        """Regenerate the per-process salt, e.g., after a fork."""
        self.pid = os.getpid()
        # The salt must be text: it is str-concatenated with the counter
        # in __next__. BUGFIX: the fallback was the *bytes* literal
        # b'<nohostname>', which raises TypeError inside str.join on
        # Python 3 whenever gethostname() returns an empty string.
        self.salt = '-'.join([str(self.pid),
                              socket.gethostname() or '<nohostname>',
                              str(time.time()),
                              codecs.encode(os.urandom(6),
                                            'hex_codec').decode('ascii')])
        # that codecs trick is the best/only way to get a bytes to
        # hexbytes in py2/3
        return

    def __iter__(self):
        return self

    if _IS_PY3:
        def __next__(self):
            # fork protection: a changed pid means we are in a new process
            if os.getpid() != self.pid:
                self.reseed()
            target_bytes = (self.salt + str(next(self.count))).encode('utf8')
            hash_text = hashlib.sha1(target_bytes).hexdigest()[:self.size]
            return hash_text
    else:
        def __next__(self):
            if os.getpid() != self.pid:
                self.reseed()
            return hashlib.sha1(self.salt +
                                str(next(self.count))).hexdigest()[:self.size]

    next = __next__
class SequentialGUIDerator(GUIDerator):
    """Much like the standard GUIDerator, the SequentialGUIDerator is an
    iterator that yields a globally-unique identifier (GUID) on every
    iteration. The GUIDs produced are hexadecimal strings.
    The SequentialGUIDerator differs in that it picks a starting GUID
    value and increments every iteration. This yields GUIDs which are
    of course unique, but also ordered and lexicographically sortable.
    The SequentialGUIDerator is around 50% faster than the normal
    GUIDerator, making it almost 20x as fast as the built-in uuid
    module. By default it is also more compact, partly due to its
    96-bit (24-hexdigit) default length. 96 bits of randomness means that
    there is a 1 in 2 ^ 32 chance of collision after 2 ^ 64
    iterations. If more or less uniqueness is desired, the *size*
    argument can be adjusted accordingly.
    Args:
        size (int): character length of the GUID, defaults to 24.
    Note that with SequentialGUIDerator there is a chance of GUIDs
    growing larger than the size configured. The SequentialGUIDerator
    has built-in fork protection that causes it to detect a fork on
    next iteration and reseed accordingly.
    """
    if _IS_PY3:
        def reseed(self):
            # derive a random starting integer from the parent's salt
            super(SequentialGUIDerator, self).reseed()
            start_str = hashlib.sha1(self.salt.encode('utf8')).hexdigest()
            self.start = int(start_str[:self.size], 16)
            # force a high bit on so '%x' output keeps its full width
            self.start |= (1 << ((self.size * 4) - 2))
    else:
        def reseed(self):
            super(SequentialGUIDerator, self).reseed()
            start_str = hashlib.sha1(self.salt).hexdigest()
            self.start = int(start_str[:self.size], 16)
            self.start |= (1 << ((self.size * 4) - 2))

    def __next__(self):
        # fork protection: changed pid means a new process, so reseed
        if os.getpid() != self.pid:
            self.reseed()
        return '%x' % (next(self.count) + self.start)

    next = __next__
# Ready-to-use module-level singletons: call next(guid_iter) or
# next(seq_guid_iter) to get a GUID without constructing a generator.
guid_iter = GUIDerator()
seq_guid_iter = SequentialGUIDerator()
def soft_sorted(iterable, first=None, last=None, key=None, reverse=False):
    """For when you care about the order of some elements, but not about
    others.
    Use this to float to the top and/or sink to the bottom a specific
    ordering, while sorting the rest of the elements according to
    normal :func:`sorted` rules.

    >>> soft_sorted(['two', 'b', 'one', 'a'], first=['one', 'two'])
    ['one', 'two', 'a', 'b']
    >>> soft_sorted(range(7), first=[6, 15], last=[2, 4], reverse=True)
    [6, 5, 3, 1, 0, 2, 4]
    >>> import string
    >>> ''.join(soft_sorted(string.hexdigits, first='za1', last='b', key=str.lower))
    'aA1023456789cCdDeEfFbB'

    Args:
        iterable (list): A list or other iterable to sort.
        first (list): A sequence to enforce for elements which should
            appear at the beginning of the returned list.
        last (list): A sequence to enforce for elements which should
            appear at the end of the returned list.
        key (callable): Callable used to generate a comparable key for
            each item to be sorted, same as the key in
            :func:`sorted`. Note that entries in *first* and *last*
            should be the keys for the items. Defaults to
            passthrough/the identity function.
        reverse (bool): Whether or not elements not explicitly ordered
            by *first* and *last* should be in reverse order or not.
    Returns a new list in sorted order.
    """
    head_keys = first or []
    tail_keys = last or []
    key_func = key or (lambda x: x)
    items = list(iterable)
    # everything not pinned to the head or tail gets a normal sort
    middle = [x for x in items
              if not ((head_keys and key_func(x) in head_keys)
                      or (tail_keys and key_func(x) in tail_keys))]
    middle.sort(key=key_func, reverse=reverse)
    head = []
    if head_keys:
        # order pinned items by their position in the head_keys sequence
        head = sorted([x for x in items if key_func(x) in head_keys],
                      key=lambda x: head_keys.index(key_func(x)))
    tail = []
    if tail_keys:
        tail = sorted([x for x in items if key_func(x) in tail_keys],
                      key=lambda x: tail_keys.index(key_func(x)))
    return head + middle + tail
"""
May actually be faster to do an isinstance check for a str path
$ python -m timeit -s "x = [1]" "x[0]"
10000000 loops, best of 3: 0.0207 usec per loop
$ python -m timeit -s "x = [1]" "try: x[0] \nexcept: pass"
10000000 loops, best of 3: 0.029 usec per loop
$ python -m timeit -s "x = [1]" "try: x[1] \nexcept: pass"
1000000 loops, best of 3: 0.315 usec per loop
# setting up try/except is fast, only around 0.01us
# actually triggering the exception takes almost 10x as long
$ python -m timeit -s "x = [1]" "isinstance(x, basestring)"
10000000 loops, best of 3: 0.141 usec per loop
$ python -m timeit -s "x = [1]" "isinstance(x, str)"
10000000 loops, best of 3: 0.131 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.')\n except: pass"
1000000 loops, best of 3: 0.443 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.') \nexcept AttributeError: pass"
1000000 loops, best of 3: 0.544 usec per loop
"""
# NOTE: removed a stray '|' concatenation artifact here (it was a syntax
# error); a second copy of this module follows below.
# -*- coding: utf-8 -*-
""":mod:`itertools` is full of great examples of Python generator
usage. However, there are still some critical gaps. ``iterutils``
fills many of those gaps with featureful, tested, and Pythonic
solutions.
Many of the functions below have two versions, one which
returns an iterator (denoted by the ``*_iter`` naming pattern), and a
shorter-named convenience form that returns a list. Some of the
following are based on examples in itertools docs.
"""
import os
import math
import time
import codecs
import random
import socket
import hashlib
import itertools
try:
from collections.abc import Mapping, Sequence, Set, ItemsView, Iterable
except ImportError:
from collections import Mapping, Sequence, Set, ItemsView, Iterable
try:
from typeutils import make_sentinel
_UNSET = make_sentinel('_UNSET')
_REMAP_EXIT = make_sentinel('_REMAP_EXIT')
except ImportError:
_REMAP_EXIT = object()
_UNSET = object()
try:
from future_builtins import filter
from itertools import izip
_IS_PY3 = False
except ImportError:
# Python 3 compat
_IS_PY3 = True
basestring = (str, bytes)
izip, xrange = zip, range
def is_iterable(obj):
    """Similar in nature to :func:`callable`, ``is_iterable`` returns
    ``True`` if an object is `iterable`_, ``False`` if not.

    >>> is_iterable([])
    True
    >>> is_iterable(object())
    False

    .. _iterable: https://docs.python.org/2/glossary.html#term-iterable
    """
    # iter() raises TypeError for non-iterables; no other checks needed.
    try:
        iter(obj)
    except TypeError:
        return False
    else:
        return True
def is_scalar(obj):
    """A near-mirror of :func:`is_iterable`. Returns ``False`` for
    iterable container types, ``True`` otherwise. Strings count as
    scalar, because they are more often treated as whole values than
    as iterables of 1-character substrings.

    >>> is_scalar(object())
    True
    >>> is_scalar(range(10))
    False
    >>> is_scalar('hello')
    True
    """
    # strings are special-cased as scalars despite being iterable
    return isinstance(obj, basestring) or not is_iterable(obj)
def is_collection(obj):
    """The opposite of :func:`is_scalar`. Returns ``True`` if an object
    is an iterable other than a string.

    >>> is_collection(object())
    False
    >>> is_collection(range(10))
    True
    >>> is_collection('hello')
    False
    """
    # strings are iterable but deliberately excluded
    if isinstance(obj, basestring):
        return False
    return is_iterable(obj)
def split(src, sep=None, maxsplit=None):
    """Splits an iterable based on a separator. Like :meth:`str.split`,
    but for all iterables. Returns a list of lists.

    >>> split(['hi', 'hello', None, None, 'sup', None, 'soap', None])
    [['hi', 'hello'], ['sup'], ['soap']]

    See :func:`split_iter` docs for more info.
    """
    # Simply materialize the streaming version.
    return list(split_iter(src, sep=sep, maxsplit=maxsplit))
def split_iter(src, sep=None, maxsplit=None):
    """Splits an iterable based on a separator, *sep*, a max of
    *maxsplit* times (no max by default). *sep* can be:
    * a single value
    * an iterable of separators
    * a single-argument callable that returns True when a separator is
      encountered
    ``split_iter()`` yields lists of non-separator values. A separator will
    never appear in the output.
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
    [['hi', 'hello'], ['sup'], ['soap']]
    Note that ``split_iter`` is based on :func:`str.split`, so if
    *sep* is ``None``, ``split()`` **groups** separators. If empty lists
    are desired between two contiguous ``None`` values, simply use
    ``sep=[None]``:
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
    [['hi', 'hello'], ['sup']]
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
    [['hi', 'hello'], [], ['sup'], []]
    Using a callable separator:
    >>> falsy_sep = lambda x: not x
    >>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
    [['hi', 'hello'], [], ['sup'], []]
    See :func:`split` for a list-returning version.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if maxsplit is not None:
        maxsplit = int(maxsplit)
        if maxsplit == 0:
            # zero splits: the whole input is one group, as in str.split
            yield [src]
            return
    # Normalize *sep* into a predicate: callables are used as-is,
    # iterables become set-membership tests, scalars equality tests.
    if callable(sep):
        sep_func = sep
    elif not is_scalar(sep):
        sep = frozenset(sep)
        sep_func = lambda x: x in sep
    else:
        sep_func = lambda x: x == sep
    cur_group = []
    split_count = 0
    for s in src:
        if maxsplit is not None and split_count >= maxsplit:
            # max splits reached: nothing further counts as a separator
            sep_func = lambda x: False
        if sep_func(s):
            if sep is None and not cur_group:
                # If sep is none, str.split() "groups" separators
                # check the str.split() docs for more info
                continue
            split_count += 1
            yield cur_group
            cur_group = []
        else:
            cur_group.append(s)
    # trailing group: always yielded with an explicit sep (mirrors
    # str.split's trailing-empty-string behavior), only if non-empty
    # in whitespace/grouping (sep=None) mode
    if cur_group or sep is not None:
        yield cur_group
    return
def chunked(src, size, count=None, **kw):
    """Returns a list of *count* chunks, each with *size* elements,
    generated from iterable *src*. If *src* is not evenly divisible by
    *size*, the final chunk will have fewer than *size* elements.
    Provide the *fill* keyword argument to provide a pad value and
    enable padding, otherwise no padding will take place.

    >>> chunked(range(10), 3)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> chunked(range(10), 3, fill=None)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
    >>> chunked(range(10), 3, count=2)
    [[0, 1, 2], [3, 4, 5]]

    See :func:`chunked_iter` for more info.
    """
    chunks = chunked_iter(src, size, **kw)
    if count is not None:
        # take only the first *count* chunks
        return list(itertools.islice(chunks, count))
    return list(chunks)
def chunked_iter(src, size, **kw):
    """Generates *size*-sized chunks from *src* iterable. Unless the
    optional *fill* keyword argument is provided, iterables not even
    divisible by *size* will have a final chunk that is smaller than
    *size*.
    >>> list(chunked_iter(range(10), 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> list(chunked_iter(range(10), 3, fill=None))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
    Note that ``fill=None`` in fact uses ``None`` as the fill value.
    """
    # TODO: add count kwarg?
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    size = int(size)
    if size <= 0:
        raise ValueError('expected a positive integer chunk size')
    # 'fill' presence (not just value) toggles padding, so pop() with
    # a KeyError probe rather than kw.get()
    do_fill = True
    try:
        fill_val = kw.pop('fill')
    except KeyError:
        do_fill = False
        fill_val = None
    if kw:
        raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
    if not src:
        return
    postprocess = lambda chk: chk
    if isinstance(src, basestring):
        # rejoin character chunks into substrings of the same str type
        postprocess = lambda chk, _sep=type(src)(): _sep.join(chk)
    src_iter = iter(src)
    while True:
        cur_chunk = list(itertools.islice(src_iter, size))
        if not cur_chunk:
            break
        lc = len(cur_chunk)
        if lc < size and do_fill:
            # pad the final short chunk up to *size*
            cur_chunk[lc:] = [fill_val] * (size - lc)
        yield postprocess(cur_chunk)
    return
def pairwise(src):
    """Convenience function for calling :func:`windowed` on *src*, with
    *size* set to 2.

    >>> pairwise(range(5))
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    >>> pairwise([])
    []

    The number of pairs is always one less than the number of elements
    in the iterable passed in, except on empty inputs, which returns
    an empty list.
    """
    # a pair is just a size-2 window
    return windowed(src, size=2)
def pairwise_iter(src):
    """Convenience function for calling :func:`windowed_iter` on *src*,
    with *size* set to 2.

    >>> list(pairwise_iter(range(5)))
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    >>> list(pairwise_iter([]))
    []

    The number of pairs is always one less than the number of elements
    in the iterable passed in, or zero, when *src* is empty.
    """
    # a pair is just a size-2 window
    return windowed_iter(src, size=2)
def windowed(src, size):
    """Returns a list of tuples with exactly length *size*. If the
    iterable is too short to make a window of length *size*, no tuples
    are returned. See :func:`windowed_iter` for more.
    """
    windows = windowed_iter(src, size)
    return list(windows)
def windowed_iter(src, size):
    """Returns tuples with length *size* which represent a sliding
    window over iterable *src*.
    >>> list(windowed_iter(range(7), 3))
    [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]
    If the iterable is too short to make a window of length *size*,
    then no window tuples are returned.
    >>> list(windowed_iter(range(3), 5))
    []
    """
    # TODO: lists? (for consistency)
    # Make *size* independent iterators and advance the i-th one by i
    # steps, so that zipping them together yields sliding windows.
    tees = itertools.tee(src, size)
    try:
        for i, t in enumerate(tees):
            for _ in xrange(i):
                next(t)
    except StopIteration:
        # fewer than *size* items in src: no full window is possible
        return izip([])
    return izip(*tees)
def xfrange(stop, start=None, step=1.0):
    """Same as :func:`frange`, but generator-based instead of returning a
    list.

    >>> tuple(xfrange(1, 3, step=0.75))
    (1.0, 1.75, 2.5)

    See :func:`frange` for more details.
    """
    if not step:
        raise ValueError('step must be non-zero')
    # Mirror range()'s argument convention: with one argument it is the
    # exclusive end; with two, the first is the begin and second the end.
    if start is None:
        begin, end = 0.0, stop * 1.0
    else:
        begin, end = stop * 1.0, start * 1.0
    cur = begin
    while cur < end:
        yield cur
        cur += step
def frange(stop, start=None, step=1.0):
    """A :func:`range` clone for float-based ranges.

    >>> frange(5)
    [0.0, 1.0, 2.0, 3.0, 4.0]
    >>> frange(6, step=1.25)
    [0.0, 1.25, 2.5, 3.75, 5.0]
    >>> frange(100.5, 101.5, 0.25)
    [100.5, 100.75, 101.0, 101.25]
    >>> frange(5, 0)
    []
    >>> frange(5, 0, step=-1.25)
    [5.0, 3.75, 2.5, 1.25]
    """
    if not step:
        raise ValueError('step must be non-zero')
    # Mirror range()'s argument convention: with one argument it is the
    # exclusive end; with two, the first is the begin and second the end.
    if start is None:
        begin, end = 0.0, stop * 1.0
    else:
        begin, end = stop * 1.0, start * 1.0
    total = int(math.ceil((end - begin) / step))
    if total <= 0:
        return []
    # Accumulate by repeated addition (rather than begin + i*step) to
    # match the generator's float behavior exactly.
    results = [begin]
    while len(results) < total:
        results.append(results[-1] + step)
    return results
def backoff(start, stop, count=None, factor=2.0, jitter=False):
    """Return a list of geometrically-increasing floating-point numbers,
    suitable for usage with `exponential backoff`_. Exactly like
    :func:`backoff_iter`, but without the ``'repeat'`` option for
    *count*. See :func:`backoff_iter` for more details.

    .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff

    >>> backoff(1, 10)
    [1.0, 2.0, 4.0, 8.0, 10.0]
    """
    # 'repeat' would make the returned list infinite, so reject it here.
    if count == 'repeat':
        raise ValueError("'repeat' supported in backoff_iter, not backoff")
    timeouts = backoff_iter(start, stop, count=count,
                            factor=factor, jitter=jitter)
    return list(timeouts)
def backoff_iter(start, stop, count=None, factor=2.0, jitter=False):
    """Generates a sequence of geometrically-increasing floats, suitable
    for usage with `exponential backoff`_. Starts with *start*,
    increasing by *factor* until *stop* is reached, optionally
    stopping iteration once *count* numbers are yielded. *factor*
    defaults to 2. In general retrying with properly-configured
    backoff creates a better-behaved component for a larger service
    ecosystem.

    .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff

    >>> list(backoff_iter(1.0, 10.0, count=5))
    [1.0, 2.0, 4.0, 8.0, 10.0]
    >>> list(backoff_iter(1.0, 10.0, count=8))
    [1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0]
    >>> list(backoff_iter(0.25, 100.0, factor=10))
    [0.25, 2.5, 25.0, 100.0]

    A simplified usage example:

    .. code-block:: python

      for timeout in backoff_iter(0.25, 5.0):
          try:
              res = network_call()
              break
          except Exception as e:
              log(e)
              time.sleep(timeout)

    An enhancement for large-scale systems would be to add variation,
    or *jitter*, to timeout values. This is done to avoid a thundering
    herd on the receiving end of the network call.

    Finally, for *count*, the special value ``'repeat'`` can be passed to
    continue yielding indefinitely.

    Args:
        start (float): Positive number for baseline.
        stop (float): Positive number for maximum.
        count (int): Number of steps before stopping
            iteration. Defaults to the number of steps between *start* and
            *stop*. Pass the string, `'repeat'`, to continue iteration
            indefinitely.
        factor (float): Rate of exponential increase. Defaults to `2.0`,
            e.g., `[1, 2, 4, 8, 16]`.
        jitter (float): A factor between `-1.0` and `1.0`, used to
            uniformly randomize and thus spread out timeouts in a distributed
            system, avoiding rhythm effects. Positive values use the base
            backoff curve as a maximum, negative values use the curve as a
            minimum. Set to 1.0 or `True` for a jitter approximating
            Ethernet's time-tested backoff solution. Defaults to `False`.

    Raises:
        ValueError: if *start*, *stop*, *factor*, *count*, or *jitter*
            fall outside the documented bounds.
    """
    start = float(start)
    stop = float(stop)
    factor = float(factor)
    if start < 0.0:
        raise ValueError('expected start >= 0, not %r' % start)
    if factor < 1.0:
        raise ValueError('expected factor >= 1.0, not %r' % factor)
    # NOTE(review): stop == 0 is rejected even though the message reads
    # ">= 0"; confirm whether zero should actually be allowed.
    if stop == 0.0:
        raise ValueError('expected stop >= 0')
    if stop < start:
        raise ValueError('expected stop >= start, not %r' % stop)
    if count is None:
        # Default count: just enough multiplications by *factor* to climb
        # from *start* to *stop* (one extra step when starting at zero,
        # since the sequence first jumps from 0 to 1 before multiplying).
        denom = start if start else 1
        count = 1 + math.ceil(math.log(stop/denom, factor))
        count = count if start else count + 1
    if count != 'repeat' and count < 0:
        raise ValueError('count must be positive or "repeat", not %r' % count)
    if jitter:
        jitter = float(jitter)
        if not (-1.0 <= jitter <= 1.0):
            raise ValueError('expected jitter -1 <= j <= 1, not: %r' % jitter)
    cur, i = start, 0
    while count == 'repeat' or i < count:
        if not jitter:
            cur_ret = cur
        elif jitter:
            # Positive jitter subtracts a random fraction (curve is a
            # maximum); negative jitter adds one (curve is a minimum).
            cur_ret = cur - (cur * jitter * random.random())
        yield cur_ret
        i += 1
        if cur == 0:
            # Escape zero so the multiplication below can take effect.
            cur = 1
        elif cur < stop:
            cur *= factor
            if cur > stop:
                # Clamp at the configured maximum.
                cur = stop
    return
def bucketize(src, key=None, value_transform=None, key_filter=None):
    """Group values in the *src* iterable by the value returned by *key*,
    which defaults to :class:`bool`, grouping values by truthiness.

    >>> bucketize(range(5))
    {False: [0], True: [1, 2, 3, 4]}
    >>> is_odd = lambda x: x % 2 == 1
    >>> bucketize(range(5), is_odd)
    {False: [0, 2, 4], True: [1, 3]}

    Value lists are not deduplicated:

    >>> bucketize([None, None, None, 'hello'])
    {False: [None, None, None], True: ['hello']}

    Bucketize into more than 3 groups

    >>> bucketize(range(10), lambda x: x % 3)
    {0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}

    Two advanced options are available: *value_transform* modifies each
    value before it lands in its bucket, and *key_filter* excludes
    whole buckets from being collected.

    >>> bucketize(range(5), value_transform=lambda x: x*x)
    {False: [0], True: [1, 4, 9, 16]}

    >>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
    {0: [0, 3, 6, 9], 2: [2, 5, 8]}

    Every key present in the result maps to a list with at least one
    item. See :func:`partition` for a version specialized for the
    binary (``True``/``False``) case.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if key is None:
        key = bool
    if not callable(key):
        raise TypeError('expected callable key function')
    if value_transform is None:
        value_transform = lambda x: x
    if not callable(value_transform):
        raise TypeError('expected callable value transform function')

    buckets = {}
    for item in src:
        bucket_key = key(item)
        # Skip values whose bucket is filtered out entirely.
        if key_filter is not None and not key_filter(bucket_key):
            continue
        buckets.setdefault(bucket_key, []).append(value_transform(item))
    return buckets
def partition(src, key=None):
    """No relation to :meth:`str.partition`; this is :func:`bucketize`
    specialized to two buckets, returned as a convenient tuple of
    ``(truthy_values, falsy_values)``.

    >>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
    >>> nonempty
    ['hi', 'bye']

    *key* defaults to :class:`bool`, but may be any function returning
    either ``True`` or ``False``.

    >>> import string
    >>> is_digit = lambda x: x in string.digits
    >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit)
    >>> ''.join(decimal_digits), ''.join(hexletters)
    ('0123456789', 'abcdefABCDEF')
    """
    groups = bucketize(src, key)
    return groups.get(True, []), groups.get(False, [])
def unique(src, key=None):
    """Return a list of the unique values in *src*, as determined by
    *key*, preserving the order of first appearance.

    >>> ones_n_zeros = '11010110001010010101010'
    >>> ''.join(unique(ones_n_zeros))
    '10'

    See :func:`unique_iter` docs for more details.
    """
    return list(unique_iter(src, key))
def unique_iter(src, key=None):
    """Yield unique elements from the iterable, *src*, based on *key*,
    in the order in which they first appeared in *src*.

    >>> repetitious = [1, 2, 3] * 10
    >>> list(unique_iter(repetitious))
    [1, 2, 3]

    By default each object is its own key. *key* may also be a callable,
    or a string naming an attribute to deduplicate on (falling back to
    the object itself when the attribute is absent).

    >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes']
    >>> list(unique_iter(pleasantries, key=lambda x: len(x)))
    ['hi', 'hello', 'bye']
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable, not %r' % type(src))
    if key is None:
        get_key = lambda obj: obj
    elif callable(key):
        get_key = key
    elif isinstance(key, basestring):
        get_key = lambda obj: getattr(obj, key, obj)
    else:
        raise TypeError('"key" expected a string or callable, not %r' % key)
    encountered = set()
    for item in src:
        marker = get_key(item)
        if marker in encountered:
            continue
        encountered.add(marker)
        yield item
def redundant(src, key=None, groups=False):
    """The complement of :func:`unique()`.

    By default returns non-unique values as a list of the *first*
    redundant value in *src*. Pass ``groups=True`` to get groups of
    all values with redundancies, ordered by position of the first
    redundant value. This is useful in conjunction with some
    normalizing *key* function.

    >>> redundant([1, 2, 3, 4])
    []
    >>> redundant([1, 2, 3, 2, 3, 3, 4])
    [2, 3]
    >>> redundant([1, 2, 3, 2, 3, 3, 4], groups=True)
    [[2, 2], [3, 3, 3]]

    An example using a *key* function to do case-insensitive
    redundancy detection.

    >>> redundant(['hi', 'Hi', 'HI', 'hello'], key=str.lower)
    ['Hi']
    >>> redundant(['hi', 'Hi', 'HI', 'hello'], groups=True, key=str.lower)
    [['hi', 'Hi', 'HI']]

    *key* should also be used when the values in *src* are not hashable.

    .. note::

       The output of this function is designed for reporting
       duplicates in contexts when a unique input is desired. Due to
       the grouped return type, there is no streaming equivalent of
       this function for the time being.
    """
    if key is None:
        key_func = None
    elif callable(key):
        key_func = key
    elif isinstance(key, basestring):
        key_func = lambda x: getattr(x, key, x)
    else:
        raise TypeError('"key" expected a string or callable, not %r' % key)
    seen = {}             # key -> first item observed with that key
    dupe_order = []       # keys in order of first duplication
    dupe_groups = {}      # key -> all items sharing that key
    for item in src:
        k = key_func(item) if key else item
        if k not in seen:
            seen[k] = item
        elif k in dupe_groups:
            # Third-or-later occurrence; only tracked in grouped mode.
            if groups:
                dupe_groups[k].append(item)
        else:
            # Second occurrence: record the group seeded with the
            # original item plus this duplicate.
            dupe_order.append(k)
            dupe_groups[k] = [seen[k], item]
    if groups:
        return [dupe_groups[k] for k in dupe_order]
    return [dupe_groups[k][1] for k in dupe_order]
def one(src, default=None, key=None):
    """Along the same lines as builtins, :func:`all` and :func:`any`, and
    similar to :func:`first`, ``one()`` returns the single object in
    the given iterable *src* that evaluates to ``True``, as determined
    by callable *key*. If unset, *key* defaults to :class:`bool`. If
    no such objects are found, *default* is returned. If *default* is
    not passed, ``None`` is returned.

    If *src* has more than one object that evaluates to ``True``, or
    if there is no object that fulfills such condition, return
    *default*. It's like an `XOR`_ over an iterable.

    >>> one((True, False, False))
    True
    >>> one((True, False, True))
    >>> one((0, 0, 'a'))
    'a'
    >>> one((0, False, None))
    >>> one((True, True), default=False)
    False
    >>> bool(one(('', 1)))
    True
    >>> one((10, 20, 30, 42), key=lambda i: i > 40)
    42

    .. _XOR: https://en.wikipedia.org/wiki/Exclusive_or
    """
    # Only two matches need to be examined: zero or two-plus both mean
    # the answer is *default*.
    matches = list(itertools.islice(filter(key, src), 2))
    if len(matches) != 1:
        return default
    return matches[0]
def first(iterable, default=None, key=None):
    """Return first element of *iterable* that evaluates to ``True``, else
    return ``None`` or optional *default*. Similar to :func:`one`.

    >>> first([0, False, None, [], (), 42])
    42
    >>> first([0, False, None, [], ()]) is None
    True
    >>> first([0, False, None, [], ()], default='ohai')
    'ohai'
    >>> import re
    >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
    >>> m.group(1)
    'bc'

    The optional *key* argument specifies a one-argument predicate like
    that used by :func:`filter`; pass it in keyword form. For example,
    finding the first even number in an iterable:

    >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
    4
    """
    for element in iterable:
        if key is None:
            if element:
                return element
        elif key(element):
            return element
    return default
def flatten_iter(iterable):
    """``flatten_iter()`` yields all the elements from *iterable* while
    collapsing any nested iterables.

    >>> nested = [[1, 2], [[3], [4, 5]]]
    >>> list(flatten_iter(nested))
    [1, 2, 3, 4, 5]
    """
    for element in iterable:
        # Strings are treated as scalars, not as iterables of chars.
        descend = isinstance(element, Iterable) and not isinstance(element, basestring)
        if descend:
            for sub in flatten_iter(element):
                yield sub
        else:
            yield element
def flatten(iterable):
    """``flatten()`` returns a list of all the elements from *iterable*,
    collapsing any nested iterables along the way.

    >>> nested = [[1, 2], [[3], [4, 5]]]
    >>> flatten(nested)
    [1, 2, 3, 4, 5]
    """
    return list(flatten_iter(iterable))
def same(iterable, ref=_UNSET):
    """``same()`` returns ``True`` when all values in *iterable* are
    equal to one another, or optionally a reference value,
    *ref*. Similar to :func:`all` and :func:`any` in that it evaluates
    an iterable and returns a :class:`bool`. ``same()`` returns
    ``True`` for empty iterables.

    >>> same([])
    True
    >>> same([1])
    True
    >>> same(['a', 'a', 'a'])
    True
    >>> same(range(20))
    False
    >>> same([[], []])
    True
    >>> same([[], []], ref='test')
    False
    """
    it = iter(iterable)
    if ref is _UNSET:
        # No explicit reference: compare everything to the first value.
        ref = next(it, ref)
    for val in it:
        if not (val == ref):
            return False
    return True
def default_visit(path, key, value):
    # Identity visit: keep every (key, value) pair unchanged. This is
    # the default *visit* callback for :func:`remap`.
    # print('visit(%r, %r, %r)' % (path, key, value))
    return key, value
# enable the extreme: monkeypatching iterutils with a different default_visit
# remap() compares against this captured reference (identity check) to
# fast-path the default visit, so the alias must be taken before any
# monkeypatching happens.
_orig_default_visit = default_visit
def default_enter(path, key, value):
    """Default *enter* callback for :func:`remap`: return a blank
    container of the same class as *value* plus an iterator over its
    items, or ``(value, False)`` for anything that should not be
    traversed (strings, files, scalars, and other iterables).
    """
    # print('enter(%r, %r)' % (key, value))
    if isinstance(value, basestring):
        # Strings are scalars here, never traversed char-by-char.
        return value, False
    if isinstance(value, Mapping):
        return value.__class__(), ItemsView(value)
    if isinstance(value, Sequence):
        return value.__class__(), enumerate(value)
    if isinstance(value, Set):
        return value.__class__(), enumerate(value)
    # files, strings, other iterables, and scalars are not traversed
    return value, False
def default_exit(path, key, old_parent, new_parent, new_items):
    """Default *exit* callback for :func:`remap`: pour *new_items* into
    *new_parent* and return it. Immutable containers (tuples,
    frozensets) are rebuilt from scratch using the same class.
    """
    # print('exit(%r, %r, %r, %r, %r)'
    #       % (path, key, old_parent, new_parent, new_items))
    if isinstance(new_parent, Mapping):
        new_parent.update(new_items)
        return new_parent
    values = [v for _, v in new_items]
    if isinstance(new_parent, Sequence):
        try:
            new_parent.extend(values)
        except AttributeError:
            return new_parent.__class__(values)  # tuples
        return new_parent
    if isinstance(new_parent, Set):
        try:
            new_parent.update(values)
        except AttributeError:
            return new_parent.__class__(values)  # frozensets
        return new_parent
    raise RuntimeError('unexpected iterable type: %r' % type(new_parent))
def remap(root, visit=default_visit, enter=default_enter, exit=default_exit,
          **kwargs):
    """The remap ("recursive map") function is used to traverse and
    transform nested structures. Lists, tuples, sets, and dictionaries
    are just a few of the data structures nested into heterogenous
    tree-like structures that are so common in programming.
    Unfortunately, Python's built-in ways to manipulate collections
    are almost all flat. List comprehensions may be fast and succinct,
    but they do not recurse, making it tedious to apply quick changes
    or complex transforms to real-world data.

    remap goes where list comprehensions cannot.

    Here's an example of removing all Nones from some data:

    >>> from pprint import pprint
    >>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
    ...            'Babylon 5': 6, 'Dr. Who': None}
    >>> pprint(remap(reviews, lambda p, k, v: v is not None))
    {'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}

    Notice how both Nones have been removed despite the nesting in the
    dictionary. Not bad for a one-liner, and that's just the beginning.
    See `this remap cookbook`_ for more delicious recipes.

    .. _this remap cookbook: http://sedimental.org/remap.html

    remap takes four main arguments: the object to traverse and three
    optional callables which determine how the remapped object will be
    created.

    Args:
        root: The target object to traverse. By default, remap
            supports iterables like :class:`list`, :class:`tuple`,
            :class:`dict`, and :class:`set`, but any object traversable by
            *enter* will work.
        visit (callable): This function is called on every item in
            *root*. It must accept three positional arguments, *path*,
            *key*, and *value*. *path* is simply a tuple of parents'
            keys. *visit* should return the new key-value pair. It may
            also return ``True`` as shorthand to keep the old item
            unmodified, or ``False`` to drop the item from the new
            structure. *visit* is called after *enter*, on the new parent.

            The *visit* function is called for every item in root,
            including duplicate items. For traversable values, it is
            called on the new parent object, after all its children
            have been visited. The default visit behavior simply
            returns the key-value pair unmodified.
        enter (callable): This function controls which items in *root*
            are traversed. It accepts the same arguments as *visit*: the
            path, the key, and the value of the current item. It returns a
            pair of the blank new parent, and an iterator over the items
            which should be visited. If ``False`` is returned instead of
            an iterator, the value will not be traversed.

            The *enter* function is only called once per unique value. The
            default enter behavior support mappings, sequences, and
            sets. Strings and all other iterables will not be traversed.
        exit (callable): This function determines how to handle items
            once they have been visited. It gets the same three
            arguments as the other functions -- *path*, *key*, *value*
            -- plus two more: the blank new parent object returned
            from *enter*, and a list of the new items, as remapped by
            *visit*.

            Like *enter*, the *exit* function is only called once per
            unique value. The default exit behavior is to simply add
            all new items to the new parent, e.g., using
            :meth:`list.extend` and :meth:`dict.update` to add to the
            new parent. Immutable objects, such as a :class:`tuple` or
            :class:`namedtuple`, must be recreated from scratch, but
            use the same type as the new parent passed back from the
            *enter* function.
        reraise_visit (bool): A pragmatic convenience for the *visit*
            callable. When set to ``False``, remap ignores any errors
            raised by the *visit* callback. Items causing exceptions
            are kept. See examples for more details.

    remap is designed to cover the majority of cases with just the
    *visit* callable. While passing in multiple callables is very
    empowering, remap is designed so very few cases should require
    passing more than one function.

    When passing *enter* and *exit*, it's common and easiest to build
    on the default behavior. Simply add ``from boltons.iterutils import
    default_enter`` (or ``default_exit``), and have your enter/exit
    function call the default behavior before or after your custom
    logic. See `this example`_.

    Duplicate and self-referential objects (aka reference loops) are
    automatically handled internally, `as shown here`_.

    .. _this example: http://sedimental.org/remap.html#sort_all_lists
    .. _as shown here: http://sedimental.org/remap.html#corner_cases
    """
    # TODO: improve argument formatting in sphinx doc
    # TODO: enter() return (False, items) to continue traverse but cancel copy?
    if not callable(visit):
        raise TypeError('visit expected callable, not: %r' % visit)
    if not callable(enter):
        raise TypeError('enter expected callable, not: %r' % enter)
    if not callable(exit):
        raise TypeError('exit expected callable, not: %r' % exit)
    reraise_visit = kwargs.pop('reraise_visit', True)
    if kwargs:
        raise TypeError('unexpected keyword arguments: %r' % kwargs.keys())

    # Iterative depth-first traversal driven by an explicit stack.
    # *registry* maps id(obj) -> remapped value; it both deduplicates
    # shared objects and breaks self-referential cycles.  Each entry of
    # *new_items_stack* pairs the path of a container being rebuilt with
    # the visited items destined for it.
    path, registry, stack = (), {}, [(None, root)]
    new_items_stack = []
    while stack:
        key, value = stack.pop()
        id_value = id(value)
        if key is _REMAP_EXIT:
            # Sentinel frame: all children of *old_parent* are done, so
            # finalize it through the *exit* callback.
            key, new_parent, old_parent = value
            id_value = id(old_parent)
            path, new_items = new_items_stack.pop()
            value = exit(path, key, old_parent, new_parent, new_items)
            registry[id_value] = value
            if not new_items_stack:
                continue
        elif id_value in registry:
            # Already remapped (shared or cyclic reference); reuse it.
            value = registry[id_value]
        else:
            res = enter(path, key, value)
            try:
                new_parent, new_items = res
            except TypeError:
                # TODO: handle False?
                raise TypeError('enter should return a tuple of (new_parent,'
                                ' items_iterator), not: %r' % res)
            if new_items is not False:
                # traverse unless False is explicitly passed
                registry[id_value] = new_parent
                new_items_stack.append((path, []))
                if value is not root:
                    path += (key,)
                # Schedule the exit frame *under* the children so it is
                # popped only after they are all processed.
                stack.append((_REMAP_EXIT, (key, new_parent, value)))
                if new_items:
                    # reversed() so children pop off in original order
                    stack.extend(reversed(list(new_items)))
                continue
        if visit is _orig_default_visit:
            # avoid function call overhead by inlining identity operation
            visited_item = (key, value)
        else:
            try:
                visited_item = visit(path, key, value)
            except Exception:
                if reraise_visit:
                    raise
                visited_item = True
            if visited_item is False:
                continue  # drop
            elif visited_item is True:
                visited_item = (key, value)
            # TODO: typecheck?
            # raise TypeError('expected (key, value) from visit(),'
            #                 ' not: %r' % visited_item)
        try:
            new_items_stack[-1][1].append(visited_item)
        except IndexError:
            raise TypeError('expected remappable root, not: %r' % root)
    return value
class PathAccessError(KeyError, IndexError, TypeError):
    """An amalgamation of KeyError, IndexError, and TypeError,
    representing what can occur when looking up a path in a nested
    object. Raised by :func:`get_path`.
    """

    def __init__(self, exc, seg, path):
        # Keep the underlying exception and the failing segment around
        # so callers can see exactly where the lookup broke down.
        self.exc = exc
        self.seg = seg
        self.path = path

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.exc, self.seg, self.path)

    def __str__(self):
        return ('could not access %r from path %r, got error: %r'
                % (self.seg, self.path, self.exc))
def get_path(root, path, default=_UNSET):
    """Retrieve a value from a nested object via a tuple representing the
    lookup path.

    >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
    >>> get_path(root, ('a', 'b', 'c', 2, 0))
    3

    The path format is intentionally consistent with that of
    :func:`remap`, and a dotted string (``'a.b.c'``) is also accepted.

    A chief aim of get_path is improved error messaging: instead of a
    bare ``IndexError: list index out of range`` from
    ``root['a']['b']['c'][2][1]``, it raises a :class:`PathAccessError`
    naming the failing segment and the full path, while still
    subclassing both IndexError and KeyError.

    Args:
        root: The target nesting of dictionaries, lists, or other
            objects supporting ``__getitem__``.
        path (tuple): A list of strings and integers to be successively
            looked up within *root*.
        default: If passed, returned should any ``PathAccessError`` be
            raised at any level; otherwise the error propagates.
    """
    if isinstance(path, basestring):
        path = path.split('.')
    target = root
    try:
        for segment in path:
            try:
                target = target[segment]
            except (KeyError, IndexError) as exc:
                raise PathAccessError(exc, segment, path)
            except TypeError as exc:
                # Either a string index into a sequence, or a parent
                # that does not support indexing at all. Retry with an
                # integer segment before giving up.
                try:
                    segment = int(segment)
                    target = target[segment]
                except (ValueError, KeyError, IndexError, TypeError):
                    if not is_iterable(target):
                        exc = TypeError('%r object is not indexable'
                                        % type(target).__name__)
                    raise PathAccessError(exc, segment, path)
    except PathAccessError:
        if default is _UNSET:
            raise
        return default
    return target
def research(root, query=lambda p, k, v: True, reraise=False):
    """The :func:`research` function uses :func:`remap` to recurse over
    any data nested in *root*, and find values which match a given
    criterion, specified by the *query* callable.

    Results are returned as a list of ``(path, value)`` pairs. The
    paths are tuples in the same format accepted by
    :func:`get_path`. This can be useful for comparing values nested
    in two or more different structures.

    Here's a simple example that finds all integers:

    >>> root = {'a': {'b': 1, 'c': (2, 'd', 3)}, 'e': None}
    >>> res = research(root, query=lambda p, k, v: isinstance(v, int))
    >>> print(sorted(res))
    [(('a', 'b'), 1), (('a', 'c', 0), 2), (('a', 'c', 2), 3)]

    Args:
        root: The target object to search. Supports the same types of
            objects as :func:`remap`, including :class:`list`,
            :class:`tuple`, :class:`dict`, and :class:`set`.
        query (callable): Called on every object with the familiar
            ``path, key, value`` signature (see :func:`remap`); return
            a :class:`bool` to include/exclude the item.
        reraise (bool): Whether to reraise exceptions raised by *query*
            or to simply drop the result that caused the error.
    """
    if not callable(query):
        raise TypeError('query expected callable, not: %r' % query)
    matches = []

    def enter(path, key, value):
        # Collect the match, then delegate to the default traversal.
        try:
            if query(path, key, value):
                matches.append((path + (key,), value))
        except Exception:
            if reraise:
                raise
        return default_enter(path, key, value)

    remap(root, enter=enter)
    return matches
# TODO: recollect()
# TODO: refilter()
# TODO: reiter()
# GUID iterators: 10x faster and somewhat more compact than uuid.
class GUIDerator(object):
    """The GUIDerator is an iterator that yields a globally-unique
    identifier (GUID) on every iteration. The GUIDs produced are
    hexadecimal strings.

    Testing shows it to be around 12x faster than the uuid module. By
    default it is also more compact, partly due to its default 96-bit
    (24-hexdigit) length. 96 bits of randomness means that there is a
    1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more
    or less uniqueness is desired, the *size* argument can be adjusted
    accordingly.

    Args:
        size (int): character length of the GUID, defaults to 24. Lengths
            between 20 and 36 (inclusive) are considered valid.

    Raises:
        ValueError: if *size* is outside the valid range.

    The GUIDerator has built-in fork protection that causes it to
    detect a fork on next iteration and reseed accordingly.
    """

    def __init__(self, size=24):
        # Validate before storing so a bad size never leaves state behind.
        # Bounds are inclusive, matching the check and the docstring.
        if size < 20 or size > 36:
            raise ValueError('expected 20 <= size <= 36')
        self.size = size
        self.count = itertools.count()
        self.reseed()

    def reseed(self):
        """Re-derive the per-process salt. Called on construction and
        whenever a fork is detected (stored PID no longer matches).
        """
        self.pid = os.getpid()
        # '<nohostname>' must be a native str: joining bytes into a str
        # would raise TypeError on py3 when gethostname() returns falsy.
        self.salt = '-'.join([str(self.pid),
                              socket.gethostname() or '<nohostname>',
                              str(time.time()),
                              codecs.encode(os.urandom(6),
                                            'hex_codec').decode('ascii')])
        # that codecs trick is the best/only way to get a bytes to
        # hexbytes in py2/3
        return

    def __iter__(self):
        return self

    if _IS_PY3:
        def __next__(self):
            # Fork protection: a changed PID means we are in a child
            # process and must reseed to avoid duplicate GUID streams.
            if os.getpid() != self.pid:
                self.reseed()
            target_bytes = (self.salt + str(next(self.count))).encode('utf8')
            hash_text = hashlib.sha1(target_bytes).hexdigest()[:self.size]
            return hash_text
    else:
        def __next__(self):
            if os.getpid() != self.pid:
                self.reseed()
            return hashlib.sha1(self.salt +
                                str(next(self.count))).hexdigest()[:self.size]

    next = __next__
class SequentialGUIDerator(GUIDerator):
    """Much like the standard GUIDerator, the SequentialGUIDerator is an
    iterator that yields a globally-unique identifier (GUID) on every
    iteration. The GUIDs produced are hexadecimal strings.

    The SequentialGUIDerator differs in that it picks a starting GUID
    value and increments every iteration. This yields GUIDs which are
    of course unique, but also ordered and lexicographically sortable.

    The SequentialGUIDerator is around 50% faster than the normal
    GUIDerator, making it almost 20x as fast as the built-in uuid
    module. By default it is also more compact, partly due to its
    96-bit (24-hexdigit) default length. 96 bits of randomness means that
    there is a 1 in 2 ^ 32 chance of collision after 2 ^ 64
    iterations. If more or less uniqueness is desired, the *size*
    argument can be adjusted accordingly.

    Args:
        size (int): character length of the GUID, defaults to 24.

    Note that with SequentialGUIDerator there is a chance of GUIDs
    growing larger than the size configured. The SequentialGUIDerator
    has built-in fork protection that causes it to detect a fork on
    next iteration and reseed accordingly.
    """

    if _IS_PY3:
        def reseed(self):
            # Derive a random starting point from the salt hash, then
            # force bit (size*4 - 2) on so the value always renders with
            # the full *size* hex digits.
            super(SequentialGUIDerator, self).reseed()
            start_str = hashlib.sha1(self.salt.encode('utf8')).hexdigest()
            self.start = int(start_str[:self.size], 16)
            self.start |= (1 << ((self.size * 4) - 2))
    else:
        def reseed(self):
            # Same derivation; py2's sha1 accepts the salt str directly.
            super(SequentialGUIDerator, self).reseed()
            start_str = hashlib.sha1(self.salt).hexdigest()
            self.start = int(start_str[:self.size], 16)
            self.start |= (1 << ((self.size * 4) - 2))

    def __next__(self):
        # Fork protection: a changed PID means a child process, which
        # must re-derive its own starting point.
        if os.getpid() != self.pid:
            self.reseed()
        return '%x' % (next(self.count) + self.start)

    next = __next__
# Ready-to-use module-level singletons: ``next(guid_iter)`` for random
# GUIDs, ``next(seq_guid_iter)`` for lexicographically-sortable ones.
guid_iter = GUIDerator()
seq_guid_iter = SequentialGUIDerator()
def soft_sorted(iterable, first=None, last=None, key=None, reverse=False):
    """For when you care about the order of some elements, but not about
    others.

    Use this to float to the top and/or sink to the bottom a specific
    ordering, while sorting the rest of the elements according to
    normal :func:`sorted` rules.

    >>> soft_sorted(['two', 'b', 'one', 'a'], first=['one', 'two'])
    ['one', 'two', 'a', 'b']
    >>> soft_sorted(range(7), first=[6, 15], last=[2, 4], reverse=True)
    [6, 5, 3, 1, 0, 2, 4]
    >>> import string
    >>> ''.join(soft_sorted(string.hexdigits, first='za1', last='b', key=str.lower))
    'aA1023456789cCdDeEfFbB'

    Args:
        iterable (list): A list or other iterable to sort.
        first (list): Keys which should be pinned to the beginning of
            the result, in the order given here.
        last (list): Keys which should be pinned to the end of the
            result, in the order given here.
        key (callable): Generates a comparable key per item, same as
            the key in :func:`sorted`. Entries in *first* and *last*
            are matched against these keys. Defaults to the identity.
        reverse (bool): Whether elements not pinned by *first* or
            *last* should be sorted in reverse order.

    Returns a new list in sorted order.
    """
    head = first or []
    tail = last or []
    key_func = key or (lambda x: x)
    items = list(iterable)

    def pinned(item):
        # True for any item whose key appears in *head* or *tail*.
        k = key_func(item)
        return (head and k in head) or (tail and k in tail)

    middle = [item for item in items if not pinned(item)]
    middle.sort(key=key_func, reverse=reverse)
    head_items = []
    if head:
        head_items = sorted((item for item in items if key_func(item) in head),
                            key=lambda item: head.index(key_func(item)))
    tail_items = []
    if tail:
        tail_items = sorted((item for item in items if key_func(item) in tail),
                            key=lambda item: tail.index(key_func(item)))
    return head_items + middle + tail_items
"""
May actually be faster to do an isinstance check for a str path
$ python -m timeit -s "x = [1]" "x[0]"
10000000 loops, best of 3: 0.0207 usec per loop
$ python -m timeit -s "x = [1]" "try: x[0] \nexcept: pass"
10000000 loops, best of 3: 0.029 usec per loop
$ python -m timeit -s "x = [1]" "try: x[1] \nexcept: pass"
1000000 loops, best of 3: 0.315 usec per loop
# setting up try/except is fast, only around 0.01us
# actually triggering the exception takes almost 10x as long
$ python -m timeit -s "x = [1]" "isinstance(x, basestring)"
10000000 loops, best of 3: 0.141 usec per loop
$ python -m timeit -s "x = [1]" "isinstance(x, str)"
10000000 loops, best of 3: 0.131 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.')\n except: pass"
1000000 loops, best of 3: 0.443 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.') \nexcept AttributeError: pass"
1000000 loops, best of 3: 0.544 usec per loop
"""
|
en
| 0.733082
|
# -*- coding: utf-8 -*- :mod:`itertools` is full of great examples of Python generator usage. However, there are still some critical gaps. ``iterutils`` fills many of those gaps with featureful, tested, and Pythonic solutions. Many of the functions below have two versions, one which returns an iterator (denoted by the ``*_iter`` naming pattern), and a shorter-named convenience form that returns a list. Some of the following are based on examples in itertools docs. # Python 3 compat Similar in nature to :func:`callable`, ``is_iterable`` returns ``True`` if an object is `iterable`_, ``False`` if not. >>> is_iterable([]) True >>> is_iterable(object()) False .. _iterable: https://docs.python.org/2/glossary.html#term-iterable A near-mirror of :func:`is_iterable`. Returns ``False`` if an object is an iterable container type. Strings are considered scalar as well, because strings are more often treated as whole values as opposed to iterables of 1-character substrings. >>> is_scalar(object()) True >>> is_scalar(range(10)) False >>> is_scalar('hello') True The opposite of :func:`is_scalar`. Returns ``True`` if an object is an iterable other than a string. >>> is_collection(object()) False >>> is_collection(range(10)) True >>> is_collection('hello') False Splits an iterable based on a separator. Like :meth:`str.split`, but for all iterables. Returns a list of lists. >>> split(['hi', 'hello', None, None, 'sup', None, 'soap', None]) [['hi', 'hello'], ['sup'], ['soap']] See :func:`split_iter` docs for more info. Splits an iterable based on a separator, *sep*, a max of *maxsplit* times (no max by default). *sep* can be: * a single value * an iterable of separators * a single-argument callable that returns True when a separator is encountered ``split_iter()`` yields lists of non-separator values. A separator will never appear in the output. 
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None])) [['hi', 'hello'], ['sup'], ['soap']] Note that ``split_iter`` is based on :func:`str.split`, so if *sep* is ``None``, ``split()`` **groups** separators. If empty lists are desired between two contiguous ``None`` values, simply use ``sep=[None]``: >>> list(split_iter(['hi', 'hello', None, None, 'sup', None])) [['hi', 'hello'], ['sup']] >>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None])) [['hi', 'hello'], [], ['sup'], []] Using a callable separator: >>> falsy_sep = lambda x: not x >>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep)) [['hi', 'hello'], [], ['sup'], []] See :func:`split` for a list-returning version. # If sep is none, str.split() "groups" separators # check the str.split() docs for more info Returns a list of *count* chunks, each with *size* elements, generated from iterable *src*. If *src* is not evenly divisible by *size*, the final chunk will have fewer than *size* elements. Provide the *fill* keyword argument to provide a pad value and enable padding, otherwise no padding will take place. >>> chunked(range(10), 3) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> chunked(range(10), 3, fill=None) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] >>> chunked(range(10), 3, count=2) [[0, 1, 2], [3, 4, 5]] See :func:`chunked_iter` for more info. Generates *size*-sized chunks from *src* iterable. Unless the optional *fill* keyword argument is provided, iterables not even divisible by *size* will have a final chunk that is smaller than *size*. >>> list(chunked_iter(range(10), 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> list(chunked_iter(range(10), 3, fill=None)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] Note that ``fill=None`` in fact uses ``None`` as the fill value. # TODO: add count kwarg? Convenience function for calling :func:`windowed` on *src*, with *size* set to 2. 
>>> pairwise(range(5)) [(0, 1), (1, 2), (2, 3), (3, 4)] >>> pairwise([]) [] The number of pairs is always one less than the number of elements in the iterable passed in, except on empty inputs, which returns an empty list. Convenience function for calling :func:`windowed_iter` on *src*, with *size* set to 2. >>> list(pairwise_iter(range(5))) [(0, 1), (1, 2), (2, 3), (3, 4)] >>> list(pairwise_iter([])) [] The number of pairs is always one less than the number of elements in the iterable passed in, or zero, when *src* is empty. Returns tuples with exactly length *size*. If the iterable is too short to make a window of length *size*, no tuples are returned. See :func:`windowed_iter` for more. Returns tuples with length *size* which represent a sliding window over iterable *src*. >>> list(windowed_iter(range(7), 3)) [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] If the iterable is too short to make a window of length *size*, then no window tuples are returned. >>> list(windowed_iter(range(3), 5)) [] # TODO: lists? (for consistency) Same as :func:`frange`, but generator-based instead of returning a list. >>> tuple(xfrange(1, 3, step=0.75)) (1.0, 1.75, 2.5) See :func:`frange` for more details. # swap when all args are used A :func:`range` clone for float-based ranges. >>> frange(5) [0.0, 1.0, 2.0, 3.0, 4.0] >>> frange(6, step=1.25) [0.0, 1.25, 2.5, 3.75, 5.0] >>> frange(100.5, 101.5, 0.25) [100.5, 100.75, 101.0, 101.25] >>> frange(5, 0) [] >>> frange(5, 0, step=-1.25) [5.0, 3.75, 2.5, 1.25] # swap when all args are used Returns a list of geometrically-increasing floating-point numbers, suitable for usage with `exponential backoff`_. Exactly like :func:`backoff_iter`, but without the ``'repeat'`` option for *count*. See :func:`backoff_iter` for more details. .. 
_exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff >>> backoff(1, 10) [1.0, 2.0, 4.0, 8.0, 10.0] Generates a sequence of geometrically-increasing floats, suitable for usage with `exponential backoff`_. Starts with *start*, increasing by *factor* until *stop* is reached, optionally stopping iteration once *count* numbers are yielded. *factor* defaults to 2. In general retrying with properly-configured backoff creates a better-behaved component for a larger service ecosystem. .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff >>> list(backoff_iter(1.0, 10.0, count=5)) [1.0, 2.0, 4.0, 8.0, 10.0] >>> list(backoff_iter(1.0, 10.0, count=8)) [1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0] >>> list(backoff_iter(0.25, 100.0, factor=10)) [0.25, 2.5, 25.0, 100.0] A simplified usage example: .. code-block:: python for timeout in backoff_iter(0.25, 5.0): try: res = network_call() break except Exception as e: log(e) time.sleep(timeout) An enhancement for large-scale systems would be to add variation, or *jitter*, to timeout values. This is done to avoid a thundering herd on the receiving end of the network call. Finally, for *count*, the special value ``'repeat'`` can be passed to continue yielding indefinitely. Args: start (float): Positive number for baseline. stop (float): Positive number for maximum. count (int): Number of steps before stopping iteration. Defaults to the number of steps between *start* and *stop*. Pass the string, `'repeat'`, to continue iteration indefinitely. factor (float): Rate of exponential increase. Defaults to `2.0`, e.g., `[1, 2, 4, 8, 16]`. jitter (float): A factor between `-1.0` and `1.0`, used to uniformly randomize and thus spread out timeouts in a distributed system, avoiding rhythm effects. Positive values use the base backoff curve as a maximum, negative values use the curve as a minimum. Set to 1.0 or `True` for a jitter approximating Ethernet's time-tested backoff solution. Defaults to `False`. 
Group values in the *src* iterable by the value returned by *key*, which defaults to :class:`bool`, grouping values by truthiness. >>> bucketize(range(5)) {False: [0], True: [1, 2, 3, 4]} >>> is_odd = lambda x: x % 2 == 1 >>> bucketize(range(5), is_odd) {False: [0, 2, 4], True: [1, 3]} Value lists are not deduplicated: >>> bucketize([None, None, None, 'hello']) {False: [None, None, None], True: ['hello']} Bucketize into more than 3 groups >>> bucketize(range(10), lambda x: x % 3) {0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]} ``bucketize`` has a couple of advanced options useful in certain cases. *value_transform* can be used to modify values as they are added to buckets, and *key_filter* will allow excluding certain buckets from being collected. >>> bucketize(range(5), value_transform=lambda x: x*x) {False: [0], True: [1, 4, 9, 16]} >>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1) {0: [0, 3, 6, 9], 2: [2, 5, 8]} Note in some of these examples there were at most two keys, ``True`` and ``False``, and each key present has a list with at least one item. See :func:`partition` for a version specialized for binary use cases. No relation to :meth:`str.partition`, ``partition`` is like :func:`bucketize`, but for added convenience returns a tuple of ``(truthy_values, falsy_values)``. >>> nonempty, empty = partition(['', '', 'hi', '', 'bye']) >>> nonempty ['hi', 'bye'] *key* defaults to :class:`bool`, but can be carefully overridden to use any function that returns either ``True`` or ``False``. >>> import string >>> is_digit = lambda x: x in string.digits >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit) >>> ''.join(decimal_digits), ''.join(hexletters) ('0123456789', 'abcdefABCDEF') ``unique()`` returns a list of unique values, as determined by *key*, in the order they first appeared in the input iterable, *src*. 
>>> ones_n_zeros = '11010110001010010101010' >>> ''.join(unique(ones_n_zeros)) '10' See :func:`unique_iter` docs for more details. Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye'] The complement of :func:`unique()`. By default returns non-unique values as a list of the *first* redundant value in *src*. Pass ``groups=True`` to get groups of all values with redundancies, ordered by position of the first redundant value. This is useful in conjunction with some normalizing *key* function. >>> redundant([1, 2, 3, 4]) [] >>> redundant([1, 2, 3, 2, 3, 3, 4]) [2, 3] >>> redundant([1, 2, 3, 2, 3, 3, 4], groups=True) [[2, 2], [3, 3, 3]] An example using a *key* function to do case-insensitive redundancy detection. >>> redundant(['hi', 'Hi', 'HI', 'hello'], key=str.lower) ['Hi'] >>> redundant(['hi', 'Hi', 'HI', 'hello'], groups=True, key=str.lower) [['hi', 'Hi', 'HI']] *key* should also be used when the values in *src* are not hashable. .. note:: This output of this function is designed for reporting duplicates in contexts when a unique input is desired. Due to the grouped return type, there is no streaming equivalent of this function for the time being. # key to first seen item Along the same lines as builtins, :func:`all` and :func:`any`, and similar to :func:`first`, ``one()`` returns the single object in the given iterable *src* that evaluates to ``True``, as determined by callable *key*. If unset, *key* defaults to :class:`bool`. If no such objects are found, *default* is returned. 
If *default* is not passed, ``None`` is returned. If *src* has more than one object that evaluates to ``True``, or if there is no object that fulfills such condition, return *default*. It's like an `XOR`_ over an iterable. >>> one((True, False, False)) True >>> one((True, False, True)) >>> one((0, 0, 'a')) 'a' >>> one((0, False, None)) >>> one((True, True), default=False) False >>> bool(one(('', 1))) True >>> one((10, 20, 30, 42), key=lambda i: i > 40) 42 See `<NAME>'s original repo`_ for further use cases. .. _<NAME>'s original repo: https://github.com/mgaitan/one .. _XOR: https://en.wikipedia.org/wiki/Exclusive_or Return first element of *iterable* that evaluates to ``True``, else return ``None`` or optional *default*. Similar to :func:`one`. >>> first([0, False, None, [], (), 42]) 42 >>> first([0, False, None, [], ()]) is None True >>> first([0, False, None, [], ()], default='ohai') 'ohai' >>> import re >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)']) >>> m.group(1) 'bc' The optional *key* argument specifies a one-argument predicate function like that used for *filter()*. The *key* argument, if supplied, should be in keyword form. For example, finding the first even number in an iterable: >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0) 4 Contributed by <NAME>, author of `the original standalone module`_. .. _the original standalone module: https://github.com/hynek/first ``flatten_iter()`` yields all the elements from *iterable* while collapsing any nested iterables. >>> nested = [[1, 2], [[3], [4, 5]]] >>> list(flatten_iter(nested)) [1, 2, 3, 4, 5] ``flatten()`` returns a collapsed list of all the elements from *iterable* while collapsing any nested iterables. >>> nested = [[1, 2], [[3], [4, 5]]] >>> flatten(nested) [1, 2, 3, 4, 5] ``same()`` returns ``True`` when all values in *iterable* are equal to one another, or optionally a reference value, *ref*. 
Similar to :func:`all` and :func:`any` in that it evaluates an iterable and returns a :class:`bool`. ``same()`` returns ``True`` for empty iterables. >>> same([]) True >>> same([1]) True >>> same(['a', 'a', 'a']) True >>> same(range(20)) False >>> same([[], []]) True >>> same([[], []], ref='test') False # print('visit(%r, %r, %r)' % (path, key, value)) # enable the extreme: monkeypatching iterutils with a different default_visit # print('enter(%r, %r)' % (key, value)) # files, strings, other iterables, and scalars are not # traversed # print('exit(%r, %r, %r, %r, %r)' # % (path, key, old_parent, new_parent, new_items)) # tuples # frozensets The remap ("recursive map") function is used to traverse and transform nested structures. Lists, tuples, sets, and dictionaries are just a few of the data structures nested into heterogenous tree-like structures that are so common in programming. Unfortunately, Python's built-in ways to manipulate collections are almost all flat. List comprehensions may be fast and succinct, but they do not recurse, making it tedious to apply quick changes or complex transforms to real-world data. remap goes where list comprehensions cannot. Here's an example of removing all Nones from some data: >>> from pprint import pprint >>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None}, ... 'Babylon 5': 6, 'Dr. Who': None} >>> pprint(remap(reviews, lambda p, k, v: v is not None)) {'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}} Notice how both Nones have been removed despite the nesting in the dictionary. Not bad for a one-liner, and that's just the beginning. See `this remap cookbook`_ for more delicious recipes. .. _this remap cookbook: http://sedimental.org/remap.html remap takes four main arguments: the object to traverse and three optional callables which determine how the remapped object will be created. Args: root: The target object to traverse. 
By default, remap supports iterables like :class:`list`, :class:`tuple`, :class:`dict`, and :class:`set`, but any object traversable by *enter* will work. visit (callable): This function is called on every item in *root*. It must accept three positional arguments, *path*, *key*, and *value*. *path* is simply a tuple of parents' keys. *visit* should return the new key-value pair. It may also return ``True`` as shorthand to keep the old item unmodified, or ``False`` to drop the item from the new structure. *visit* is called after *enter*, on the new parent. The *visit* function is called for every item in root, including duplicate items. For traversable values, it is called on the new parent object, after all its children have been visited. The default visit behavior simply returns the key-value pair unmodified. enter (callable): This function controls which items in *root* are traversed. It accepts the same arguments as *visit*: the path, the key, and the value of the current item. It returns a pair of the blank new parent, and an iterator over the items which should be visited. If ``False`` is returned instead of an iterator, the value will not be traversed. The *enter* function is only called once per unique value. The default enter behavior support mappings, sequences, and sets. Strings and all other iterables will not be traversed. exit (callable): This function determines how to handle items once they have been visited. It gets the same three arguments as the other functions -- *path*, *key*, *value* -- plus two more: the blank new parent object returned from *enter*, and a list of the new items, as remapped by *visit*. Like *enter*, the *exit* function is only called once per unique value. The default exit behavior is to simply add all new items to the new parent, e.g., using :meth:`list.extend` and :meth:`dict.update` to add to the new parent. 
Immutable objects, such as a :class:`tuple` or :class:`namedtuple`, must be recreated from scratch, but use the same type as the new parent passed back from the *enter* function. reraise_visit (bool): A pragmatic convenience for the *visit* callable. When set to ``False``, remap ignores any errors raised by the *visit* callback. Items causing exceptions are kept. See examples for more details. remap is designed to cover the majority of cases with just the *visit* callable. While passing in multiple callables is very empowering, remap is designed so very few cases should require passing more than one function. When passing *enter* and *exit*, it's common and easiest to build on the default behavior. Simply add ``from boltons.iterutils import default_enter`` (or ``default_exit``), and have your enter/exit function call the default behavior before or after your custom logic. See `this example`_. Duplicate and self-referential objects (aka reference loops) are automatically handled internally, `as shown here`_. .. _this example: http://sedimental.org/remap.html#sort_all_lists .. _as shown here: http://sedimental.org/remap.html#corner_cases # TODO: improve argument formatting in sphinx doc # TODO: enter() return (False, items) to continue traverse but cancel copy? # TODO: handle False? # traverse unless False is explicitly passed # avoid function call overhead by inlining identity operation # drop # TODO: typecheck? # raise TypeError('expected (key, value) from visit(),' # ' not: %r' % visited_item) An amalgamation of KeyError, IndexError, and TypeError, representing what can occur when looking up a path in a nested object. Retrieve a value from a nested object via a tuple representing the lookup path. >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}} >>> get_path(root, ('a', 'b', 'c', 2, 0)) 3 The path format is intentionally consistent with that of :func:`remap`. One of get_path's chief aims is improved error messaging. EAFP is great, but the error messages are not. 
For instance, ``root['a']['b']['c'][2][1]`` gives back ``IndexError: list index out of range`` What went out of range where? get_path currently raises ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2, 1), got error: IndexError('list index out of range',)``, a subclass of IndexError and KeyError. You can also pass a default that covers the entire operation, should the lookup fail at any level. Args: root: The target nesting of dictionaries, lists, or other objects supporting ``__getitem__``. path (tuple): A list of strings and integers to be successively looked up within *root*. default: The value to be returned should any ``PathAccessError`` exceptions be raised. # either string index in a list, or a parent that # doesn't support indexing The :func:`research` function uses :func:`remap` to recurse over any data nested in *root*, and find values which match a given criterion, specified by the *query* callable. Results are returned as a list of ``(path, value)`` pairs. The paths are tuples in the same format accepted by :func:`get_path`. This can be useful for comparing values nested in two or more different structures. Here's a simple example that finds all integers: >>> root = {'a': {'b': 1, 'c': (2, 'd', 3)}, 'e': None} >>> res = research(root, query=lambda p, k, v: isinstance(v, int)) >>> print(sorted(res)) [(('a', 'b'), 1), (('a', 'c', 0), 2), (('a', 'c', 2), 3)] Note how *query* follows the same, familiar ``path, key, value`` signature as the ``visit`` and ``enter`` functions on :func:`remap`, and returns a :class:`bool`. Args: root: The target object to search. Supports the same types of objects as :func:`remap`, including :class:`list`, :class:`tuple`, :class:`dict`, and :class:`set`. query (callable): The function called on every object to determine whether to include it in the search results. 
The callable must accept three arguments, *path*, *key*, and *value*, commonly abbreviated *p*, *k*, and *v*, same as *enter* and *visit* from :func:`remap`. reraise (bool): Whether to reraise exceptions raised by *query* or to simply drop the result that caused the error. With :func:`research` it's easy to inspect the details of a data structure, like finding values that are at a certain depth (using ``len(p)``) and much more. If more advanced functionality is needed, check out the code and make your own :func:`remap` wrapper, and consider `submitting a patch`_! .. _submitting a patch: https://github.com/mahmoud/boltons/pulls # TODO: recollect() # TODO: refilter() # TODO: reiter() # GUID iterators: 10x faster and somewhat more compact than uuid. The GUIDerator is an iterator that yields a globally-unique identifier (GUID) on every iteration. The GUIDs produced are hexadecimal strings. Testing shows it to be around 12x faster than the uuid module. By default it is also more compact, partly due to its default 96-bit (24-hexdigit) length. 96 bits of randomness means that there is a 1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more or less uniqueness is desired, the *size* argument can be adjusted accordingly. Args: size (int): character length of the GUID, defaults to 24. Lengths between 20 and 36 are considered valid. The GUIDerator has built-in fork protection that causes it to detect a fork on next iteration and reseed accordingly. # that codecs trick is the best/only way to get a bytes to # hexbytes in py2/3 Much like the standard GUIDerator, the SequentialGUIDerator is an iterator that yields a globally-unique identifier (GUID) on every iteration. The GUIDs produced are hexadecimal strings. The SequentialGUIDerator differs in that it picks a starting GUID value and increments every iteration. This yields GUIDs which are of course unique, but also ordered and lexicographically sortable. 
The SequentialGUIDerator is around 50% faster than the normal GUIDerator, making it almost 20x as fast as the built-in uuid module. By default it is also more compact, partly due to its 96-bit (24-hexdigit) default length. 96 bits of randomness means that there is a 1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more or less uniqueness is desired, the *size* argument can be adjusted accordingly. Args: size (int): character length of the GUID, defaults to 24. Note that with SequentialGUIDerator there is a chance of GUIDs growing larger than the size configured. The SequentialGUIDerator has built-in fork protection that causes it to detect a fork on next iteration and reseed accordingly. For when you care about the order of some elements, but not about others. Use this to float to the top and/or sink to the bottom a specific ordering, while sorting the rest of the elements according to normal :func:`sorted` rules. >>> soft_sorted(['two', 'b', 'one', 'a'], first=['one', 'two']) ['one', 'two', 'a', 'b'] >>> soft_sorted(range(7), first=[6, 15], last=[2, 4], reverse=True) [6, 5, 3, 1, 0, 2, 4] >>> import string >>> ''.join(soft_sorted(string.hexdigits, first='za1', last='b', key=str.lower)) 'aA1023456789cCdDeEfFbB' Args: iterable (list): A list or other iterable to sort. first (list): A sequence to enforce for elements which should appear at the beginning of the returned list. last (list): A sequence to enforce for elements which should appear at the end of the returned list. key (callable): Callable used to generate a comparable key for each item to be sorted, same as the key in :func:`sorted`. Note that entries in *first* and *last* should be the keys for the items. Defaults to passthrough/the identity function. reverse (bool): Whether or not elements not explicitly ordered by *first* and *last* should be in reverse order or not. Returns a new list in sorted order. 
May actually be faster to do an isinstance check for a str path $ python -m timeit -s "x = [1]" "x[0]" 10000000 loops, best of 3: 0.0207 usec per loop $ python -m timeit -s "x = [1]" "try: x[0] \nexcept: pass" 10000000 loops, best of 3: 0.029 usec per loop $ python -m timeit -s "x = [1]" "try: x[1] \nexcept: pass" 1000000 loops, best of 3: 0.315 usec per loop # setting up try/except is fast, only around 0.01us # actually triggering the exception takes almost 10x as long $ python -m timeit -s "x = [1]" "isinstance(x, basestring)" 10000000 loops, best of 3: 0.141 usec per loop $ python -m timeit -s "x = [1]" "isinstance(x, str)" 10000000 loops, best of 3: 0.131 usec per loop $ python -m timeit -s "x = [1]" "try: x.split('.')\n except: pass" 1000000 loops, best of 3: 0.443 usec per loop $ python -m timeit -s "x = [1]" "try: x.split('.') \nexcept AttributeError: pass" 1000000 loops, best of 3: 0.544 usec per loop
| 2.957244
| 3
|
mwe.py
|
lenoch/tagsetbench
| 0
|
6628767
|
# from copy import deepcopy
import re
from symbols import Token
ORDINAL_NUMBER = re.compile('(\d+)\.')
# TODO: not used anywhere (maybe not even in the tests); split this up
# (reuse it in modification.handle_mwes) and retire it.
def handle_mwes(tokens, args):
    """Yield vertical-format lines, compressing or expanding MWE tokens.

    NOTE: most MWEs are kept in the compact form, so by default they are
    left as they are.
    TODO: only selected kinds of MWEs will be expanded, e.g. ordinal
    numerals, numbers with spaces/decimals, abbreviations (with a dot)
    and names (unfortunately foreign ones are not distinguished).

    :param tokens: iterable of ``(xml_tag, token)`` pairs; exactly one of
        the two is truthy in each pair
    :param args: mapping with ``'compress'`` and ``'expand'`` collections
        of MWE type names (``'ordinal'``, ``'abbreviation'``,
        ``'multiword'``)
    """
    to_compress = args['compress']
    to_expand = args['expand']
    for xml_tag, token in tokens:
        if xml_tag:
            # XML markup is passed through unchanged.
            for line_number, line in xml_tag.original_lines:
                yield line
        else:
            mwe_type = guess_type(token)
            if mwe_type and mwe_type in to_compress:
                # NOTE(review): compress_token() below is declared with a
                # single parameter -- this two-argument call looks stale;
                # confirm the intended signature before using this path.
                yield from compress_token(token, mwe_type).plain_vertical()
            elif mwe_type and mwe_type in to_expand:
                yield from expand_token(token, mwe_type).plain_vertical()
            else:
                # Unchanged tokens are re-emitted from their original lines.
                for line_number, line in token.original_lines:
                    yield line
def guess_type(token):
    """Classify *token* as ``'ordinal'``, ``'abbreviation'`` or ``'multiword'``.

    Returns ``None`` when the token does not look like any supported
    MWE kind.
    """
    word = token['word']
    if word.endswith('.'):
        # Trailing dot: either digits + dot (ordinal) or a marked
        # abbreviation (z='A'); anything else is not an MWE.
        if ORDINAL_NUMBER.match(word):
            return 'ordinal'
        if token.get('z') == 'A':
            return 'abbreviation'
        return None
    if ' ' in word:
        return 'multiword'
    return None
def expand_tokens(tokens, output_file):
    """Convert single-line MWEs to <phr/> elements (not implemented yet).

    TODO: convert a one-line MWE into <phr/>.
    TODO: simply convert a <phr/> with w=, l=, t= and <g/> into the
    one-line notation: a single line holds the word (which may contain
    spaces), likewise the lemma (not only spaces -- also dots and other
    things the tokenizer emits separately), and finally a shared tag --
    or perhaps as many tags as there are "dumb" tokens -- all of it
    separated by tabs.

    So this will re-tokenize (reconstruct how it was originally dumbly
    tokenized) and assign tags -- either one and the same tag, or (for
    numbers) one matching the nature of the token (via simple regexes).
    """
    pass
# TODO: this function is most likely already used in Modifier.bootstrap().
def compress_token(token):
    """Compress a <phr/> into one vertical line (not implemented yet).

    Return regular tokens unchanged.  Return <phr/> as a single line
    (with tabs to delimit columns/"attributes", and squashing <g/>).
    """
    pass
def expand_token(token, mwe_type=None, force=False):
    """Return *token* with its ``tokens`` attribute split into sub-tokens.

    Regular tokens come back unchanged.  A token with heterogeneous
    content (punctuation, spaces) is returned as a <phr/> with each
    sub-token on a line of its own, delimited by <g/> where there was no
    whitespace.
    """
    # TODO: the tests do not pass mwe_type yet.
    if mwe_type is None:
        mwe_type = guess_type(token)
    if mwe_type == 'ordinal':
        match = ORDINAL_NUMBER.match(token['word'])
        if match:
            return expand_ordinal(token, match)
    elif mwe_type == 'abbreviation':
        return expand_abbrev(token)
    elif mwe_type == 'multiword':
        expanded = expand_multiword(token)
        if expanded:
            return expanded
    # Fallback: either no MWE type was recognised or the expansion did
    # not apply; with force=True wrap the token in a single clone of
    # itself before returning it.
    if force:
        token.tokens = [Token.from_Token(token)]
    return token
def expand_abbrev(token):
    """Split an abbreviation token into its stem and a final '.' token."""
    stem = Token.from_Token(token, word=token['word'][:-1])
    # The stem and the dot were glued together in the original text.
    stem.trailing_whitespace = False
    final_dot = Token(word='.', lemma='.', k='I', x='.', final=True,
                      begin=token.begin)
    token.tokens = [stem, final_dot]
    return token
def expand_ordinal(token, ordinal):
    """Split an ordinal such as '12.' into a number token and a '.' token.

    :param ordinal: a match of ORDINAL_NUMBER against the token's word;
        group 1 holds the digits
    """
    digits = ordinal.group(1)
    number = Token(word=digits, lemma='#' * len(digits), k='4',
                   trailing_whitespace=False)
    final_dot = Token(word='.', lemma='.', k='I', x='.', final=True)
    token.tokens = [number, final_dot]
    # TODO: also test the re-assembled token, to check that
    # trailing_whitespace and final actually take effect.
    for sub_token in token.tokens:
        sub_token.begin = 0
    return token
def expand_multiword(token):
    """Split a space-containing word/lemma pair into parallel sub-tokens.

    Returns None when the number of words and lemmata differ.  This is
    naive whitespace tokenization; if there were apostrophes and the
    like, they would of course need handling too.
    TODO: tokenizing can again be done with regexes...
    TODO: then replace this with a smarter tokenizer.
    """
    words = token['word'].split()
    lemmata = token['lemma'].split()
    if len(words) != len(lemmata):
        return None
    # TODO: again, simply clone the original token first so the tags
    # survive (no new tags are invented here -- this is not a tagger).
    token.tokens = [Token.from_Token(token, word=word, lemma=lemma)
                    for word, lemma in zip(words, lemmata)]
    return token
|
# from copy import deepcopy
import re
from symbols import Token
ORDINAL_NUMBER = re.compile('(\d+)\.')
# TODO: not used anywhere (maybe not even in the tests); split this up
# (reuse it in modification.handle_mwes) and retire it.
def handle_mwes(tokens, args):
    """Yield vertical-format lines, compressing or expanding MWE tokens.

    NOTE: most MWEs are kept in the compact form, so by default they are
    left as they are.
    TODO: only selected kinds of MWEs will be expanded, e.g. ordinal
    numerals, numbers with spaces/decimals, abbreviations (with a dot)
    and names (unfortunately foreign ones are not distinguished).

    :param tokens: iterable of ``(xml_tag, token)`` pairs; exactly one of
        the two is truthy in each pair
    :param args: mapping with ``'compress'`` and ``'expand'`` collections
        of MWE type names (``'ordinal'``, ``'abbreviation'``,
        ``'multiword'``)
    """
    to_compress = args['compress']
    to_expand = args['expand']
    for xml_tag, token in tokens:
        if xml_tag:
            # XML markup is passed through unchanged.
            for line_number, line in xml_tag.original_lines:
                yield line
        else:
            mwe_type = guess_type(token)
            if mwe_type and mwe_type in to_compress:
                # NOTE(review): compress_token() below is declared with a
                # single parameter -- this two-argument call looks stale;
                # confirm the intended signature before using this path.
                yield from compress_token(token, mwe_type).plain_vertical()
            elif mwe_type and mwe_type in to_expand:
                yield from expand_token(token, mwe_type).plain_vertical()
            else:
                # Unchanged tokens are re-emitted from their original lines.
                for line_number, line in token.original_lines:
                    yield line
def guess_type(token):
    """Classify *token* as ``'ordinal'``, ``'abbreviation'`` or ``'multiword'``.

    Returns ``None`` when the token does not look like any supported
    MWE kind.
    """
    word = token['word']
    if word.endswith('.'):
        # Trailing dot: either digits + dot (ordinal) or a marked
        # abbreviation (z='A'); anything else is not an MWE.
        if ORDINAL_NUMBER.match(word):
            return 'ordinal'
        if token.get('z') == 'A':
            return 'abbreviation'
        return None
    if ' ' in word:
        return 'multiword'
    return None
def expand_tokens(tokens, output_file):
    """Convert single-line MWEs to <phr/> elements (not implemented yet).

    TODO: convert a one-line MWE into <phr/>.
    TODO: simply convert a <phr/> with w=, l=, t= and <g/> into the
    one-line notation: a single line holds the word (which may contain
    spaces), likewise the lemma (not only spaces -- also dots and other
    things the tokenizer emits separately), and finally a shared tag --
    or perhaps as many tags as there are "dumb" tokens -- all of it
    separated by tabs.

    So this will re-tokenize (reconstruct how it was originally dumbly
    tokenized) and assign tags -- either one and the same tag, or (for
    numbers) one matching the nature of the token (via simple regexes).
    """
    pass
# TODO: this function is most likely already used in Modifier.bootstrap().
def compress_token(token):
    """Compress a <phr/> into one vertical line (not implemented yet).

    Return regular tokens unchanged.  Return <phr/> as a single line
    (with tabs to delimit columns/"attributes", and squashing <g/>).
    """
    pass
def expand_token(token, mwe_type=None, force=False):
    """Return *token* with its ``tokens`` attribute split into sub-tokens.

    Regular tokens come back unchanged.  A token with heterogeneous
    content (punctuation, spaces) is returned as a <phr/> with each
    sub-token on a line of its own, delimited by <g/> where there was no
    whitespace.
    """
    # TODO: the tests do not pass mwe_type yet.
    if mwe_type is None:
        mwe_type = guess_type(token)
    if mwe_type == 'ordinal':
        match = ORDINAL_NUMBER.match(token['word'])
        if match:
            return expand_ordinal(token, match)
    elif mwe_type == 'abbreviation':
        return expand_abbrev(token)
    elif mwe_type == 'multiword':
        expanded = expand_multiword(token)
        if expanded:
            return expanded
    # Fallback: either no MWE type was recognised or the expansion did
    # not apply; with force=True wrap the token in a single clone of
    # itself before returning it.
    if force:
        token.tokens = [Token.from_Token(token)]
    return token
def expand_abbrev(token):
    """Split an abbreviation token into its stem and a final '.' token."""
    stem = Token.from_Token(token, word=token['word'][:-1])
    # The stem and the dot were glued together in the original text.
    stem.trailing_whitespace = False
    final_dot = Token(word='.', lemma='.', k='I', x='.', final=True,
                      begin=token.begin)
    token.tokens = [stem, final_dot]
    return token
def expand_ordinal(token, ordinal):
    """Split an ordinal such as '12.' into a number token and a '.' token.

    :param ordinal: a match of ORDINAL_NUMBER against the token's word;
        group 1 holds the digits
    """
    digits = ordinal.group(1)
    number = Token(word=digits, lemma='#' * len(digits), k='4',
                   trailing_whitespace=False)
    final_dot = Token(word='.', lemma='.', k='I', x='.', final=True)
    token.tokens = [number, final_dot]
    # TODO: also test the re-assembled token, to check that
    # trailing_whitespace and final actually take effect.
    for sub_token in token.tokens:
        sub_token.begin = 0
    return token
def expand_multiword(token):
    """Split a space-containing word/lemma pair into parallel sub-tokens.

    Returns None when the number of words and lemmata differ.  This is
    naive whitespace tokenization; if there were apostrophes and the
    like, they would of course need handling too.
    TODO: tokenizing can again be done with regexes...
    TODO: then replace this with a smarter tokenizer.
    """
    words = token['word'].split()
    lemmata = token['lemma'].split()
    if len(words) != len(lemmata):
        return None
    # TODO: again, simply clone the original token first so the tags
    # survive (no new tags are invented here -- this is not a tagger).
    token.tokens = [Token.from_Token(token, word=word, lemma=lemma)
                    for word, lemma in zip(words, lemmata)]
    return token
|
cs
| 0.963889
|
# from copy import deepcopy # TODO: tohle nepoužívám (snad ani v testech), tak nějak rozsekat (využít # v modification.handle_mwes) a pohřbít NOTE: většinu MWE mám v úsporné formě, takže by default se nechají být TODO: rozšiřovat se budou jenom vybraný druhy MWE, např. řadový číslovky, čísla s mezerama/desetinama, zkratky (s tečkou) a jména (bohužel nemám odlišená cizí) TODO: převeď jednořádkový MWE na <phr/> TODO: jednoduše převeď <phr/> s w=, l=, t= a <g/> na jednořádkovej zápis v jednom řádku je word, co může obsahovat mezery, lemma stejně tak (a nejen mezery, i tečky a další věci, co tokenizer dává samostatně), a nakonec společnej tag – anebo možná i tolik tagů, kolik je „hloupejch“ tokenů – no a to vše oddělený tabulátorama takže tady budu re-tokenizovat (rekonstruovat, jak to původně hloupě bylo) a dávat značky – buď jednu a tu samou, anebo (v případě čísel) takovou, co odpovídá povaze tokenu (díky jednoduchým regexům) # TODO: tuhle funkci už určitě používám v Modifier.bootstrap(). Return regular tokens unchanged. Return <phr/> as a single line (with tabs to delimit columns/“attributes”, and squashing <g/>). Return regular tokens unchanged. Return a token containing heterogenous content (punctuation, spaces) as <phr/> with each token on a line of its own, delimited by <g/> where there was no whitespace. # TODO: test zatím nepředává mwe_type # TODO: testovat ještě zpětně sestavenej token, jestli se projevuje # trailing_whitespace a final # kdyby tam byly nějaký apostrofy a tak… tak samozřejmě s těma taky počítat # a chytře tokenizovat # TODO: tokenizovat se dá zase s regexama, vždyť to znám… # TODO: pak teda nějak nahradit chytřejším tokenizátorem # TODO: zase, prostě nejdřív ten původní token naklonovat, ať zůstanou # značky (tady už s nima nic vymejšlet nebudu, nejsem tagger)
| 2.818866
| 3
|
source/environment/atari/episodic_life.py
|
Aethiles/ppo-pytorch
| 0
|
6628768
|
import gym
import numpy as np
from typing import Dict, Tuple
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self,
env: gym.Wrapper,
):
"""
Signals end of episode on loss of life to improve value estimation.
Slightly modified from OpenAI baselines AtariWrappers. As detailed in Mnih et al. (2015) -- aka Nature paper.
:param env: the inner environment
"""
super().__init__(env)
self.lives = 0
self.done = True
def reset(self,
**kwargs,
) -> np.ndarray:
"""
Resets the environment
:param kwargs:
:return:
"""
if self.done:
state = self.env.reset(**kwargs)
else:
state, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return state
def step(self,
action: int,
) -> Tuple[np.ndarray, float, bool, Dict]:
"""
Performs the provided action
:param action: the action taken
:return: state, reward, done, information dictionary
"""
state, reward, done, info = self.env.step(action)
self.done = done
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
done = True
self.lives = lives
return state, reward, done, info
|
import gym
import numpy as np
from typing import Dict, Tuple
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self,
env: gym.Wrapper,
):
"""
Signals end of episode on loss of life to improve value estimation.
Slightly modified from OpenAI baselines AtariWrappers. As detailed in Mnih et al. (2015) -- aka Nature paper.
:param env: the inner environment
"""
super().__init__(env)
self.lives = 0
self.done = True
def reset(self,
**kwargs,
) -> np.ndarray:
"""
Resets the environment
:param kwargs:
:return:
"""
if self.done:
state = self.env.reset(**kwargs)
else:
state, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return state
def step(self,
action: int,
) -> Tuple[np.ndarray, float, bool, Dict]:
"""
Performs the provided action
:param action: the action taken
:return: state, reward, done, information dictionary
"""
state, reward, done, info = self.env.step(action)
self.done = done
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
done = True
self.lives = lives
return state, reward, done, info
|
en
| 0.742494
|
Signals end of episode on loss of life to improve value estimation. Slightly modified from OpenAI baselines AtariWrappers. As detailed in Mnih et al. (2015) -- aka Nature paper. :param env: the inner environment Resets the environment :param kwargs: :return: Performs the provided action :param action: the action taken :return: state, reward, done, information dictionary
| 3.200706
| 3
|
augmentation/cam_augmentation.py
|
hwfan/STRAPS-3DHumanShapePose
| 1
|
6628769
|
<filename>augmentation/cam_augmentation.py
import torch
def augment_cam_t(mean_cam_t, xy_std=0.05, delta_z_range=[-5, 5]):
batch_size = mean_cam_t.shape[0]
device = mean_cam_t.device
new_cam_t = mean_cam_t.clone()
delta_tx_ty = torch.randn(batch_size, 2, device=device) * xy_std
new_cam_t[:, :2] = mean_cam_t[:, :2] + delta_tx_ty
h, l = delta_z_range
delta_tz = (h - l) * torch.rand(batch_size, device=device) + l
new_cam_t[:, 2] = mean_cam_t[:, 2] + delta_tz
return new_cam_t
|
<filename>augmentation/cam_augmentation.py
import torch
def augment_cam_t(mean_cam_t, xy_std=0.05, delta_z_range=[-5, 5]):
batch_size = mean_cam_t.shape[0]
device = mean_cam_t.device
new_cam_t = mean_cam_t.clone()
delta_tx_ty = torch.randn(batch_size, 2, device=device) * xy_std
new_cam_t[:, :2] = mean_cam_t[:, :2] + delta_tx_ty
h, l = delta_z_range
delta_tz = (h - l) * torch.rand(batch_size, device=device) + l
new_cam_t[:, 2] = mean_cam_t[:, 2] + delta_tz
return new_cam_t
|
none
| 1
| 2.554411
| 3
|
|
nicos_mlz/mira/setups/sample_ext.py
|
ebadkamil/nicos
| 0
|
6628770
|
<gh_stars>0
description = 'sample table (external control)'
group = 'lowlevel'
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
co_stt = device('nicos.devices.tango.Sensor',
lowlevel = True,
tangodevice = tango_base + 'sample/phi_ext_enc',
unit = 'deg',
),
mo_stt = device('nicos.devices.tango.Motor',
lowlevel = True,
tangodevice = tango_base + 'sample/phi_ext_mot',
unit = 'deg',
precision = 0.002,
),
stt = device('nicos_mlz.mira.devices.axis.HoveringAxis',
description = 'sample two-theta angle',
abslimits = (-120, 120),
motor = 'mo_stt',
coder = 'co_stt',
startdelay = 1,
stopdelay = 2,
switch = 'air_sample_ana',
switchvalues = (0, 1),
fmtstr = '%.3f',
precision = 0.002,
),
air_mono = device('nicos.devices.generic.ManualMove',
abslimits = (0, 1),
unit = '',
lowlevel = True,
),
air_sample_ana = device('nicos_mlz.mira.devices.refcountio.MultiDigitalOutput',
outputs = ['air_sample', 'air_ana'],
unit = '',
lowlevel = True,
),
air_sample = device('nicos_mlz.mira.devices.refcountio.RefcountDigitalOutput',
tangodevice = tango_base + 'air/sample_ext',
lowlevel = True,
),
air_ana = device('nicos.devices.tango.DigitalOutput',
tangodevice = tango_base + 'air/det_ext',
lowlevel = True,
),
)
|
description = 'sample table (external control)'
group = 'lowlevel'
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
co_stt = device('nicos.devices.tango.Sensor',
lowlevel = True,
tangodevice = tango_base + 'sample/phi_ext_enc',
unit = 'deg',
),
mo_stt = device('nicos.devices.tango.Motor',
lowlevel = True,
tangodevice = tango_base + 'sample/phi_ext_mot',
unit = 'deg',
precision = 0.002,
),
stt = device('nicos_mlz.mira.devices.axis.HoveringAxis',
description = 'sample two-theta angle',
abslimits = (-120, 120),
motor = 'mo_stt',
coder = 'co_stt',
startdelay = 1,
stopdelay = 2,
switch = 'air_sample_ana',
switchvalues = (0, 1),
fmtstr = '%.3f',
precision = 0.002,
),
air_mono = device('nicos.devices.generic.ManualMove',
abslimits = (0, 1),
unit = '',
lowlevel = True,
),
air_sample_ana = device('nicos_mlz.mira.devices.refcountio.MultiDigitalOutput',
outputs = ['air_sample', 'air_ana'],
unit = '',
lowlevel = True,
),
air_sample = device('nicos_mlz.mira.devices.refcountio.RefcountDigitalOutput',
tangodevice = tango_base + 'air/sample_ext',
lowlevel = True,
),
air_ana = device('nicos.devices.tango.DigitalOutput',
tangodevice = tango_base + 'air/det_ext',
lowlevel = True,
),
)
|
none
| 1
| 1.427445
| 1
|
|
src/facial_landmarks_detection.py
|
princeamitlali/computer-pointer-controller
| 0
|
6628771
|
<gh_stars>0
import cv2
import numpy as np
import logging as log
from openvino.inference_engine import IENetwork, IECore
import warnings
warnings.filterwarnings("ignore")
class FacialLandmarksClass:
def __init__(self, model_name, device, extensions=None):
self.model_weights = model_name + '.bin'
self.model_structure = model_name + '.xml'
self.device = device
self.extension = extensions
try:
self.model = IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name = next(iter(self.model.inputs))
self.input_shape = self.model.inputs[self.input_name].shape
self.output_name = next(iter(self.model.outputs))
self.output_shape = self.model.outputs[self.output_name].shape
def load_model(self):
self.model = IENetwork(self.model_structure, self.model_weights)
self.core = IECore()
sup_layer = self.core.query_network(network=self.model, device_name=self.device)
unsup_layer = [R for R in self.model.layers.keys() if R not in sup_layer]
if len(unsup_layer) != 0:
log.error("Unsupported layers found ...")
log.error("Adding specified extension")
self.core.add_extension(self.extension, self.device)
sup_layer = self.core.query_network(network=self.model, device_name=self.device)
unsup_layer = [R for R in self.model.layers.keys() if R not in sup_layer]
if len(unsup_layer) != 0:
log.error("ERROR: There are still unsupported layers after adding extension...")
exit(1)
self.net = self.core.load_network(network=self.model, device_name=self.device, num_requests=1)
def predict(self, image):
self.pre_image = self.preprocess_input(image)
self.results = self.net.infer(inputs={self.input_name: self.pre_image})
self.output = self.preprocess_output(self.results, image)
l_eye_x_min = self.output['l_eye_x_coord'] - 10
l_eye_x_max = self.output['l_eye_x_coord'] + 10
l_eye_y_min = self.output['l_eye_y_coord'] - 10
l_eye_y_max = self.output['l_eye_y_coord'] + 10
r_eye_x_min = self.output['r_eye_x_coord'] - 10
r_eye_x_max = self.output['r_eye_x_coord'] + 10
r_eye_y_min = self.output['r_eye_y_coord'] - 10
r_eye_y_max = self.output['r_eye_y_coord'] + 10
self.eye_coord = [[l_eye_x_min, l_eye_y_min, l_eye_x_max, l_eye_y_max],
[r_eye_x_min, r_eye_y_min, r_eye_x_max, r_eye_y_max]]
l_eye_img = image[l_eye_x_min:l_eye_x_max, l_eye_y_min:l_eye_y_max]
r_eye_img = image[r_eye_x_min:r_eye_x_max, r_eye_y_min:r_eye_y_max]
return l_eye_img, r_eye_img, self.eye_coord
def check_model(self):
pass
def preprocess_input(self, image):
pre_frms = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
pre_frms = pre_frms.transpose((2, 0, 1))
pre_frms = pre_frms.reshape(1, *pre_frms.shape)
return pre_frms
def preprocess_output(self, outputs, image):
outputs = outputs[self.output_name][0]
l_eye_x_coord = int(outputs[0] * image.shape[1])
l_eye_y_coord = int(outputs[1] * image.shape[0])
r_eye_x_coord = int(outputs[2] * image.shape[1])
r_eye_y_coord = int(outputs[3] * image.shape[0])
return {'left_eye_x_coordinates': l_eye_x_coord, 'left_eye_y_coordinates': l_eye_y_coord,
'rright_eye_x_coordinates': r_eye_x_coord, 'right_eye_y_coordinates': r_eye_y_coord}
|
import cv2
import numpy as np
import logging as log
from openvino.inference_engine import IENetwork, IECore
import warnings
warnings.filterwarnings("ignore")
class FacialLandmarksClass:
def __init__(self, model_name, device, extensions=None):
self.model_weights = model_name + '.bin'
self.model_structure = model_name + '.xml'
self.device = device
self.extension = extensions
try:
self.model = IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name = next(iter(self.model.inputs))
self.input_shape = self.model.inputs[self.input_name].shape
self.output_name = next(iter(self.model.outputs))
self.output_shape = self.model.outputs[self.output_name].shape
def load_model(self):
self.model = IENetwork(self.model_structure, self.model_weights)
self.core = IECore()
sup_layer = self.core.query_network(network=self.model, device_name=self.device)
unsup_layer = [R for R in self.model.layers.keys() if R not in sup_layer]
if len(unsup_layer) != 0:
log.error("Unsupported layers found ...")
log.error("Adding specified extension")
self.core.add_extension(self.extension, self.device)
sup_layer = self.core.query_network(network=self.model, device_name=self.device)
unsup_layer = [R for R in self.model.layers.keys() if R not in sup_layer]
if len(unsup_layer) != 0:
log.error("ERROR: There are still unsupported layers after adding extension...")
exit(1)
self.net = self.core.load_network(network=self.model, device_name=self.device, num_requests=1)
def predict(self, image):
self.pre_image = self.preprocess_input(image)
self.results = self.net.infer(inputs={self.input_name: self.pre_image})
self.output = self.preprocess_output(self.results, image)
l_eye_x_min = self.output['l_eye_x_coord'] - 10
l_eye_x_max = self.output['l_eye_x_coord'] + 10
l_eye_y_min = self.output['l_eye_y_coord'] - 10
l_eye_y_max = self.output['l_eye_y_coord'] + 10
r_eye_x_min = self.output['r_eye_x_coord'] - 10
r_eye_x_max = self.output['r_eye_x_coord'] + 10
r_eye_y_min = self.output['r_eye_y_coord'] - 10
r_eye_y_max = self.output['r_eye_y_coord'] + 10
self.eye_coord = [[l_eye_x_min, l_eye_y_min, l_eye_x_max, l_eye_y_max],
[r_eye_x_min, r_eye_y_min, r_eye_x_max, r_eye_y_max]]
l_eye_img = image[l_eye_x_min:l_eye_x_max, l_eye_y_min:l_eye_y_max]
r_eye_img = image[r_eye_x_min:r_eye_x_max, r_eye_y_min:r_eye_y_max]
return l_eye_img, r_eye_img, self.eye_coord
def check_model(self):
pass
def preprocess_input(self, image):
pre_frms = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
pre_frms = pre_frms.transpose((2, 0, 1))
pre_frms = pre_frms.reshape(1, *pre_frms.shape)
return pre_frms
def preprocess_output(self, outputs, image):
outputs = outputs[self.output_name][0]
l_eye_x_coord = int(outputs[0] * image.shape[1])
l_eye_y_coord = int(outputs[1] * image.shape[0])
r_eye_x_coord = int(outputs[2] * image.shape[1])
r_eye_y_coord = int(outputs[3] * image.shape[0])
return {'left_eye_x_coordinates': l_eye_x_coord, 'left_eye_y_coordinates': l_eye_y_coord,
'rright_eye_x_coordinates': r_eye_x_coord, 'right_eye_y_coordinates': r_eye_y_coord}
|
none
| 1
| 2.227956
| 2
|
|
openpype/hosts/photoshop/plugins/publish/validate_naming.py
|
2-REC-forks/OpenPype
| 1
|
6628772
|
<gh_stars>1-10
import re
import pyblish.api
import openpype.api
from avalon import photoshop
class ValidateNamingRepair(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
invalid_chars, replace_char = plugin.get_replace_chars()
self.log.info("{} --- {}".format(invalid_chars, replace_char))
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
metadata = stub.read(instance[0])
self.log.info("metadata instance {}".format(metadata))
layer_name = None
if metadata.get("uuid"):
layer_data = stub.get_layer(metadata["uuid"])
self.log.info("layer_data {}".format(layer_data))
if layer_data:
layer_name = re.sub(invalid_chars,
replace_char,
layer_data.name)
stub.rename_layer(instance.data["uuid"], layer_name)
subset_name = re.sub(invalid_chars, replace_char,
instance.data["name"])
instance[0].Name = layer_name or subset_name
metadata["subset"] = subset_name
stub.imprint(instance[0], metadata)
return True
class ValidateNaming(pyblish.api.InstancePlugin):
"""Validate the instance name.
Spaces in names are not allowed. Will be replace with underscores.
"""
label = "Validate Naming"
hosts = ["photoshop"]
order = openpype.api.ValidateContentsOrder
families = ["image"]
actions = [ValidateNamingRepair]
# configured by Settings
invalid_chars = ''
replace_char = ''
def process(self, instance):
help_msg = ' Use Repair action (A) in Pyblish to fix it.'
msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["name"]), msg
msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["subset"]), msg
@classmethod
def get_replace_chars(cls):
"""Pass values configured in Settings for Repair."""
return cls.invalid_chars, cls.replace_char
|
import re
import pyblish.api
import openpype.api
from avalon import photoshop
class ValidateNamingRepair(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
invalid_chars, replace_char = plugin.get_replace_chars()
self.log.info("{} --- {}".format(invalid_chars, replace_char))
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
metadata = stub.read(instance[0])
self.log.info("metadata instance {}".format(metadata))
layer_name = None
if metadata.get("uuid"):
layer_data = stub.get_layer(metadata["uuid"])
self.log.info("layer_data {}".format(layer_data))
if layer_data:
layer_name = re.sub(invalid_chars,
replace_char,
layer_data.name)
stub.rename_layer(instance.data["uuid"], layer_name)
subset_name = re.sub(invalid_chars, replace_char,
instance.data["name"])
instance[0].Name = layer_name or subset_name
metadata["subset"] = subset_name
stub.imprint(instance[0], metadata)
return True
class ValidateNaming(pyblish.api.InstancePlugin):
"""Validate the instance name.
Spaces in names are not allowed. Will be replace with underscores.
"""
label = "Validate Naming"
hosts = ["photoshop"]
order = openpype.api.ValidateContentsOrder
families = ["image"]
actions = [ValidateNamingRepair]
# configured by Settings
invalid_chars = ''
replace_char = ''
def process(self, instance):
help_msg = ' Use Repair action (A) in Pyblish to fix it.'
msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["name"]), msg
msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["subset"]), msg
@classmethod
def get_replace_chars(cls):
"""Pass values configured in Settings for Repair."""
return cls.invalid_chars, cls.replace_char
|
en
| 0.792429
|
Repair the instance asset. # Get the errored instances # Apply pyblish.logic to get the instances for the plug-in Validate the instance name. Spaces in names are not allowed. Will be replace with underscores. # configured by Settings Pass values configured in Settings for Repair.
| 2.380908
| 2
|
venv/lib/python2.7/site-packages/jsonh/__init__.py
|
Gabik077/domo_home
| 1
|
6628773
|
<reponame>Gabik077/domo_home<filename>venv/lib/python2.7/site-packages/jsonh/__init__.py
from .jsonh import load, loads, dump, dumps, pack, unpack, compress, uncompress
|
from .jsonh import load, loads, dump, dumps, pack, unpack, compress, uncompress
|
none
| 1
| 1.092001
| 1
|
|
tests/test_visitors/test_ast/test_complexity/test_counts/test_try_body_length.py
|
Andrka/wemake-python-styleguide
| 1
|
6628774
|
<gh_stars>1-10
import pytest
from wemake_python_styleguide.violations.complexity import (
TooLongTryBodyViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.counts import (
TryExceptVisitor,
)
try_without_except = """
try:
{0}
finally:
...
"""
simple_try_except = """
try:
{0}
except ValueError:
...
"""
try_except_with_else = """
try:
{0}
except ValueError:
...
else:
...
"""
full_except_with_else = """
try:
{0}
except ValueError:
...
else:
...
finally:
...
"""
# Wrong:
wrong_try_without_except = """
try:
...
finally:
{0}
"""
wrong_simple_try_except = """
try:
...
except ValueError:
{0}
"""
wrong_try_except_with_else = """
try:
...
except ValueError:
...
else:
{0}
"""
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)\n m.print(3)',
'm = 1\n p = 2\n c = 3\n x = 4',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_count_default(
assert_errors,
assert_error_text,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [TooLongTryBodyViolation])
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)',
'm = 1\n p = 2',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_wrong_custom_options(
assert_errors,
assert_error_text,
parse_ast_tree,
options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
option_values = options(max_try_body_length=1)
visitor = TryExceptVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [TooLongTryBodyViolation])
assert_error_text(
visitor, '2', baseline=option_values.max_try_body_length,
)
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)',
'm = 1\n p = 2',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_count_custom_options(
assert_errors,
parse_ast_tree,
options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
option_values = options(max_try_body_length=2)
visitor = TryExceptVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('statements', [
'print(1)',
'm.print(1)',
'm = 1',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_correct_default(
assert_errors,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('statements', [
'print(1)',
'm.print(1)',
'm = 1',
'print(1)',
'm.print(1)',
'm = 1',
])
@pytest.mark.parametrize('code', [
wrong_simple_try_except,
wrong_try_except_with_else,
wrong_try_without_except,
])
def test_try_body_different_nodes(
assert_errors,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
|
import pytest
from wemake_python_styleguide.violations.complexity import (
TooLongTryBodyViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.counts import (
TryExceptVisitor,
)
try_without_except = """
try:
{0}
finally:
...
"""
simple_try_except = """
try:
{0}
except ValueError:
...
"""
try_except_with_else = """
try:
{0}
except ValueError:
...
else:
...
"""
full_except_with_else = """
try:
{0}
except ValueError:
...
else:
...
finally:
...
"""
# Wrong:
wrong_try_without_except = """
try:
...
finally:
{0}
"""
wrong_simple_try_except = """
try:
...
except ValueError:
{0}
"""
wrong_try_except_with_else = """
try:
...
except ValueError:
...
else:
{0}
"""
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)\n m.print(3)',
'm = 1\n p = 2\n c = 3\n x = 4',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_count_default(
assert_errors,
assert_error_text,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [TooLongTryBodyViolation])
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)',
'm = 1\n p = 2',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_wrong_custom_options(
assert_errors,
assert_error_text,
parse_ast_tree,
options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
option_values = options(max_try_body_length=1)
visitor = TryExceptVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [TooLongTryBodyViolation])
assert_error_text(
visitor, '2', baseline=option_values.max_try_body_length,
)
@pytest.mark.parametrize('statements', [
'print(1)\n print(2)',
'm.print(1)\n m.print(2)',
'm = 1\n p = 2',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_count_custom_options(
assert_errors,
parse_ast_tree,
options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
option_values = options(max_try_body_length=2)
visitor = TryExceptVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('statements', [
'print(1)',
'm.print(1)',
'm = 1',
])
@pytest.mark.parametrize('code', [
try_without_except,
simple_try_except,
try_except_with_else,
full_except_with_else,
])
def test_try_body_correct_default(
assert_errors,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('statements', [
'print(1)',
'm.print(1)',
'm = 1',
'print(1)',
'm.print(1)',
'm = 1',
])
@pytest.mark.parametrize('code', [
wrong_simple_try_except,
wrong_try_except_with_else,
wrong_try_without_except,
])
def test_try_body_different_nodes(
assert_errors,
parse_ast_tree,
default_options,
code,
statements,
):
"""Testing that default settings raise a warning."""
tree = parse_ast_tree(code.format(statements))
visitor = TryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
|
en
| 0.647767
|
try: {0} finally: ... try: {0} except ValueError: ... try: {0} except ValueError: ... else: ... try: {0} except ValueError: ... else: ... finally: ... # Wrong: try: ... finally: {0} try: ... except ValueError: {0} try: ... except ValueError: ... else: {0} Testing that default settings raise a warning. Testing that default settings raise a warning. Testing that default settings raise a warning. Testing that default settings raise a warning. Testing that default settings raise a warning.
| 2.465746
| 2
|
hops/distribute/allreduce.py
|
tkakantousis/hops-util-py
| 0
|
6628775
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import datetime
import socket
import json
from . import allreduce_reservation
run_id = 0
def _launch(sc, map_fun, local_logdir=False, name="no-name"):
"""
Args:
sc:
map_fun:
local_logdir:
name:
Returns:
"""
global run_id
app_id = str(sc.applicationId)
num_executions = util.num_executors()
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup("CollectiveAllReduceStrategy", "{} | Distributed Training".format(name))
server = allreduce_reservation.Server(num_executions)
server_addr = server.start()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr))
logdir = _get_logdir(app_id)
path_to_metric = logdir + '/metric'
if pydoop.hdfs.path.exists(path_to_metric):
with pydoop.hdfs.open(path_to_metric, "r") as fi:
metric = float(fi.read())
fi.close()
return metric, logdir
print('Finished Experiment \n')
return None, logdir
def _get_logdir(app_id):
"""
Args:
app_id:
Returns:
"""
global run_id
return hopshdfs._get_experiments_dir() + '/' + app_id + '/collective_all_reduce/run.' + str(run_id)
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr):
"""
Args:
app_id:
run_id:
map_fun:
local_logdir:
server_addr:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
task_index = None
try:
host = util._get_ip_address()
tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_socket.bind(('', 0))
port = tmp_socket.getsockname()[1]
client = allreduce_reservation.Client(server_addr)
host_port = host + ":" + str(port)
client.register({"worker": host_port, "index": executor_num})
cluster = client.await_reservations()
tmp_socket.close()
client.close()
task_index = _find_index(host_port, cluster)
cluster["task"] = {"type": "worker", "index": task_index}
print(cluster)
os.environ["TF_CONFIG"] = json.dumps(cluster)
if task_index == 0:
hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, None, 'collective_all_reduce')
pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
hopshdfs._init_logger()
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
if task_index == 0:
hopshdfs.log(gpu_str)
print(gpu_str)
print('-------------------------------------------------------')
print('Started running task \n')
if task_index == 0:
hopshdfs.log('Started running task')
task_start = datetime.datetime.now()
retval = map_fun()
if task_index == 0:
if retval:
_handle_return(retval, hdfs_exec_logdir)
task_end = datetime.datetime.now()
time_str = 'Finished task - took ' + util._time_diff(task_start, task_end)
print('\n' + time_str)
print('-------------------------------------------------------')
if task_index == 0:
hopshdfs.log(time_str)
except:
raise
finally:
if task_index == 0:
if local_logdir:
local_tb = tensorboard.local_logdir_path
util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join()
_cleanup(tb_hdfs_path)
return _wrapper_fun
def _cleanup(tb_hdfs_path):
"""
Args:
tb_hdfs_path:
Returns:
"""
handle = hopshdfs.get()
if not tb_hdfs_path == None and not tb_hdfs_path == '' and handle.exists(tb_hdfs_path):
handle.delete(tb_hdfs_path)
hopshdfs._kill_logger()
def _handle_return(val, hdfs_exec_logdir):
"""
Args:
val:
hdfs_exec_logdir:
Returns:
"""
try:
test = int(val)
except:
raise ValueError('Your function needs to return a metric (number) which should be maximized or minimized')
metric_file = hdfs_exec_logdir + '/metric'
fs_handle = hopshdfs.get_fs()
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(str(float(val)).encode())
fd.flush()
fd.close()
def _find_index(host_port, cluster_spec):
"""
Args:
host_port:
cluster_spec:
Returns:
"""
index = 0
for entry in cluster_spec["cluster"]["worker"]:
if entry == host_port:
return index
else:
index = index + 1
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import datetime
import socket
import json
from . import allreduce_reservation
run_id = 0
def _launch(sc, map_fun, local_logdir=False, name="no-name"):
"""
Args:
sc:
map_fun:
local_logdir:
name:
Returns:
"""
global run_id
app_id = str(sc.applicationId)
num_executions = util.num_executors()
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup("CollectiveAllReduceStrategy", "{} | Distributed Training".format(name))
server = allreduce_reservation.Server(num_executions)
server_addr = server.start()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr))
logdir = _get_logdir(app_id)
path_to_metric = logdir + '/metric'
if pydoop.hdfs.path.exists(path_to_metric):
with pydoop.hdfs.open(path_to_metric, "r") as fi:
metric = float(fi.read())
fi.close()
return metric, logdir
print('Finished Experiment \n')
return None, logdir
def _get_logdir(app_id):
"""
Args:
app_id:
Returns:
"""
global run_id
return hopshdfs._get_experiments_dir() + '/' + app_id + '/collective_all_reduce/run.' + str(run_id)
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr):
    """Build the per-partition function executed on each Spark executor.

    Args:
        app_id: Spark application id (used to build HDFS log directories).
        run_id: sequence number of this experiment run.
        map_fun: user training function to invoke on the executor.
        local_logdir: if True, TensorBoard logs are kept locally and copied
            to HDFS after training.
        server_addr: address of the reservation server used for worker
            discovery.

    Returns:
        A function suitable for ``RDD.foreachPartition`` that sets up
        TF_CONFIG, runs *map_fun* and cleans up afterwards.
    """
    def _wrapper_fun(iter):
        """Executor-side body: register with the reservation server, build the
        cluster spec, run the user function and persist logs/metrics.

        Args:
            iter: partition iterator; its single element is the executor number.

        Returns:
            None.
        """
        # The partition holds exactly one element: this executor's number.
        for i in iter:
            executor_num = i
        tb_hdfs_path = ''
        hdfs_exec_logdir = ''
        # Background thread that periodically prints GPU utilization.
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()
        task_index = None
        try:
            host = util._get_ip_address()
            # Bind to an ephemeral port just to reserve a free port number
            # to advertise to the other workers.
            tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tmp_socket.bind(('', 0))
            port = tmp_socket.getsockname()[1]
            client = allreduce_reservation.Client(server_addr)
            host_port = host + ":" + str(port)
            # Register this worker and block until every worker has done so.
            client.register({"worker": host_port, "index": executor_num})
            cluster = client.await_reservations()
            tmp_socket.close()
            client.close()
            task_index = _find_index(host_port, cluster)
            cluster["task"] = {"type": "worker", "index": task_index}
            print(cluster)
            # TensorFlow's distribution strategy reads the spec from TF_CONFIG.
            os.environ["TF_CONFIG"] = json.dumps(cluster)
            # Only the chief (task_index 0) creates directories, logs and
            # registers TensorBoard.
            if task_index == 0:
                hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, None, 'collective_all_reduce')
                pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
                hopshdfs._init_logger()
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
            gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
            if task_index == 0:
                hopshdfs.log(gpu_str)
            print(gpu_str)
            print('-------------------------------------------------------')
            print('Started running task \n')
            if task_index == 0:
                hopshdfs.log('Started running task')
            task_start = datetime.datetime.now()
            retval = map_fun()
            # Only the chief persists the returned metric.
            if task_index == 0:
                if retval:
                    _handle_return(retval, hdfs_exec_logdir)
            task_end = datetime.datetime.now()
            time_str = 'Finished task - took ' + util._time_diff(task_start, task_end)
            print('\n' + time_str)
            print('-------------------------------------------------------')
            if task_index == 0:
                hopshdfs.log(time_str)
        except:
            raise
        finally:
            # Copy local TensorBoard logs to HDFS, stop the GPU-monitor
            # thread and remove the TensorBoard endpoint file.
            if task_index == 0:
                if local_logdir:
                    local_tb = tensorboard.local_logdir_path
                    util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
            if devices.get_num_gpus() > 0:
                t.do_run = False
                t.join()
            _cleanup(tb_hdfs_path)
    return _wrapper_fun
def _cleanup(tb_hdfs_path):
    """Remove the TensorBoard endpoint file from HDFS and shut down the logger.

    Args:
        tb_hdfs_path: HDFS path of the TensorBoard endpoint file; may be
            None or '' when this executor never registered one.

    Returns:
        None.
    """
    handle = hopshdfs.get()
    # PEP 8: compare to None with identity, not equality.
    if tb_hdfs_path is not None and tb_hdfs_path != '' and handle.exists(tb_hdfs_path):
        handle.delete(tb_hdfs_path)
    hopshdfs._kill_logger()
def _handle_return(val, hdfs_exec_logdir):
    """Validate the user-returned metric and persist it to HDFS.

    Args:
        val: value returned by the user's map function; must be numeric
            (int, float or a numeric string).
        hdfs_exec_logdir: HDFS directory in which to write the ``metric`` file.

    Raises:
        ValueError: if *val* cannot be interpreted as a number.

    Returns:
        None.
    """
    # Validate with float() (the value is stored as a float anyway); the
    # original int(val) wrongly rejected numeric strings such as "0.5".
    try:
        metric = float(val)
    except (TypeError, ValueError):
        raise ValueError('Your function needs to return a metric (number) which should be maximized or minimized')
    metric_file = hdfs_exec_logdir + '/metric'
    fs_handle = hopshdfs.get_fs()
    # Different pydoop versions disagree on the keyword for the open mode.
    try:
        fd = fs_handle.open_file(metric_file, mode='w')
    except:
        fd = fs_handle.open_file(metric_file, flags='w')
    try:
        fd.write(str(metric).encode())
        fd.flush()
    finally:
        # Guarantee the handle is released even if the write fails.
        fd.close()
def _find_index(host_port, cluster_spec):
"""
Args:
host_port:
cluster_spec:
Returns:
"""
index = 0
for entry in cluster_spec["cluster"]["worker"]:
if entry == host_port:
return index
else:
index = index + 1
|
en
| 0.752987
|
Utility functions to retrieve information about available services and setting up security for the Hops platform. These utils facilitates development by hiding complexity for programs interacting with Hops services. Args: sc: map_fun: local_logdir: name: Returns: #Each TF task should be run on 1 executor #Make SparkUI intuitive by grouping jobs #Force execution on executor, since GPU is located on executor Args: app_id: Returns: Args: app_id: run_id: map_fun: local_logdir: server_addr: Returns: Args: iter: Returns: Args: tb_hdfs_path: Returns: Args: val: hdfs_exec_logdir: Returns: Args: host_port: cluster_spec: Returns:
| 2.482635
| 2
|
aws_s3_resource/s3_bucket.py
|
Quakingaspen-codehub/aws_s3_resource
| 0
|
6628776
|
from . import S3
from .s3_object import S3Object
from botocore.client import ClientError
import uuid
import os
import boto3
class S3Bucket(S3):
    """Helper operations on S3 buckets: create, delete, list, check, download."""

    @staticmethod
    def create_name(bucket_prefix, num_random_chars=6):
        """Build a bucket name as ``<prefix>-<random chars>`` and validate its length.

        NOTE(review): NumberRandomCharsException and NumberCharsBucketNameException
        are not imported in this module — presumably defined/injected by a sibling
        module; verify they are in scope at runtime.

        Args:
            bucket_prefix: human-chosen prefix for the bucket name.
            num_random_chars: number of random characters to append.

        Returns:
            The generated bucket name.
        """
        if num_random_chars < NumberRandomCharsException.min_num_chars or \
                num_random_chars > NumberRandomCharsException.max_num_chars:
            raise NumberRandomCharsException
        suffix = str(uuid.uuid4()).replace('-', '')[:num_random_chars]
        bucket_name = ''.join([bucket_prefix, '-', suffix])
        if len(bucket_name) < NumberCharsBucketNameException.min_num_chars or \
                len(bucket_name) > NumberCharsBucketNameException.max_num_chars:
            raise NumberCharsBucketNameException(bucket_name, len(bucket_name))
        return bucket_name

    @classmethod
    def create(cls, bucket_name):
        """Create *bucket_name* in the region given by the AWS_REGION_NAME env var."""
        # The original also built an unused boto3 session/region — removed.
        cls.resource.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={'LocationConstraint': os.environ['AWS_REGION_NAME']})

    @classmethod
    def create_with_random_name_suffix(cls, bucket_name, num_random_chars=6):
        """Create a bucket, retrying with a fresh random suffix on name collision.

        Args:
            bucket_name: initial bucket name to try.
            num_random_chars: random characters appended on each retry
                (the original accepted this parameter but ignored it).

        Returns:
            The name of the bucket that was actually created.
        """
        while True:
            try:
                cls.create(bucket_name)
            except ClientError as e:
                if e.response['Error']['Code'] == 'BucketAlreadyExists':
                    # Name taken globally: derive a new suffixed name and retry.
                    bucket_name = cls.create_name(bucket_name, num_random_chars)
                else:
                    raise
            else:
                return bucket_name

    @classmethod
    def delete(cls, bucket_name):
        """Delete the (empty) bucket *bucket_name*."""
        return cls.resource.Bucket(bucket_name).delete()

    @classmethod
    def list_all(cls, as_generator):
        """Return all buckets, lazily when *as_generator* is truthy."""
        buckets = cls.resource.buckets.all()
        if as_generator:
            return buckets
        return list(buckets)

    @classmethod
    def list_all_names(cls, as_generator):
        """Return the names of all buckets, lazily when *as_generator* is truthy."""
        buckets = cls.list_all(as_generator=True)
        buckets_names = (bucket.name for bucket in buckets)
        if as_generator:
            return buckets_names
        return list(buckets_names)

    @classmethod
    def is_available(cls, bucket_name):
        """Return True if a bucket named *bucket_name* exists in this account."""
        bucket = cls.resource.Bucket(bucket_name)
        return bucket in cls.resource.buckets.all()

    @classmethod
    def num_buckets(cls):
        """Return the number of buckets in the account.

        BUG FIX: the original returned the list of buckets itself instead of
        its length, contradicting the method name.
        """
        return len(cls.list_all(False))

    @classmethod
    def download(cls, bucket_name, folder_name):
        """Download the whole bucket *bucket_name* into local *folder_name*.

        Folder objects (keys ending with '/') are recreated as directories;
        all other objects are downloaded as files.
        """
        # Create the root folder (raises if it already exists, as before).
        os.makedirs(folder_name)
        bucket = cls.resource.Bucket(bucket_name)
        # Tracks folder keys already materialized on disk.
        created_folders = set()
        for my_bucket_object in bucket.objects.all():
            # Local folder or file path for this object.
            path = os.path.join(folder_name, my_bucket_object.key)
            # Keys ending with '/' denote folders.
            if my_bucket_object.key.endswith('/'):
                if my_bucket_object.key in created_folders:
                    continue
                os.makedirs(path)
                created_folders.add(my_bucket_object.key)
            else:
                # Plain object: download it as a file.
                file_dict = {
                    'file_path': path
                }
                S3Object.download(bucket_name, my_bucket_object.key, file_dict)
|
from . import S3
from .s3_object import S3Object
from botocore.client import ClientError
import uuid
import os
import boto3
class S3Bucket(S3):
@staticmethod
def create_name(bucket_prefix, num_random_chars=6):
if num_random_chars < NumberRandomCharsException.min_num_chars or \
num_random_chars > NumberRandomCharsException.max_num_chars:
raise NumberRandomCharsException
bucket_name = ''.join([bucket_prefix, '-', str(uuid.uuid4()).replace('-', '')[:num_random_chars]])
if len(bucket_name) < NumberCharsBucketNameException.min_num_chars or \
len(bucket_name) > NumberCharsBucketNameException.max_num_chars:
raise NumberCharsBucketNameException(bucket_name, len(bucket_name))
return bucket_name
@classmethod
def create(cls, bucket_name):
# Take the region from the config file
session = boto3.session.Session()
region = session.region_name
# Create the bucket
cls.resource.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': os.environ['AWS_REGION_NAME']})
@classmethod
def create_with_random_name_suffix(cls, bucket_name, num_random_chars=6):
while True:
try:
response = cls.create(bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'BucketAlreadyExists':
bucket_name = cls.create_name(bucket_name)
else:
raise e
else:
return bucket_name
@classmethod
def delete(cls, bucket_name):
return cls.resource.Bucket(bucket_name).delete()
@classmethod
def list_all(cls, as_generator):
buckets = cls.resource.buckets.all()
if as_generator:
return buckets
return list(buckets)
@classmethod
def list_all_names(cls, as_generator):
buckets = cls.list_all(as_generator=True)
buckets_names = (bucket.name for bucket in buckets)
if as_generator:
return buckets_names
return list(buckets_names)
@classmethod
def is_available(cls, bucket_name):
bucket = cls.resource.Bucket(bucket_name)
return bucket in cls.resource.buckets.all()
@classmethod
def num_buckets(cls):
return cls.list_all(False)
@classmethod
def download(cls, bucket_name, folder_name):
# Create the root folder
os.makedirs(folder_name)
bucket = cls.resource.Bucket(bucket_name)
# A set contains all created folders
created_folders = set()
for my_bucket_object in bucket.objects.all():
# Folder or file path
path = os.path.join(folder_name, my_bucket_object.key)
# If the object name end with special character "/", it will definitely be a folder
if my_bucket_object.key.endswith('/'):
# Continue if folder already created
if my_bucket_object.key in created_folders:
continue
# Create the folder tree and add it to the created set
os.makedirs(path)
created_folders.add(my_bucket_object.key)
# Download the object if it is a file
else:
file_dict = {
'file_path': path
}
S3Object.download(bucket_name, my_bucket_object.key, file_dict)
|
en
| 0.880816
|
# Take the region from the config file # Create the bucket # Create the root folder # A set contains all created folders # Folder or file path # If the object name end with special character "/", it will definitely be a folder # Continue if folder already created # Create the folder tree and add it to the created set # Download the object if it is a file
| 2.465923
| 2
|
homeassistant/components/smart_meter_texas/__init__.py
|
basicpail/core
| 5
|
6628777
|
"""The Smart Meter Texas integration."""
import asyncio
import logging
import ssl
from smart_meter_texas import Account, Client, ClientSSLContext
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
DataUpdateCoordinator,
Debouncer,
UpdateFailed,
)
from .const import (
DATA_COORDINATOR,
DATA_SMART_METER,
DEBOUNCE_COOLDOWN,
DOMAIN,
SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Smart Meter Texas from a config entry.

    Authenticates against the Smart Meter Texas API, discovers the user's
    meters, and creates a debounced DataUpdateCoordinator that polls them.
    Returns False on bad credentials; raises ConfigEntryNotReady on timeout
    so Home Assistant retries later.
    """
    username = entry.data[CONF_USERNAME]
    password = entry.data[CONF_PASSWORD]
    account = Account(username, password)
    client_ssl_context = ClientSSLContext()
    ssl_context = await client_ssl_context.get_ssl_context()
    smart_meter_texas_data = SmartMeterTexasData(hass, entry, account, ssl_context)
    try:
        await smart_meter_texas_data.client.authenticate()
    except SmartMeterTexasAuthError:
        _LOGGER.error("Username or password was not accepted")
        return False
    except asyncio.TimeoutError as error:
        # Transient failure: let Home Assistant retry the setup later.
        raise ConfigEntryNotReady from error
    await smart_meter_texas_data.setup()
    async def async_update_data():
        # Coordinator update method: read every meter and hand back the
        # shared data object.
        _LOGGER.debug("Fetching latest data")
        await smart_meter_texas_data.read_meters()
        return smart_meter_texas_data
    # Use a DataUpdateCoordinator to manage the updates. This is due to the
    # Smart Meter Texas API which takes around 30 seconds to read a meter.
    # This avoids Home Assistant from complaining about the component taking
    # too long to update.
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="Smart Meter Texas",
        update_method=async_update_data,
        update_interval=SCAN_INTERVAL,
        request_refresh_debouncer=Debouncer(
            hass, _LOGGER, cooldown=DEBOUNCE_COOLDOWN, immediate=True
        ),
    )
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_COORDINATOR: coordinator,
        DATA_SMART_METER: smart_meter_texas_data,
    }
    # Kick off the first refresh in the background rather than blocking setup.
    asyncio.create_task(coordinator.async_refresh())
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
class SmartMeterTexasData:
    """Manages coordination of API data updates."""
    def __init__(
        self,
        hass: HomeAssistant,
        entry: ConfigEntry,
        account: Account,
        ssl_context: ssl.SSLContext,
    ) -> None:
        """Initialize the data coordinator."""
        self._entry = entry
        self.account = account
        # Reuse Home Assistant's shared aiohttp session for API calls.
        websession = aiohttp_client.async_get_clientsession(hass)
        self.client = Client(websession, account, ssl_context=ssl_context)
        # Populated by setup(); holds the account's discovered meters.
        self.meters: list = []
    async def setup(self):
        """Fetch all of the user's meters."""
        self.meters = await self.account.fetch_meters(self.client)
        _LOGGER.debug("Discovered %s meter(s)", len(self.meters))
    async def read_meters(self):
        """Read each meter.

        Raises:
            UpdateFailed: if any meter read fails with an API or auth error,
                so the DataUpdateCoordinator marks the update as failed.
        """
        for meter in self.meters:
            try:
                await meter.read_meter(self.client)
            except (SmartMeterTexasAPIError, SmartMeterTexasAuthError) as error:
                raise UpdateFailed(error) from error
        return self.meters
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry and drop its cached data on success."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        # Only discard the shared data once every platform unloaded cleanly.
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
|
"""The Smart Meter Texas integration."""
import asyncio
import logging
import ssl
from smart_meter_texas import Account, Client, ClientSSLContext
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
DataUpdateCoordinator,
Debouncer,
UpdateFailed,
)
from .const import (
DATA_COORDINATOR,
DATA_SMART_METER,
DEBOUNCE_COOLDOWN,
DOMAIN,
SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Smart Meter Texas from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
account = Account(username, password)
client_ssl_context = ClientSSLContext()
ssl_context = await client_ssl_context.get_ssl_context()
smart_meter_texas_data = SmartMeterTexasData(hass, entry, account, ssl_context)
try:
await smart_meter_texas_data.client.authenticate()
except SmartMeterTexasAuthError:
_LOGGER.error("Username or password was not accepted")
return False
except asyncio.TimeoutError as error:
raise ConfigEntryNotReady from error
await smart_meter_texas_data.setup()
async def async_update_data():
_LOGGER.debug("Fetching latest data")
await smart_meter_texas_data.read_meters()
return smart_meter_texas_data
# Use a DataUpdateCoordinator to manage the updates. This is due to the
# Smart Meter Texas API which takes around 30 seconds to read a meter.
# This avoids Home Assistant from complaining about the component taking
# too long to update.
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="Smart Meter Texas",
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
hass, _LOGGER, cooldown=DEBOUNCE_COOLDOWN, immediate=True
),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
DATA_SMART_METER: smart_meter_texas_data,
}
asyncio.create_task(coordinator.async_refresh())
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
class SmartMeterTexasData:
"""Manages coordinatation of API data updates."""
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
account: Account,
ssl_context: ssl.SSLContext,
) -> None:
"""Initialize the data coordintator."""
self._entry = entry
self.account = account
websession = aiohttp_client.async_get_clientsession(hass)
self.client = Client(websession, account, ssl_context=ssl_context)
self.meters: list = []
async def setup(self):
"""Fetch all of the user's meters."""
self.meters = await self.account.fetch_meters(self.client)
_LOGGER.debug("Discovered %s meter(s)", len(self.meters))
async def read_meters(self):
"""Read each meter."""
for meter in self.meters:
try:
await meter.read_meter(self.client)
except (SmartMeterTexasAPIError, SmartMeterTexasAuthError) as error:
raise UpdateFailed(error) from error
return self.meters
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
en
| 0.779988
|
The Smart Meter Texas integration. Set up Smart Meter Texas from a config entry. # Use a DataUpdateCoordinator to manage the updates. This is due to the # Smart Meter Texas API which takes around 30 seconds to read a meter. # This avoids Home Assistant from complaining about the component taking # too long to update. Manages coordinatation of API data updates. Initialize the data coordintator. Fetch all of the user's meters. Read each meter. Unload a config entry.
| 2.221124
| 2
|
train.py
|
sriyash421/HEP-AnomalyDetection
| 0
|
6628778
|
<filename>train.py<gh_stars>0
import os
import torch
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torch.utils.data import random_split
from models import Classifier, AutoEncoder
import numpy as np
from utils import print_dict, get_distance_matrix
import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score, mean_squared_error, log_loss, roc_curve
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
class Model(pl.LightningModule):
    """Classifier + auto-encoder Lightning module for anomaly detection.

    A classifier separates background classes while an auto-encoder is
    trained on the classifier's (detached) hidden features; at test time the
    latent representation feeds a k-NN-distance anomaly score.
    """
    def __init__(self,
                 momentum,
                 nesterov,
                 learn_rate,
                 learn_rate_decay,
                 classifier_wt,
                 encoder_wt,
                 optimizer,
                 classifier_nodes,
                 encoder_nodes,
                 dropout,
                 activation,
                 input_size,
                 output_size,
                 save_tb_logs,
                 log_path,
                 K,
                 inf_batch_size
                 ):
        '''create a training class'''
        # BUG FIX: super(pl.LightningModule, self).__init__() skipped
        # LightningModule.__init__ in the MRO; plain super() initializes the
        # Lightning machinery correctly.
        super().__init__()
        self.classifier = Classifier(
            classifier_nodes, dropout, activation, input_size, output_size)
        # The encoder consumes the concatenated classifier features.
        self.encoder = AutoEncoder(
            encoder_nodes, dropout, activation, input_size+sum(classifier_nodes)+output_size)
        # Latent dimensionality, used as the exponent in the "new" score.
        self.m = encoder_nodes[-1]
        self.example_input_array = torch.ones((1, input_size))
        self.momentum = momentum
        self.nesterov = nesterov
        self.learn_rate = learn_rate
        self.learn_rate_decay = learn_rate_decay
        self.classifier_wt = classifier_wt
        self.encoder_wt = encoder_wt
        self.optimizer_ = optimizer
        self.encoder_loss_fn = torch.nn.MSELoss()
        self.classifier_loss_fn = torch.nn.CrossEntropyLoss()
        self.K = K
        self.inf_batch_size = inf_batch_size
        self.log_path = log_path
        self.num_bkg = output_size

    def forward(self, input):
        '''Run classifier and auto-encoder; return predictions, features,
        reconstructed features and the latent representation.'''
        predictions, features = self.classifier(input)
        # detach(): the reconstruction loss must not backprop into the classifier.
        recon_features, latent_rep = self.encoder(features.detach())
        return predictions, features, recon_features, latent_rep

    def configure_optimizers(self):
        '''create optimizer (Adam or SGD) and an inverse-decay LR scheduler'''
        optimizer = None
        if self.optimizer_ == 'adam':
            optimizer = torch.optim.Adam(self.parameters(
            ), lr=self.learn_rate, betas=[self.momentum, 0.999])
        else:
            optimizer = torch.optim.SGD(
                self.parameters(), lr=self.learn_rate, momentum=self.momentum, nesterov=self.nesterov)
        def scheduler_fn(epoch): return 1./(1+epoch*self.learn_rate_decay)
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, scheduler_fn)
        return [optimizer], [scheduler]

    def step_helper(self, batch):
        """Shared train/val step: returns total loss, component losses,
        accuracy and the latent representation. Targets are shifted by -1
        because label 0 is reserved for the signal class."""
        inputs, targets = batch
        predictions, features, recon_features, latent_rep = self(inputs)
        classifier_loss = self.classifier_loss_fn(
            predictions, (targets-1).long())
        recon_loss = self.encoder_loss_fn(features, recon_features)
        total_loss = self.classifier_wt*classifier_loss+self.encoder_wt*recon_loss
        accuracy = (torch.argmax(predictions, dim=1)
                    == (targets-1)).float().mean()
        return total_loss, classifier_loss, recon_loss, accuracy, latent_rep

    def training_step(self, batch, batch_idx):
        '''executed during training'''
        total_loss, classifier_loss, recon_loss, accuracy, _ = self.step_helper(
            batch)
        self.log('train_loss', total_loss, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        self.log('train_acc', accuracy, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        self.log('train_classifier_loss', classifier_loss,
                 on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.log('train_recon_loss', recon_loss, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        return {'loss': total_loss, 'acc': accuracy}

    def validation_step(self, batch, batch_idx):
        '''executed during validation'''
        total_loss, classifier_loss, recon_loss, accuracy, _ = self.step_helper(
            batch)
        self.log('val_loss', total_loss, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        self.log('val_acc', accuracy, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        self.log('val_classifier_loss', classifier_loss, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        self.log('val_recon_loss', recon_loss, on_step=True,
                 on_epoch=True, prog_bar=True, logger=True)
        return {'val_loss': total_loss, 'val_acc': accuracy}

    def on_test_epoch_start(self):
        """Collect latent features of all train/val samples as the reference
        population for the k-NN anomaly scores."""
        self.eval()
        training_samples = []
        for _, batch in enumerate(self.train_dataloader()):
            _, _, _, features = self(batch[0].to(self.device))
            training_samples.append(features)
        for _, batch in enumerate(self.val_dataloader()):
            _, _, _, features = self(batch[0].to(self.device))
            training_samples.append(features)
        self.training_features = torch.cat(training_samples, axis=0)
        self.test_features = []
        self.test_target = []

    def test_step(self, batch, batch_idx):
        """Accumulate latent representations and targets of the test set."""
        inputs, targets = batch
        predictions, features, recon_features, latent_rep = self(inputs)
        self.test_features.append(latent_rep)
        self.test_target.append(batch[1])

    def test_epoch_end(self, outputs):
        """Concatenate accumulated test tensors and run the evaluations."""
        self.test_features = torch.cat(self.test_features, axis=0)
        self.test_target = torch.cat(self.test_target, axis=0)
        self.analysis()
        self.anomaly_detection()

    def analysis(self):
        """t-SNE scatter plot of the test latent space, colored per class."""
        features = TSNE(n_components=2).fit_transform(self.test_features.cpu().numpy())
        target = self.test_target.cpu().numpy()
        mc_sig_index = np.where(target == 0)
        mc_bkg_index = [np.where(target == i+1) for i in range(self.num_bkg)]
        plt.figure()
        plt.scatter(features[mc_sig_index, 0], features[mc_sig_index, 1], label="Signal")
        for i in range(self.num_bkg):
            plt.scatter(features[mc_bkg_index[i], 0], features[mc_bkg_index[i], 1], label=f"Background_{i+1}")
        plt.title("Hidden features")
        plt.legend()
        plt.tight_layout()
        plt.savefig(f"{self.log_path}/report_features.png")
        plt.clf()

    def get_mean_dist(self, x, samples):
        """Mean of the K largest Euclidean distances from *x* to *samples*."""
        x = samples-x
        x = torch.norm(x, dim=1)
        return torch.sort(x)[0][-self.K:].mean()

    def plot(self, scores, target, name):
        """Save score distribution and ROC curve for one scoring scheme."""
        mc_sig_index = np.where(target == 1)
        mc_bkg_index = np.where(target == 0)
        false_pos_rate, true_pos_rate, _ = roc_curve(target, scores)
        plt.subplot(2, 1, 1)
        plt.title("score distribution")
        plt.hist(scores[mc_sig_index], bins=40, label="Signal",
                 range=[0, 1], histtype=u"step")
        plt.hist(scores[mc_bkg_index], bins=40,
                 label="Background", range=[0, 1], histtype=u"step")
        plt.yscale("log")
        plt.ylabel("#events")
        plt.xlabel("prediction score")
        plt.legend(loc="best")
        # plotting ROC curve
        plt.subplot(2, 1, 2)
        plt.title("ROC curve")
        plt.plot(false_pos_rate, true_pos_rate)
        plt.xlabel("False Positive Rate"), plt.ylabel("True Positive Rate")
        plt.text(0.8, 0.2, f"AUC = {roc_auc_score(target, scores)}", bbox=dict(
            facecolor="none", edgecolor="black", boxstyle="square"))
        plt.tight_layout()
        plt.savefig(f"{self.log_path}/report_{name}.png")
        plt.clf()

    def anomaly_detection(self):
        """Compute traditional, new and combined anomaly scores from k-NN
        distances in latent space, plot them and dump a score table."""
        training_dists = get_distance_matrix(self.inf_batch_size,
                                             self.training_features, self.training_features, self.K, self.device)
        train_mean, train_std = torch.mean(
            training_dists), torch.std(training_dists)
        d_train_samples = get_distance_matrix(self.inf_batch_size,
                                              self.test_features, self.training_features, self.K, self.device)
        d_test_samples = get_distance_matrix(self.inf_batch_size,
                                             self.test_features, self.test_features, self.K, self.device)
        # "Traditional" score: z-score of the distance to the training set.
        delta_trad = torch.tensor(
            [(d_train-train_mean)/(train_std+1e-8) for d_train in d_train_samples])
        # "New" score: relative density contrast raised to the latent dim m.
        delta_new = torch.tensor([(d_test**(-self.m)-d_train**(-self.m))/(
            d_train**(int(-self.m/2))+1e-8) for d_test, d_train in zip(d_test_samples, d_train_samples)])
        rms_trad, rms_new = (delta_trad**2).mean(), (delta_new**2).mean()
        # Map deltas to [0, 1] via the Gaussian CDF (erf form).
        scores_trad = 0.5*(1+torch.erf(delta_trad*(1.0/(rms_trad*(2**0.5)))))
        scores_new = 0.5*(1+torch.erf(delta_new*(1.0/(rms_new*(2**0.5)))))
        scores_comb = torch.mul(scores_trad, scores_new)**0.5
        # Label 0 marks signal, i.e. the anomaly class.
        targets = (self.test_target == 0).float().cpu()
        anomaly_comb = ((scores_comb.cpu() >= 0.5) ==
                        targets).float().mean()
        anomaly_trad = ((scores_trad.cpu() >= 0.5) ==
                        targets).float().mean()
        anomaly_new = ((scores_new.cpu() >= 0.5) ==
                       targets).float().mean()
        # BUG FIX: the original reported anomaly_trad under both keys, never
        # computed an accuracy for the new score, and discarded anomaly_comb.
        stats = {"Anomaly_new Acc": anomaly_new,
                 "Anomaly_trad Acc": anomaly_trad,
                 "Anomaly_comb Acc": anomaly_comb}
        self.plot(scores_trad.numpy(), targets.numpy(), "Trad")
        self.plot(scores_new.numpy(), targets.numpy(), "New")
        self.plot(scores_comb.numpy(), targets.numpy(), "Comb")
        temp_df = pd.DataFrame()
        temp_df["targets"] = targets
        temp_df["new_scores"] = scores_new
        temp_df["trad_scores"] = scores_trad
        temp_df["comb_scores"] = scores_comb
        temp_df.to_csv(f"{self.log_path}/score_table.csv")
        print_dict(stats, "FINAL STATS")
|
<filename>train.py<gh_stars>0
import os
import torch
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torch.utils.data import random_split
from models import Classifier, AutoEncoder
import numpy as np
from utils import print_dict, get_distance_matrix
import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score, mean_squared_error, log_loss, roc_curve
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
class Model(pl.LightningModule):
def __init__(self,
momentum,
nesterov,
learn_rate,
learn_rate_decay,
classifier_wt,
encoder_wt,
optimizer,
classifier_nodes,
encoder_nodes,
dropout,
activation,
input_size,
output_size,
save_tb_logs,
log_path,
K,
inf_batch_size
):
'''create a training class'''
super(pl.LightningModule, self).__init__()
self.classifier = Classifier(
classifier_nodes, dropout, activation, input_size, output_size)
self.encoder = AutoEncoder(
encoder_nodes, dropout, activation, input_size+sum(classifier_nodes)+output_size)
self.m = encoder_nodes[-1]
self.example_input_array = torch.ones((1, input_size))
self.momentum = momentum
self.nesterov = nesterov
self.learn_rate = learn_rate
self.learn_rate_decay = learn_rate_decay
self.classifier_wt = classifier_wt
self.encoder_wt = encoder_wt
self.optimizer_ = optimizer
self.encoder_loss_fn = torch.nn.MSELoss()
self.classifier_loss_fn = torch.nn.CrossEntropyLoss()
self.K = K
self.inf_batch_size = inf_batch_size
self.log_path = log_path
self.num_bkg = output_size
def forward(self, input):
'''get output'''
predictions, features = self.classifier(input)
recon_features, latent_rep = self.encoder(features.detach())
return predictions, features, recon_features, latent_rep
def configure_optimizers(self):
'''create optimizer and scheduler'''
optimizer = None
if self.optimizer_ == 'adam':
optimizer = torch.optim.Adam(self.parameters(
), lr=self.learn_rate, betas=[self.momentum, 0.999])
else:
optimizer = torch.optim.SGD(
self.parameters(), lr=self.learn_rate, momentum=self.momentum, nesterov=self.nesterov)
def scheduler_fn(epoch): return 1./(1+epoch*self.learn_rate_decay)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, scheduler_fn)
return [optimizer], [scheduler]
def step_helper(self, batch):
inputs, targets = batch
predictions, features, recon_features, latent_rep = self(inputs)
classifier_loss = self.classifier_loss_fn(
predictions, (targets-1).long())
recon_loss = self.encoder_loss_fn(features, recon_features)
total_loss = self.classifier_wt*classifier_loss+self.encoder_wt*recon_loss
accuracy = (torch.argmax(predictions, dim=1)
== (targets-1)).float().mean()
return total_loss, classifier_loss, recon_loss, accuracy, latent_rep
def training_step(self, batch, batch_idx):
'''executed during training'''
total_loss, classifier_loss, recon_loss, accuracy, _ = self.step_helper(
batch)
self.log('train_loss', total_loss, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
self.log('train_acc', accuracy, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
self.log('train_classifier_loss', classifier_loss,
on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log('train_recon_loss', recon_loss, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
return {'loss': total_loss, 'acc': accuracy}
def validation_step(self, batch, batch_idx):
'''executed during validation'''
total_loss, classifier_loss, recon_loss, accuracy, _ = self.step_helper(
batch)
self.log('val_loss', total_loss, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
self.log('val_acc', accuracy, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
self.log('val_classifier_loss', classifier_loss, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
self.log('val_recon_loss', recon_loss, on_step=True,
on_epoch=True, prog_bar=True, logger=True)
return {'val_loss': total_loss, 'val_acc': accuracy}
def on_test_epoch_start(self):
self.eval()
training_samples = []
for _, batch in enumerate(self.train_dataloader()):
_, _, _, features = self(batch[0].to(self.device))
training_samples.append(features)
for _, batch in enumerate(self.val_dataloader()):
_, _, _, features = self(batch[0].to(self.device))
training_samples.append(features)
self.training_features = torch.cat(training_samples, axis=0)
self.test_features = []
self.test_target = []
def test_step(self, batch, batch_idx):
inputs, targets = batch
predictions, features, recon_features, latent_rep = self(inputs)
self.test_features.append(latent_rep)
self.test_target.append(batch[1])
def test_epoch_end(self, outputs):
self.test_features = torch.cat(self.test_features, axis=0)
self.test_target = torch.cat(self.test_target, axis=0)
self.analysis()
self.anomaly_detection()
def analysis(self):
features = TSNE(n_components=2).fit_transform(self.test_features.cpu().numpy())
target = self.test_target.cpu().numpy()
mc_sig_index = np.where(target == 0)
mc_bkg_index = [np.where(target == i+1) for i in range(self.num_bkg)]
plt.figure()
plt.scatter(features[mc_sig_index,0],features[mc_sig_index,1], label="Signal")
for i in range(self.num_bkg):
plt.scatter(features[mc_bkg_index[i],0],features[mc_bkg_index[i],1], label=f"Background_{i+1}")
plt.title("Hidden features")
plt.legend()
plt.tight_layout()
plt.savefig(f"{self.log_path}/report_features.png")
plt.clf()
def get_mean_dist(self, x, samples):
x = samples-x
x = torch.norm(x, dim=1)
return torch.sort(x)[0][-self.K:].mean()
def plot(self, scores, target, name):
mc_sig_index = np.where(target == 1)
mc_bkg_index = np.where(target == 0)
false_pos_rate, true_pos_rate, _ = roc_curve(target, scores)
plt.subplot(2, 1, 1)
plt.title("score distribution")
plt.hist(scores[mc_sig_index], bins=40, label="Signal",
range=[0, 1], histtype=u"step")
plt.hist(scores[mc_bkg_index], bins=40,
label="Background", range=[0, 1], histtype=u"step")
plt.yscale("log")
plt.ylabel("#events")
plt.xlabel("prediction score")
plt.legend(loc="best")
# plotting ROC curve
plt.subplot(2, 1, 2)
plt.title("ROC curve")
plt.plot(false_pos_rate, true_pos_rate)
plt.xlabel("False Positive Rate"), plt.ylabel("True Positive Rate")
plt.text(0.8, 0.2, f"AUC = {roc_auc_score(target, scores)}", bbox=dict(
facecolor="none", edgecolor="black", boxstyle="square"))
plt.tight_layout()
plt.savefig(f"{self.log_path}/report_{name}.png")
plt.clf()
def anomaly_detection(self):
    """KNN-distance-based anomaly scoring of test samples against training samples.

    Computes a "traditional" z-score-style score and a "new" relative-distance
    score, combines them geometrically, reports accuracy at a 0.5 threshold for
    each variant, saves per-variant plots and a CSV score table.
    """
    # Reference scale: K-farthest-neighbour distances within the training set.
    training_dists = get_distance_matrix(self.inf_batch_size,
        self.training_features, self.training_features, self.K, self.device)
    train_mean, train_std = torch.mean(training_dists), torch.std(training_dists)
    # Distances of test samples to the training set and to other test samples.
    d_train_samples = get_distance_matrix(self.inf_batch_size,
        self.test_features, self.training_features, self.K, self.device)
    d_test_samples = get_distance_matrix(self.inf_batch_size,
        self.test_features, self.test_features, self.K, self.device)
    # Traditional score: deviation from the training distance scale.
    delta_trad = torch.tensor(
        [(d_train-train_mean)/(train_std+1e-8) for d_train in d_train_samples])
    # New score: relative difference of inverse-power distances.
    delta_new = torch.tensor([(d_test**(-self.m)-d_train**(-self.m))/(
        d_train**(int(-self.m/2))+1e-8) for d_test, d_train in zip(d_test_samples, d_train_samples)])
    rms_trad, rms_new = (delta_trad**2).mean(), (delta_new**2).mean()
    # Map deviations to [0, 1] via the Gaussian CDF (erf form).
    scores_trad = 0.5*(1+torch.erf(delta_trad*(1.0/(rms_trad*(2**0.5)))))
    scores_new = 0.5*(1+torch.erf(delta_new*(1.0/(rms_new*(2**0.5)))))
    scores_comb = torch.mul(scores_trad, scores_new)**0.5  # geometric mean
    targets = (self.test_target == 0).float().cpu()
    # Accuracy at a 0.5 threshold for each score variant.
    anomaly_new = ((scores_new.cpu() >= 0.5) == targets).float().mean()
    anomaly_trad = ((scores_trad.cpu() >= 0.5) == targets).float().mean()
    anomaly_comb = ((scores_comb.cpu() >= 0.5) == targets).float().mean()
    # BUG FIX: the stats dict previously reported `anomaly_trad` under both
    # keys and never used the combined accuracy at all.
    stats = {"Anomaly_new Acc": anomaly_new,
             "Anomaly_trad Acc": anomaly_trad,
             "Anomaly_comb Acc": anomaly_comb}
    self.plot(scores_trad.numpy(), targets.numpy(), "Trad")
    self.plot(scores_new.numpy(), targets.numpy(), "New")
    self.plot(scores_comb.numpy(), targets.numpy(), "Comb")
    temp_df = pd.DataFrame()
    temp_df["targets"] = targets
    temp_df["new_scores"] = scores_new
    temp_df["trad_scores"] = scores_trad
    temp_df["comb_scores"] = scores_comb
    temp_df.to_csv(f"{self.log_path}/score_table.csv")
    print_dict(stats, "FINAL STATS")
|
en
| 0.949678
|
create a training class get output create optimizer and scheduler executed during training executed during validation # plotting ROC curve
| 2.327782
| 2
|
Evaluation/Plot_pvalueOverTime.py
|
Lucciola111/stream_autoencoder_windowing
| 4
|
6628779
|
<filename>Evaluation/Plot_pvalueOverTime.py
import matplotlib.pyplot as plt
import seaborn as sns
def plot_pvalue_over_time(df_p_value, value="p-value", max_ylim=False, log=True, plot_file_name=False, latex_font=False):
    """Point-plot `value` over time per dimension type; optionally save the figure.

    Args (as used below):
        df_p_value: DataFrame with columns "Time", `value`, "Dimension Type".
        value: y-axis column ("p-value" or a drift score).
        max_ylim: upper y-limit, or False to leave it automatic.
        log: if True, use a logarithmic y-scale.
        plot_file_name: file name under Plots/Where/, or False to skip saving.
        latex_font: if True, render all text with LaTeX.
    """
    if latex_font:
        # Use LaTex Font
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
    # Create plot
    plt.style.use('ggplot')
    # sns.set_theme()
    plt.tight_layout()
    fontsize = 15
    params = {'axes.labelsize': fontsize, 'axes.titlesize': fontsize, 'legend.fontsize': fontsize,
              'xtick.labelsize': fontsize, 'ytick.labelsize': fontsize}
    plt.rcParams.update(params)
    ax = sns.catplot(x="Time", y=value, kind="point", hue="Dimension Type",
                     hue_order=['Non-drift dimension', 'Drift dimension'], data=df_p_value, zorder=10)
    ax.fig.set_size_inches(15, 5)
    # NOTE(review): `_legend` is a private seaborn attribute — confirm on upgrade.
    ax._legend.remove()
    if max_ylim:
        plt.ylim(0, max_ylim)
    if value == "p-value":
        # Conventional 0.05 significance threshold line.
        plt.axhline(y=0.05, color='black', linestyle='-', zorder=0)
    if log:
        plt.yscale("log")
    plt.legend()
    plt.xlabel("Time", fontsize=18)
    if value == "p-value":
        plt.ylabel("p-value", fontsize=18)
    else:
        plt.ylabel("Drift score", fontsize=18)
    if plot_file_name:
        plt.savefig("Plots/Where/" + str(plot_file_name), bbox_inches='tight')
    plt.show()
|
<filename>Evaluation/Plot_pvalueOverTime.py
import matplotlib.pyplot as plt
import seaborn as sns
def plot_pvalue_over_time(df_p_value, value="p-value", max_ylim=False, log=True, plot_file_name=False, latex_font=False):
if latex_font:
# Use LaTex Font
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Create plot
plt.style.use('ggplot')
# sns.set_theme()
plt.tight_layout()
fontsize = 15
params = {'axes.labelsize': fontsize, 'axes.titlesize': fontsize, 'legend.fontsize': fontsize,
'xtick.labelsize': fontsize, 'ytick.labelsize': fontsize}
plt.rcParams.update(params)
ax = sns.catplot(x="Time", y=value, kind="point", hue="Dimension Type",
hue_order=['Non-drift dimension', 'Drift dimension'], data=df_p_value, zorder=10)
ax.fig.set_size_inches(15, 5)
ax._legend.remove()
if max_ylim:
plt.ylim(0, max_ylim)
if value == "p-value":
plt.axhline(y=0.05, color='black', linestyle='-', zorder=0)
if log:
plt.yscale("log")
plt.legend()
plt.xlabel("Time", fontsize=18)
if value == "p-value":
plt.ylabel("p-value", fontsize=18)
else:
plt.ylabel("Drift score", fontsize=18)
if plot_file_name:
plt.savefig("Plots/Where/" + str(plot_file_name), bbox_inches='tight')
plt.show()
|
en
| 0.378921
|
# Use LaTex Font # Create plot # sns.set_theme()
| 2.132475
| 2
|
src/run.py
|
MasakiAsada/MOL_DDIE
| 9
|
6628780
|
<filename>src/run.py
import sys
import copy
import numpy as np
import time
import pickle as pkl
import yaml
import chainer
from chainer import optimizers, cuda, serializers
import chainer.functions as F
cp = cuda.cupy
from preprocess import to_indx
from model import RelationExtractor
from cnn import CNN
from deepcnn import DeepCNN
from utils import calculate_microF
# Load hyper-parameters / paths from the YAML config given as the first
# command-line argument (second argument selects the GPU device below).
with open(sys.argv[1], 'r') as f:
    # BUG FIX: yaml.load(f) without a Loader is deprecated and allows
    # arbitrary object construction; safe_load is sufficient for plain
    # scalar/list/map configs like this one.
    params = yaml.safe_load(f)

word_indx = {}
label_indx = {}
# Index-encode the datasets; vocabularies are populated during the training pass.
Xtr, Ytr, w2v, mol2v = to_indx(params['train_path'], params, word_indx, label_indx, training=True)
Xte, Yte, _, _ = to_indx(params['test_path'], params, word_indx, label_indx, training=False)
params['out_dim'] = len(label_indx)

model = RelationExtractor(params, w2v, mol2v)
gpu_device = int(sys.argv[2])
cuda.get_device(gpu_device).use()
model.to_gpu(gpu_device)
# Copies used for parameter averaging across updates.
average_model = copy.deepcopy(model)
store_model = copy.deepcopy(model)
store_model.init_params()

optimizer = optimizers.Adam(params['learning_rate'])
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(params['l2_lambda']))
# Freeze pre-trained molecular embeddings when provided.
if mol2v is not None:
    model.molemb.disable_update()
bs = params['batchsize']
def train(X, Y):
    """Run one training epoch over (X, Y) in shuffled mini-batches."""
    t0 = time.time()
    total_loss = 0
    order = np.random.permutation(len(X))
    for start_idx in range(0, len(X), bs):
        batch_idx = order[start_idx:start_idx + bs]  # slicing clamps at the end
        model.zerograds()
        x = cp.array(X[batch_idx])
        y = cp.array(Y[batch_idx])
        loss = F.softmax_cross_entropy(model(x), y)
        loss.backward()
        optimizer.update()
        total_loss += cuda.to_cpu(loss.data)
    # Snapshot parameters for the averaged model.
    store_model.store_params(model)
    print('Train: elapsedtime={:.2f} loss={:.2f}'.format(time.time()-t0, total_loss))
def test(X, Y):
    """Evaluate on (X, Y): print loss and precision/recall/micro-F, return predictions."""
    start = time.time()
    losses = 0
    n = len(X)
    P = np.array([])
    # Refresh the averaged model from the accumulated parameter snapshots.
    average_model.average_params(store_model)
    with chainer.using_config('train', False):
        for i in range(0, n, bs):
            x = cp.array(X[i:(i+bs) if (i+bs) < n else n])
            y = cp.array(Y[i:(i+bs) if (i+bs) < n else n])
            if params['averaging']:
                p = average_model(x)
            else:
                p = model(x)
            loss = F.softmax_cross_entropy(p, y)
            losses += cuda.to_cpu(loss.data)
            # Add a fixed bias b to the softmax probability of class index 0
            # before argmax (cp.eye(1, 5) is the row [1, 0, 0, 0, 0]).
            # NOTE(review): presumably this favours one specific label at
            # decision time — confirm which label index 0 maps to.
            b = 0.2
            bias = b * F.cast(cp.eye(1, 5), 'f')
            bias, _ = F.broadcast(bias, p)
            #pred = F.argmax(p, axis=1)
            pred = F.argmax(F.softmax(p)+bias, axis=1)
            P = np.concatenate((P, cuda.to_cpu(pred.data)))
    print('Test : elapsedtime={:.2f} loss={:.2f}'.format(time.time()-start, losses))
    prec, recall, microF = calculate_microF(P, Y, label_indx['negative'])
    print(' Precision={:.4f} Recall={:.4f} microF={:.4f}'.format(prec, recall, microF))
    return P
# Main loop: one training pass then one evaluation pass per epoch.
for epoch in range(1, params['n_epoch'] + 1):
    print(f'epoch={epoch}')
    train(Xtr, Ytr)
    test(Xte, Yte)
|
<filename>src/run.py
import sys
import copy
import numpy as np
import time
import pickle as pkl
import yaml
import chainer
from chainer import optimizers, cuda, serializers
import chainer.functions as F
cp = cuda.cupy
from preprocess import to_indx
from model import RelationExtractor
from cnn import CNN
from deepcnn import DeepCNN
from utils import calculate_microF
#if len(sys.argv) != 2:
# sys.stderr.write('Usage: python3 %s yamlfile' % (sys.argv[0]))
# sys.exit(-1)
with open(sys.argv[1], 'r') as f:
params = yaml.load(f)
word_indx = {}
label_indx = {}
Xtr, Ytr, w2v, mol2v = to_indx(params['train_path'], params, word_indx, label_indx, training=True)
Xte, Yte, _, _ = to_indx(params['test_path'], params, word_indx, label_indx, training=False)
params['out_dim'] = len(label_indx)
model = RelationExtractor(params, w2v, mol2v)
gpu_device = int(sys.argv[2])
cuda.get_device(gpu_device).use()
model.to_gpu(gpu_device)
average_model = copy.deepcopy(model)
store_model = copy.deepcopy(model)
store_model.init_params()
optimizer = optimizers.Adam(params['learning_rate'])
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(params['l2_lambda']))
if mol2v is not None: model.molemb.disable_update()
bs = params['batchsize']
def train(X, Y):
start = time.time()
losses = 0
n = len(X)
sffindx = np.random.permutation(n)
for i in range(0, n, bs):
model.zerograds()
x = cp.array(X[sffindx[i:(i+bs) if (i+bs) < n else n]])
y = cp.array(Y[sffindx[i:(i+bs) if (i+bs) < n else n]])
p = model(x)
loss = F.softmax_cross_entropy(p, y)
loss.backward()
optimizer.update()
losses += cuda.to_cpu(loss.data)
store_model.store_params(model)
print('Train: elapsedtime={:.2f} loss={:.2f}'.format(time.time()-start, losses))
def test(X, Y):
start = time.time()
losses = 0
n = len(X)
P = np.array([])
average_model.average_params(store_model)
with chainer.using_config('train', False):
for i in range(0, n, bs):
x = cp.array(X[i:(i+bs) if (i+bs) < n else n])
y = cp.array(Y[i:(i+bs) if (i+bs) < n else n])
if params['averaging']:
p = average_model(x)
else:
p = model(x)
loss = F.softmax_cross_entropy(p, y)
losses += cuda.to_cpu(loss.data)
b = 0.2
bias = b * F.cast(cp.eye(1, 5), 'f')
bias, _ = F.broadcast(bias, p)
#pred = F.argmax(p, axis=1)
pred = F.argmax(F.softmax(p)+bias, axis=1)
P = np.concatenate((P, cuda.to_cpu(pred.data)))
print('Test : elapsedtime={:.2f} loss={:.2f}'.format(time.time()-start, losses))
prec, recall, microF = calculate_microF(P, Y, label_indx['negative'])
print(' Precision={:.4f} Recall={:.4f} microF={:.4f}'.format(prec, recall, microF))
return P
for epoch in range(1, params['n_epoch']+1):
print('epoch={}'.format(epoch))
train(Xtr, Ytr)
test(Xte, Yte)
|
uk
| 0.160176
|
#if len(sys.argv) != 2: # sys.stderr.write('Usage: python3 %s yamlfile' % (sys.argv[0])) # sys.exit(-1) #pred = F.argmax(p, axis=1)
| 2.088895
| 2
|
converter_matriz.py
|
wmonteiro92/tsp-ga
| 0
|
6628781
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 17:40:09 2020
@author: wmont
"""
import numpy as np
def ler_arquivo(file, start_line, end_line):
    """Read lines [start_line:end_line) of '<file>.tsp' as lists of ints."""
    # BUG FIX: the original left the file handle open; use a context manager.
    with open(f'{file}.tsp', 'r') as handle:
        lines = handle.read().splitlines()[start_line:end_line]
    return [[int(value) for value in line.split()] for line in lines]
def converter_matrix_triangular(values):
    """Build a symmetric distance matrix from an upper-triangular row listing.

    Input row i holds the distances d(i, i+1..n); each row is left-padded with
    zeros so its data starts at column i+1, and a final all-zero row is
    appended for the last node.
    """
    # Left-pad each row (the original wrapped this in a needless [ ... ][0]).
    padded = [[0] * (len(values) - len(line) + 1) + line for line in values]
    padded.append([0] * len(padded[0]))
    # Keep the upper triangle and mirror it to obtain a symmetric matrix.
    matrix = np.triu(padded)
    return matrix + matrix.T
# Reading the file (brazil58: triangular listing, data rows 7..end-1)
file = 'brazil58'
values = ler_arquivo(file, 7, -1)
np.savetxt(f'{file}_matrix.out', converter_matrix_triangular(values), delimiter=',')
# Reading the file (bays29: full matrix, data rows 8..36)
file = 'bays29'
values = ler_arquivo(file, 8, 37)
np.savetxt(f'{file}_matrix.out', np.matrix(values), delimiter=',')
# Reading the file (swiss42: full matrix, data rows 7..48)
file = 'swiss42'
values = ler_arquivo(file, 7, 49)
np.savetxt(f'{file}_matrix.out', np.matrix(values), delimiter=',')
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 17:40:09 2020
@author: wmont
"""
import numpy as np
def ler_arquivo(file, start_line, end_line):
lines = open(f'{file}.tsp', 'r').read().splitlines()[start_line:end_line]
values = [[int(value) for value in line.split()] for line in lines]
return values
def converter_matrix_triangular(values):
values = [[[0] * (len(values) - len(line) + 1) + line for line in values]][0]
values.append([0] * len(values[0]))
# criando a matriz simétrica
matrix = np.triu(values)
return matrix + matrix.T
# lendo o arquivo
file = 'brazil58'
values = ler_arquivo(file, 7, -1)
np.savetxt(f'{file}_matrix.out', converter_matrix_triangular(values), delimiter=',')
# lendo o arquivo
file = 'bays29'
values = ler_arquivo(file, 8, 37)
np.savetxt(f'{file}_matrix.out', np.matrix(values), delimiter=',')
# lendo o arquivo
file = 'swiss42'
values = ler_arquivo(file, 7, 49)
np.savetxt(f'{file}_matrix.out', np.matrix(values), delimiter=',')
|
pt
| 0.523467
|
# -*- coding: utf-8 -*- Created on Sat Mar 28 17:40:09 2020 @author: wmont # criando a matriz simétrica # lendo o arquivo # lendo o arquivo # lendo o arquivo
| 2.77124
| 3
|
swt_translator/translator.py
|
PrediktorAS/quarry
| 2
|
6628782
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph_builder import build_instance_graph
from .graph_builder import build_type_graph
from .swt_builder import build_swt
from typing import List, Optional
import pandas as pd
from opcua_tools import parse_xml_dir, parse_nodeid
def translate(xml_dir: str, namespaces: List[str], output_ttl_file: str, output_owl_file: Optional[str] = None,
              subclass_closure: bool = False, subproperty_closure: bool = False,
              signal_id_csv: Optional[str] = None):
    """Translate OPC UA nodeset XML files into semantic-web graphs.

    Writes the instance graph to `output_ttl_file` (Turtle); when
    `output_owl_file` is given, also writes the type graph as RDF/XML.
    """
    parsed = parse_xml_dir(xmldir=xml_dir, namespaces=namespaces)
    params_dict = {'subclass_closure': subclass_closure,
                   'subproperty_closure': subproperty_closure}
    signal_id_df = None
    if signal_id_csv is not None:
        # Normalize the CSV: parse NodeIds, extract their namespace index,
        # and force signal_id to a nullable Int32 column.
        signal_id_df = pd.read_csv(signal_id_csv)
        signal_id_df['NodeId'] = signal_id_df['NodeId'].map(parse_nodeid)
        signal_id_df['ns'] = signal_id_df['NodeId'].map(lambda nodeid: nodeid.namespace)
        signal_id_df['signal_id'] = signal_id_df['signal_id'].astype(pd.Int32Dtype())
    triples_dfs = build_swt(nodes=parsed['nodes'], references=parsed['references'],
                            lookup_df=parsed['lookup_df'], signal_id_df=signal_id_df,
                            params_dict=params_dict)
    instance_graph = build_instance_graph(triples_dfs=triples_dfs, namespaces=namespaces,
                                          params_dict=params_dict)
    instance_graph.serialize(destination=output_ttl_file, format='ttl', encoding='utf-8')
    if output_owl_file is not None:
        type_graph = build_type_graph(triples_dfs=triples_dfs, namespaces=namespaces)
        type_graph.serialize(destination=output_owl_file, format='pretty-xml', encoding='utf-8')
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph_builder import build_instance_graph
from .graph_builder import build_type_graph
from .swt_builder import build_swt
from typing import List, Optional
import pandas as pd
from opcua_tools import parse_xml_dir, parse_nodeid
def translate(xml_dir: str, namespaces: List[str], output_ttl_file: str, output_owl_file: Optional[str] = None,
subclass_closure: bool = False, subproperty_closure: bool = False,
signal_id_csv: Optional[str] = None):
parse_dict = parse_xml_dir(xmldir=xml_dir, namespaces=namespaces)
params_dict = {'subclass_closure': subclass_closure,
'subproperty_closure': subproperty_closure}
if signal_id_csv is not None:
signal_id_df = pd.read_csv(signal_id_csv)
signal_id_df['NodeId'] = signal_id_df['NodeId'].map(parse_nodeid)
signal_id_df['ns'] = signal_id_df['NodeId'].map(lambda x: x.namespace)
signal_id_df['signal_id'] = signal_id_df['signal_id'].astype(pd.Int32Dtype())
else:
signal_id_df = None
triples_dfs = build_swt(nodes=parse_dict['nodes'], references=parse_dict['references'],
lookup_df=parse_dict['lookup_df'], signal_id_df=signal_id_df, params_dict=params_dict)
g = build_instance_graph(triples_dfs=triples_dfs, namespaces=namespaces, params_dict=params_dict)
g.serialize(destination=output_ttl_file, format='ttl', encoding='utf-8')
if output_owl_file is not None:
g2 = build_type_graph(triples_dfs=triples_dfs, namespaces=namespaces)
g2.serialize(destination=output_owl_file, format='pretty-xml', encoding='utf-8')
|
en
| 0.851094
|
# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 1.829574
| 2
|
PyBank/main.py/main.py
|
Georgeaziz1985/Python-HW
| 0
|
6628783
|
<gh_stars>0
# Import modules
import os
import csv
# Define variables
month_count = 0          # number of data rows (months) read
date_list = []           # month labels, aligned with profit_l_list
profit_l_list = []       # monthly profit/loss values
total_p_l = float(0)
change_value_list = []   # month-over-month deltas (first entry is vs. 0)
prior_value = float(0)
# Define path to budget data csv
csvpath = os.path.join("C:/Users/<NAME>/Desktop/case-homework/python-challenge/PyBank/Resources/budget_data.csv")
# Open csv
with open(csvpath, 'r', newline='') as csvfile:
    # Define csv_reader
    csv_reader = csv.reader(csvfile, delimiter=',')
    # Identify header of the csv file and skip over it
    csv_header = next(csv_reader)
    # Loop through the dataset to count months and add to list of dates and profit/loss values
    for value in csv_reader:
        month_count += 1
        date_list.append(str(value[0]))
        profit_l_list.append(float(value[1]))
        # Create list of profit/loss changes month-to-month
        # NOTE: the first delta is computed against 0, so it equals the first
        # month's raw value; average() below compensates by excluding it.
        current_value = value[1]
        change_value = float(current_value) - float(prior_value)
        change_value_list.append(change_value)
        prior_value = current_value
# Average month-over-month change, excluding the first (spurious) entry.
def average(change_value_list):
    """Mean of the deltas, dropping element 0 from both the sum and the count.

    The first delta was computed against a prior value of 0, i.e. it is the
    first month's raw profit/loss rather than a real change.
    """
    count = len(change_value_list) - 1
    return (sum(change_value_list) - change_value_list[0]) / count
# Calc avg change (2 decimal places)
average_change = round(average(change_value_list), 2)
# Calc total profit/loss
total_p_l = round(sum(profit_l_list))
# Match dates with the highest and lowest profit/loss values.
# BUG FIX: locate the extremes BEFORE rounding — `list.index(round(max(...)))`
# raises ValueError whenever the extreme value is not a whole number.
highest_index = profit_l_list.index(max(profit_l_list))
lowest_index = profit_l_list.index(min(profit_l_list))
highest_p_l = round(profit_l_list[highest_index])
lowest_p_l = round(profit_l_list[lowest_index])
# Display output via terminal screen
print("------------------------------")
print("Financial Analysis")
print("------------------------------")
print(f"Total Months: {month_count}")
print(f"Total: ${total_p_l}")
print(f"Average Change: ${average_change}")
print(f"Greatest Increase in Profits: {date_list[highest_index]} (${highest_p_l})")
print(f"Greatest Decrease in Profits: {date_list[lowest_index]} (${lowest_p_l})")
# Create output path and write the same report to a text file
output_path = os.path.join('..', 'PyBank','Analysis',"Financial_Analysis.txt")
with open(output_path, 'w', newline='') as text_file:
    # Write the report to a text file within the Output folder
    print('-----------------------------', file=text_file)
    print("Financial Analysis", file=text_file)
    print('-----------------------------', file=text_file)
    print(f"Total Months: {month_count}", file=text_file)
    print(f"Total: ${total_p_l}", file=text_file)
    print(f"Average Change: ${average_change}", file=text_file)
    print(f"Greatest Increase in Profits: {date_list[highest_index]} (${highest_p_l})", file=text_file)
    print(f"Greatest Decrease in Profits: {date_list[lowest_index]} (${lowest_p_l})",file=text_file)
|
# Import modules
import os
import csv
# Define variables
month_count = 0
date_list = []
profit_l_list = []
total_p_l = float(0)
change_value_list = []
prior_value = float(0)
# Define path to budget data csv
csvpath = os.path.join("C:/Users/<NAME>/Desktop/case-homework/python-challenge/PyBank/Resources/budget_data.csv")
# Open csv
with open(csvpath, 'r', newline='') as csvfile:
# Define csv_reader
csv_reader = csv.reader(csvfile, delimiter=',')
# Identify header of the csv file and skip over it
csv_header = next(csv_reader)
# Loop through the dataset to count months and add to list of dates and profit/loss values
for value in csv_reader:
month_count += 1
date_list.append(str(value[0]))
profit_l_list.append(float(value[1]))
# Create list of profit/loss changes month-to-month
current_value = value[1]
change_value = float(current_value) - float(prior_value)
change_value_list.append(change_value)
prior_value = current_value
# Define function to calc avg change in profit/loss between months
def average(change_value_list):
x = len(change_value_list)
total = sum(change_value_list) - change_value_list[0]
avg = total / (x - 1)
return avg
# Calc avg change
average_change = round(average(change_value_list), 2)
# Calc total profit/loss
total_p_l = round(sum(profit_l_list))
# Match dates with the highest and lowest profit/loss values
highest_p_l = round(max(profit_l_list))
lowest_p_l = round(min(profit_l_list))
highest_index = profit_l_list.index(highest_p_l)
lowest_index = profit_l_list.index(lowest_p_l)
# Display output via terminal screen
print("------------------------------")
print("Financial Analysis")
print("------------------------------")
print(f"Total Months: {month_count}")
print(f"Total: ${total_p_l}")
print(f"Average Change: ${average_change}")
print(f"Greatest Increase in Profits: {date_list[highest_index]} (${highest_p_l})")
print(f"Greatest Decrease in Profits: {date_list[lowest_index]} (${lowest_p_l})")
# Create output path
output_path = os.path.join('..', 'PyBank','Analysis',"Financial_Analysis.txt")
with open(output_path, 'w', newline='') as text_file:
# Write the report to a text file within the Output folder
print('-----------------------------', file=text_file)
print("Financial Analysis", file=text_file)
print('-----------------------------', file=text_file)
print(f"Total Months: {month_count}", file=text_file)
print(f"Total: ${total_p_l}", file=text_file)
print(f"Average Change: ${average_change}", file=text_file)
print(f"Greatest Increase in Profits: {date_list[highest_index]} (${highest_p_l})", file=text_file)
print(f"Greatest Decrease in Profits: {date_list[lowest_index]} (${lowest_p_l})",file=text_file)
|
en
| 0.759669
|
# Import modules # Define variables # Define path to budget data csv # Open csv # Define csv_reader # Identify header of the csv file and skip over it # Loop through the dataset to count months and add to list of dates and profit/loss values # Create list of profit/loss changes month-to-month # Define function to calc avg change in profit/loss between months # Calc avg change # Calc total profit/loss # Match dates with the highest and lowest profit/loss values # Display output via terminal screen # Create output path # Write the report to a text file within the Output folder
| 3.731217
| 4
|
anaconda_project/requirements_registry/providers/test/test_redis_provider.py
|
kathatherine/anaconda-project
| 0
|
6628784
|
<filename>anaconda_project/requirements_registry/providers/test/test_redis_provider.py<gh_stars>0
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import codecs
import os
import platform
import pytest
import sys
from anaconda_project.test.project_utils import project_no_dedicated_env
from anaconda_project.internal.test.tmpfile_utils import (with_directory_contents,
with_directory_contents_completing_project_file)
from anaconda_project.test.environ_utils import minimal_environ, strip_environ
from anaconda_project.local_state_file import DEFAULT_LOCAL_STATE_FILENAME
from anaconda_project.local_state_file import LocalStateFile
from anaconda_project.requirements_registry.registry import RequirementsRegistry
from anaconda_project.requirements_registry.requirement import UserConfigOverrides
from anaconda_project.requirements_registry.providers.redis import RedisProvider
from anaconda_project.requirements_registry.requirements.redis import RedisRequirement
from anaconda_project.prepare import prepare_without_interaction, unprepare
from anaconda_project import provide
from anaconda_project.project_file import DEFAULT_PROJECT_FILENAME
from anaconda_project.internal import conda_api
# FakeFrontend buffers logs/errors instead of printing; this wrapper echoes
# them after preparing so test output stays readable (kept for historical
# reasons rather than rewriting every test to use a printing frontend).
def _prepare_printing_errors(project, environ=None, mode=provide.PROVIDE_MODE_DEVELOPMENT):
    """Prepare `project` without interaction and echo frontend logs/errors."""
    result = prepare_without_interaction(project, environ=environ, mode=mode)
    for log_line in project.frontend.logs:
        print(log_line)
    for error_line in project.frontend.errors:
        print(error_line, file=sys.stderr)
    if not result:
        # A failed result must carry exactly the frontend's errors.
        assert result.errors == project.frontend.errors
    return result
def _redis_requirement():
    """Fresh RedisRequirement bound to the REDIS_URL environment variable."""
    registry = RequirementsRegistry()
    return RedisRequirement(registry=registry, env_var="REDIS_URL")
def test_reading_default_config():
    """With no local state on disk, the provider falls back to its default ports."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        config = RedisProvider().read_config(_redis_requirement(), dict(), local_state,
                                             'default', UserConfigOverrides())
        assert config['lower_port'] == 6380
        assert config['upper_port'] == 6449
    with_directory_contents(dict(), read_config)
def test_reading_valid_config():
    """A port_range in the local state file overrides the provider defaults."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert 7389 == config['lower_port']
        assert 7421 == config['upper_port']
        assert 'find_all' == config['source']
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-stripped copy — confirm nesting matches LocalStateFile's schema.
    with_directory_contents(
        {
            DEFAULT_LOCAL_STATE_FILENAME:
            """
service_options:
  REDIS_URL:
    port_range: 7389-7421
    autostart: false
"""
        }, read_config)
def _read_invalid_port_range(capsys, port_range):
    """Shared helper: a malformed port_range reverts to defaults and prints an error."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        # revert to defaults
        assert 6380 == config['lower_port']
        assert 6449 == config['upper_port']
        # should have printed an error
        out, err = capsys.readouterr()
        assert ("Invalid port_range '%s', should be like '6380-6449'\n" % (port_range)) == err
    # NOTE(review): YAML indentation reconstructed from a whitespace-stripped copy.
    with_directory_contents(
        {DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    port_range: %s
""" % port_range}, read_config)
# Each test below feeds one malformed port_range string through
# _read_invalid_port_range, which asserts that the defaults are restored
# and that the expected error message was printed.
def test_garbage_port_range(capsys):
    _read_invalid_port_range(capsys, "abcdefg")

def test_backward_port_range(capsys):
    _read_invalid_port_range(capsys, "100-99")

def test_non_integer_port_range(capsys):
    _read_invalid_port_range(capsys, "A-Z")

def test_zero_lower_port(capsys):
    _read_invalid_port_range(capsys, "0-1")

def test_zero_upper_port(capsys):
    _read_invalid_port_range(capsys, "1-0")
def test_set_config_values_as_strings():
    """String-typed config values are coerced to ints and merged incrementally."""
    def set_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        # Set only the lower bound; the upper bound keeps its default.
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(lower_port="6001"))
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config['lower_port'] == 6001
        assert config['upper_port'] == 6449
        # Set only the upper bound; the previously-set lower bound persists.
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(upper_port="6700"))
        config2 = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config2['lower_port'] == 6001
        assert config2['upper_port'] == 6700
        # Finally set both bounds at once.
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(lower_port="5500", upper_port="6800"))
        config2 = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config2['lower_port'] == 5500
        assert config2['upper_port'] == 6800
    with_directory_contents(dict(), set_config)
def _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch):
    """Patch socket probes to always succeed; return the dict of captured call args."""
    recorded = dict()

    def fake_can_connect(host, port, timeout_seconds=0.5):
        recorded['host'] = host
        recorded['port'] = port
        recorded['timeout_seconds'] = timeout_seconds
        return True

    monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
                        fake_can_connect)
    return recorded
def test_prepare_redis_url_with_dict_in_variables_section(monkeypatch):
    """When the socket probe succeeds, prepare yields the default redis URL (port 6379)."""
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
    def prepare_redis_url(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert dict(REDIS_URL="redis://localhost:6379",
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        # The provider probed localhost:6379 with the default timeout.
        assert dict(host='localhost', port=6379, timeout_seconds=0.5) == can_connect_args
    # NOTE(review): YAML indentation reconstructed from a whitespace-stripped copy.
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, prepare_redis_url)
def _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket):
    """Patch connectivity so the default redis port (6379) always looks unused.

    This forces the provider to launch its own redis on a nonstandard port;
    every probe call is recorded and the list returned for inspection.
    """
    can_connect_args_list = []
    def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
        can_connect_args = dict()
        can_connect_args['host'] = host
        can_connect_args['port'] = port
        can_connect_args['timeout_seconds'] = timeout_seconds
        can_connect_args_list.append(can_connect_args)
        if port == 6379:
            # Pretend nothing is listening on the default port.
            return False
        else:
            return real_can_connect_to_socket(host, port, timeout_seconds)
    monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
                        mock_can_connect_to_socket)
    return can_connect_args_list
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_local_redis_server(monkeypatch):
    """End-to-end: prepare starts a local redis on a nonstandard port; unprepare removes it."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    # Make the default port look unused so the provider launches its own server.
    can_connect_args_list = _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(
        monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # The chosen port is recorded in the local state file.
        local_state_file = LocalStateFile.load_for_directory(dirname)
        state = local_state_file.get_service_run_state('REDIS_URL')
        assert 'port' in state
        port = state['port']
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert len(can_connect_args_list) >= 2
        # The provider keeps its pid/log files under services/REDIS_URL.
        servicedir = os.path.join(dirname, "services")
        redisdir = os.path.join(servicedir, "REDIS_URL")
        pidfile = os.path.join(redisdir, "redis.pid")
        logfile = os.path.join(redisdir, "redis.log")
        assert os.path.exists(pidfile)
        assert os.path.exists(logfile)
        assert real_can_connect_to_socket(host='localhost', port=port)
        # now clean it up
        status = unprepare(project, result)
        assert status
        assert not os.path.exists(pidfile)
        assert not os.path.exists(logfile)
        assert not os.path.exists(redisdir)
        assert not os.path.exists(servicedir)
        assert not real_can_connect_to_socket(host='localhost', port=port)
        # Unprepare must also clear the recorded run state.
        local_state_file.load()
        assert dict() == local_state_file.get_service_run_state("REDIS_URL")
    # NOTE(review): YAML indentation reconstructed from a whitespace-stripped copy.
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_local_redis_server_with_failed_unprovide(monkeypatch):
    """If the recorded shutdown command fails, unprepare reports a failure status."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # now clean it up, but arrange for that to fail
        # ('false' always exits nonzero, so the shutdown command fails).
        local_state_file = LocalStateFile.load_for_directory(dirname)
        local_state_file.set_service_run_state('REDIS_URL', {'shutdown_commands': [['false']]})
        local_state_file.save()
        status = unprepare(project, result)
        assert not status
        assert status.status_description == 'Shutdown commands failed for REDIS_URL.'
    # NOTE(review): YAML indentation reconstructed from a whitespace-stripped copy.
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_two_local_redis_servers_with_failed_unprovide(monkeypatch):
    """When two services both fail to shut down, unprepare() aggregates them in one message."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # now clean it up, but arrange for that to double-fail
        local_state_file = LocalStateFile.load_for_directory(dirname)
        local_state_file.set_service_run_state('REDIS_URL', {'shutdown_commands': [['false']]})
        local_state_file.set_service_run_state('REDIS_URL_2', {'shutdown_commands': [['false']]})
        local_state_file.save()
        status = unprepare(project, result)
        assert not status
        assert status.status_description == 'Failed to clean up REDIS_URL, REDIS_URL_2.'
    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
  REDIS_URL_2: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_local_redis_server_twice_reuses(monkeypatch):
    """A second prepare() reuses the already-running server (same port, same pid file)."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    can_connect_args_list = _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(
        monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert 'REDIS_URL' in result.environ
        local_state_file = LocalStateFile.load_for_directory(dirname)
        state = local_state_file.get_service_run_state("REDIS_URL")
        assert 'port' in state
        port = state['port']
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert len(can_connect_args_list) >= 2
        pidfile = os.path.join(dirname, "services/REDIS_URL/redis.pid")
        logfile = os.path.join(dirname, "services/REDIS_URL/redis.log")
        assert os.path.exists(pidfile)
        assert os.path.exists(logfile)
        assert real_can_connect_to_socket(host='localhost', port=port)
        # be sure we generate the config html that would use the old one
        requirement = _redis_requirement()
        status = requirement.check_status(result.environ, local_state_file, 'default', UserConfigOverrides())
        # now try again, and we should re-use the exact same server
        pidfile_mtime = os.path.getmtime(pidfile)
        with codecs.open(pidfile, 'r', 'utf-8') as file:
            pidfile_content = file.read()
        result2 = _prepare_printing_errors(project, environ=minimal_environ())
        assert result2
        # port should be the same, and set in the environment
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result2.environ)
        # no new pid file
        assert pidfile_mtime == os.path.getmtime(pidfile)
        with codecs.open(pidfile, 'r', 'utf-8') as file:
            pidfile_content2 = file.read()
        assert pidfile_content == pidfile_content2
        # now clean it up
        status = unprepare(project, result2)
        assert status
        assert not os.path.exists(pidfile)
        assert not real_can_connect_to_socket(host='localhost', port=port)
        local_state_file.load()
        assert dict() == local_state_file.get_service_run_state("REDIS_URL")
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_local_redis_server_times_out(monkeypatch, capsys):
    """If redis-server dies right after launch, prepare() fails with a timeout message.

    We simulate the death by hijacking time.sleep: the first sleep kills the
    freshly started server (via its pid file), later sleeps become no-ops so
    the provider's connect-retry loop times out quickly.
    """
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis_and_time_out(dirname):
        project = project_no_dedicated_env(dirname)
        from time import sleep as real_sleep
        killed = {}
        def mock_sleep_kills_redis(seconds):
            # first time the Redis provider sleeps to wait for the
            # server to appear, we kill the server; after that
            # we make sleep into a no-op so we rapidly time out.
            if 'done' in killed:
                return
            pidfile = os.path.join(dirname, "services", "REDIS_URL", "redis.pid")
            count = 0
            while count < 15:
                if os.path.exists(pidfile):
                    break
                real_sleep(0.1)
                count = count + 1
            assert os.path.exists(pidfile)
            with codecs.open(pidfile, 'r', 'utf-8') as f:
                for line in f.readlines():
                    try:
                        import signal
                        os.kill(int(line.strip()), signal.SIGKILL)
                    except Exception:
                        pass
            # be sure it's gone
            real_sleep(0.1)
            killed['done'] = True
        monkeypatch.setattr('time.sleep', mock_sleep_kills_redis)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        out, err = capsys.readouterr()
        assert "redis-server started successfully, but we timed out trying to connect to it on port" in out
        assert "redis-server process failed or timed out, exited with code 0" in err
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis_and_time_out)
def _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch):
can_connect_args_list = []
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
can_connect_args = dict()
can_connect_args['host'] = host
can_connect_args['port'] = port
can_connect_args['timeout_seconds'] = timeout_seconds
can_connect_args_list.append(can_connect_args)
return port != 6379
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
return can_connect_args_list
def test_fail_to_prepare_local_redis_server_no_port_available(monkeypatch, capsys):
    """When every port in the default range 6380-6449 is 'in use', prepare() fails."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        # 73 attempts: the default-redis checks plus one probe per port in the range
        assert 73 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    out, err = capsys.readouterr()
    assert "All ports from 6380 to 6449 were in use, could not start redis-server on one of them." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def test_do_not_start_local_redis_server_in_prod_mode(monkeypatch, capsys):
    """In production mode the provider must not autostart redis; it only probes and fails."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def no_start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ(), mode=provide.PROVIDE_MODE_PRODUCTION)
        assert not result
        # only the system-default probes happen; no per-port scan for autostart
        assert 3 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, no_start_local_redis)
    out, err = capsys.readouterr()
    assert "Could not connect to system default Redis." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def test_do_not_start_local_redis_server_in_check_mode(monkeypatch, capsys):
    """In check mode the provider must not autostart redis; it only probes and fails."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def no_start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ(), mode=provide.PROVIDE_MODE_CHECK)
        assert not result
        # only the system-default probes happen; no per-port scan for autostart
        assert 3 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, no_start_local_redis)
    out, err = capsys.readouterr()
    assert "Could not connect to system default Redis." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def _monkeypatch_can_connect_to_socket_always_fails(monkeypatch):
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
return False
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
def test_fail_to_prepare_local_redis_server_scope_system(monkeypatch, capsys):
    """With scope 'system' configured, the provider never autostarts and just fails."""
    _monkeypatch_can_connect_to_socket_always_fails(monkeypatch)
    def check_no_autostart(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file(
        {
            DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
""",
            DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    scope: system
"""
        }, check_no_autostart)
    out, err = capsys.readouterr()
    assert out == ""
    assert err == (
        "Could not connect to system default Redis.\n" +
        "missing requirement to run this project: A running Redis server, located by a redis: URL set as REDIS_URL.\n" +
        "  Environment variable REDIS_URL is not set.\n")
def test_redis_server_configure_custom_port_range(monkeypatch, capsys):
    """A custom port_range in local state controls which ports the provider scans."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        # default probes plus one per port in the configured 7389-7421 range
        assert 36 == len(can_connect_args_list)
    with_directory_contents_completing_project_file(
        {
            DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
""",
            DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    port_range: 7389-7421
"""
        }, start_local_redis)
    out, err = capsys.readouterr()
    assert "All ports from 7389 to 7421 were in use, could not start redis-server on one of them." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, logfile_fail_mode):
    """Shared body: redis-server launch is replaced by a script that exits nonzero.

    `logfile_fail_mode` selects how the fake server leaves the logfile:
    'logfile_ok' writes one, 'no_logfile' writes nothing, 'is_dir' makes the
    logfile path a directory so reading it fails. The test then checks which
    diagnostics prepare() surfaces in each case.
    """
    # this test will fail if you don't have Redis installed, since
    # it actually starts it.
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        logfile = os.path.join(dirname, "services/REDIS_URL/redis.log")
        from subprocess import Popen as real_Popen
        failscript = os.path.join(dirname, "fail.py")
        with codecs.open(failscript, 'w', 'utf-8') as file:
            file.write("""
from __future__ import print_function
import codecs
import sys
import os
print('It did not work stdout')
print('It did not work stderr', file=sys.stderr)
logfile = sys.argv[1]
fail_mode = sys.argv[2]
if fail_mode == 'no_logfile':
    pass
elif fail_mode == 'is_dir':
    os.makedirs(logfile)
else:
    with codecs.open(logfile, 'w', 'utf-8') as f:
        f.write('This is in the logfile')
sys.exit(1)
""")
        def mock_Popen(*args, **kwargs):
            if 'args' not in kwargs:
                # `pip list` goes through this codepath while redis launch
                # happens to specify args= as a kwarg
                assert 'pip' in args[0][0]
                return real_Popen(*args, **kwargs)
            # Use the interpreter running the tests rather than whatever
            # "python" happens to be on PATH (which may not exist at all,
            # e.g. python3-only systems, or be a different version).
            kwargs['args'] = [sys.executable, failscript, logfile, logfile_fail_mode]
            return real_Popen(*args, **kwargs)
        monkeypatch.setattr("subprocess.Popen", mock_Popen)
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    # this doesn't capture "It did not work stdout" because
    # of some pytest detail I don't understand.
    out, err = capsys.readouterr()
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    # the failed process writes this to stderr, but prepare() moves it to stdout
    assert "Starting " in out
    assert "It did not work stderr" in out
    if logfile_fail_mode == 'logfile_ok':
        assert "This is in the logfile" in out
    else:
        assert "This is in the logfile" not in out
    if logfile_fail_mode == 'is_dir':
        assert "Failed to read" in out
    else:
        assert "Failed to read" not in out
def test_fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys):
    """Exec failure where the fake server did write a readable logfile."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, logfile_fail_mode='logfile_ok')
def test_fail_to_prepare_local_redis_server_exec_fails_no_logfile(monkeypatch, capsys):
    """Exec failure where the fake server never created a logfile."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, logfile_fail_mode='no_logfile')
def test_fail_to_prepare_local_redis_server_exec_fails_logfile_is_dir(monkeypatch, capsys):
    """Exec failure where the logfile path is a directory, so reading it fails."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, logfile_fail_mode='is_dir')
def test_fail_to_prepare_local_redis_server_not_on_path(monkeypatch, capsys):
    """If the redis-server executable cannot be found, prepare() reports the exec error."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        from subprocess import Popen as real_Popen
        def mock_Popen(*args, **kwargs):
            if 'args' not in kwargs:
                # `pip list` goes through this codepath while redis launch
                # happens to specify args= as a kwarg
                assert 'pip' in args[0][0]
                return real_Popen(*args, **kwargs)
            # replace the redis-server command with a nonexistent executable
            kwargs['args'] = ['this-is-not-on-the-path']
            return real_Popen(*args, **kwargs)
        monkeypatch.setattr("subprocess.Popen", mock_Popen)
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    # this doesn't capture "It did not work stdout" because
    # of some pytest detail I don't understand.
    out, err = capsys.readouterr()
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "Error executing redis-server: " in err
def test_set_scope_in_local_state(monkeypatch):
    """Walk the provider's 'source' config through all its values and verify read-back.

    Covers find_all / find_project / environ (with and without REDIS_URL set) /
    variables / find_system, then checks prepare() still succeeds afterwards.
    """
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
    def prepare_after_setting_scope(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        environ = minimal_environ()
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_project'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_project'
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_all'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='environ'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'  # default if no env var set
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='environ'))
        environ_with_redis_url = environ.copy()
        environ_with_redis_url['REDIS_URL'] = 'blah'
        config = provider.read_config(requirement, environ_with_redis_url, local_state, 'default',
                                      UserConfigOverrides())
        assert config['source'] == 'environ'  # default when the env var IS set
        # use local variable when env var not set
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='variables', value='foo'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'variables'
        assert config['value'] == 'foo'
        # use local variable when env var _is_ set
        provider.set_config_values_as_strings(requirement, environ_with_redis_url, local_state, 'default',
                                              UserConfigOverrides(), dict(source='variables', value='foo'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'variables'
        assert config['value'] == 'foo'
        # set to use system, which should override using the local state
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_system'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_system'
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert dict(REDIS_URL="redis://localhost:6379",
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert dict(host='localhost', port=6379, timeout_seconds=0.5) == can_connect_args
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, prepare_after_setting_scope)
# NOTE(review): extraction artifact removed here — a stray "|" and a
# "<filename>…<gh_stars>" join marker were fused into the file at this point;
# the module header that follows duplicates the top of this file and should
# be reconciled with it.
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import codecs
import os
import platform
import pytest
import sys
from anaconda_project.test.project_utils import project_no_dedicated_env
from anaconda_project.internal.test.tmpfile_utils import (with_directory_contents,
with_directory_contents_completing_project_file)
from anaconda_project.test.environ_utils import minimal_environ, strip_environ
from anaconda_project.local_state_file import DEFAULT_LOCAL_STATE_FILENAME
from anaconda_project.local_state_file import LocalStateFile
from anaconda_project.requirements_registry.registry import RequirementsRegistry
from anaconda_project.requirements_registry.requirement import UserConfigOverrides
from anaconda_project.requirements_registry.providers.redis import RedisProvider
from anaconda_project.requirements_registry.requirements.redis import RedisRequirement
from anaconda_project.prepare import prepare_without_interaction, unprepare
from anaconda_project import provide
from anaconda_project.project_file import DEFAULT_PROJECT_FILENAME
from anaconda_project.internal import conda_api
# This is kind of an awkward way to do it for historical reasons,
# we print out the logs/errors captured by FakeFrontend, instead
# of rewriting the tests in here to have a frontend that prints.
def _prepare_printing_errors(project, environ=None, mode=provide.PROVIDE_MODE_DEVELOPMENT):
    """Run prepare_without_interaction, echoing the frontend's logs and errors.

    Logs go to stdout and errors to stderr so capsys-based assertions can
    see them; on failure the result's errors must equal the frontend's.
    """
    prepare_result = prepare_without_interaction(project, environ=environ, mode=mode)
    for log_line in project.frontend.logs:
        print(log_line)
    for error_line in project.frontend.errors:
        print(error_line, file=sys.stderr)
    if not prepare_result:
        assert prepare_result.errors == project.frontend.errors
    return prepare_result
def _redis_requirement():
    """Build a REDIS_URL requirement backed by a fresh requirements registry."""
    registry = RequirementsRegistry()
    return RedisRequirement(registry=registry, env_var="REDIS_URL")
def test_reading_default_config():
    """With no local-state overrides, the provider reports the default port range."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert 6380 == config['lower_port']
        assert 6449 == config['upper_port']
    with_directory_contents(dict(), read_config)
def test_reading_valid_config():
    """A valid port_range in the local state file overrides the default range."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert 7389 == config['lower_port']
        assert 7421 == config['upper_port']
        assert 'find_all' == config['source']
    with_directory_contents(
        {
            DEFAULT_LOCAL_STATE_FILENAME:
            """
service_options:
  REDIS_URL:
    port_range: 7389-7421
    autostart: false
"""
        }, read_config)
def _read_invalid_port_range(capsys, port_range):
    """Shared helper: a malformed port_range reverts to defaults and prints an error."""
    def read_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        # revert to defaults
        assert 6380 == config['lower_port']
        assert 6449 == config['upper_port']
        # should have printed an error
        out, err = capsys.readouterr()
        assert ("Invalid port_range '%s', should be like '6380-6449'\n" % (port_range)) == err
    with_directory_contents(
        {DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    port_range: %s
""" % port_range}, read_config)
def test_garbage_port_range(capsys):
    """Non-numeric, non-range text is rejected."""
    _read_invalid_port_range(capsys, "abcdefg")
def test_backward_port_range(capsys):
    """A range whose lower bound exceeds its upper bound is rejected."""
    _read_invalid_port_range(capsys, "100-99")
def test_non_integer_port_range(capsys):
    """Non-integer endpoints are rejected."""
    _read_invalid_port_range(capsys, "A-Z")
def test_zero_lower_port(capsys):
    """Port 0 is not a valid lower bound."""
    _read_invalid_port_range(capsys, "0-1")
def test_zero_upper_port(capsys):
    """Port 0 is not a valid upper bound."""
    _read_invalid_port_range(capsys, "1-0")
def test_set_config_values_as_strings():
    """Setting lower/upper port as strings updates the parsed integer config values."""
    def set_config(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        # set only the lower bound; upper keeps its default
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(lower_port="6001"))
        config = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config['lower_port'] == 6001
        assert config['upper_port'] == 6449
        # set only the upper bound; lower keeps the previous value
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(upper_port="6700"))
        config2 = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config2['lower_port'] == 6001
        assert config2['upper_port'] == 6700
        # set both at once
        provider.set_config_values_as_strings(requirement, dict(), local_state, 'default', UserConfigOverrides(),
                                              dict(lower_port="5500", upper_port="6800"))
        config2 = provider.read_config(requirement, dict(), local_state, 'default', UserConfigOverrides())
        assert config2['lower_port'] == 5500
        assert config2['upper_port'] == 6800
    with_directory_contents(dict(), set_config)
def _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch):
can_connect_args = dict()
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
can_connect_args['host'] = host
can_connect_args['port'] = port
can_connect_args['timeout_seconds'] = timeout_seconds
return True
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
return can_connect_args
def test_prepare_redis_url_with_dict_in_variables_section(monkeypatch):
    """When the system default Redis is reachable, prepare() uses redis://localhost:6379."""
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
    def prepare_redis_url(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert dict(REDIS_URL="redis://localhost:6379",
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert dict(host='localhost', port=6379, timeout_seconds=0.5) == can_connect_args
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, prepare_redis_url)
def _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket):
can_connect_args_list = []
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
can_connect_args = dict()
can_connect_args['host'] = host
can_connect_args['port'] = port
can_connect_args['timeout_seconds'] = timeout_seconds
can_connect_args_list.append(can_connect_args)
if port == 6379:
return False
else:
return real_can_connect_to_socket(host, port, timeout_seconds)
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
return can_connect_args_list
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_local_redis_server(monkeypatch):
    """prepare() starts a project-local redis-server; unprepare() fully tears it down.

    Requires a real redis-server binary on PATH; port 6379 is faked as
    unreachable so the provider must launch its own server on 6380+.
    """
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    can_connect_args_list = _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(
        monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # the provider records the chosen port in the local state file
        local_state_file = LocalStateFile.load_for_directory(dirname)
        state = local_state_file.get_service_run_state('REDIS_URL')
        assert 'port' in state
        port = state['port']
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert len(can_connect_args_list) >= 2
        servicedir = os.path.join(dirname, "services")
        redisdir = os.path.join(servicedir, "REDIS_URL")
        pidfile = os.path.join(redisdir, "redis.pid")
        logfile = os.path.join(redisdir, "redis.log")
        assert os.path.exists(pidfile)
        assert os.path.exists(logfile)
        assert real_can_connect_to_socket(host='localhost', port=port)
        # now clean it up
        status = unprepare(project, result)
        assert status
        assert not os.path.exists(pidfile)
        assert not os.path.exists(logfile)
        assert not os.path.exists(redisdir)
        assert not os.path.exists(servicedir)
        assert not real_can_connect_to_socket(host='localhost', port=port)
        local_state_file.load()
        assert dict() == local_state_file.get_service_run_state("REDIS_URL")
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_local_redis_server_with_failed_unprovide(monkeypatch):
    """unprepare() reports a failure status when the service's shutdown command fails."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # now clean it up, but arrange for that to fail
        # ('false' always exits nonzero, so the shutdown command fails)
        local_state_file = LocalStateFile.load_for_directory(dirname)
        local_state_file.set_service_run_state('REDIS_URL', {'shutdown_commands': [['false']]})
        local_state_file.save()
        status = unprepare(project, result)
        assert not status
        assert status.status_description == 'Shutdown commands failed for REDIS_URL.'
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_and_unprepare_two_local_redis_servers_with_failed_unprovide(monkeypatch):
    """When two services both fail to shut down, unprepare() aggregates them in one message."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        # now clean it up, but arrange for that to double-fail
        local_state_file = LocalStateFile.load_for_directory(dirname)
        local_state_file.set_service_run_state('REDIS_URL', {'shutdown_commands': [['false']]})
        local_state_file.set_service_run_state('REDIS_URL_2', {'shutdown_commands': [['false']]})
        local_state_file.save()
        status = unprepare(project, result)
        assert not status
        assert status.status_description == 'Failed to clean up REDIS_URL, REDIS_URL_2.'
    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
  REDIS_URL_2: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_local_redis_server_twice_reuses(monkeypatch):
    """A second prepare() reuses the already-running server (same port, same pid file)."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    can_connect_args_list = _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(
        monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert 'REDIS_URL' in result.environ
        local_state_file = LocalStateFile.load_for_directory(dirname)
        state = local_state_file.get_service_run_state("REDIS_URL")
        assert 'port' in state
        port = state['port']
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert len(can_connect_args_list) >= 2
        pidfile = os.path.join(dirname, "services/REDIS_URL/redis.pid")
        logfile = os.path.join(dirname, "services/REDIS_URL/redis.log")
        assert os.path.exists(pidfile)
        assert os.path.exists(logfile)
        assert real_can_connect_to_socket(host='localhost', port=port)
        # be sure we generate the config html that would use the old one
        requirement = _redis_requirement()
        status = requirement.check_status(result.environ, local_state_file, 'default', UserConfigOverrides())
        # now try again, and we should re-use the exact same server
        pidfile_mtime = os.path.getmtime(pidfile)
        with codecs.open(pidfile, 'r', 'utf-8') as file:
            pidfile_content = file.read()
        result2 = _prepare_printing_errors(project, environ=minimal_environ())
        assert result2
        # port should be the same, and set in the environment
        assert dict(REDIS_URL=("redis://localhost:" + str(port)),
                    PROJECT_DIR=project.directory_path) == strip_environ(result2.environ)
        # no new pid file
        assert pidfile_mtime == os.path.getmtime(pidfile)
        with codecs.open(pidfile, 'r', 'utf-8') as file:
            pidfile_content2 = file.read()
        assert pidfile_content == pidfile_content2
        # now clean it up
        status = unprepare(project, result2)
        assert status
        assert not os.path.exists(pidfile)
        assert not real_can_connect_to_socket(host='localhost', port=port)
        local_state_file.load()
        assert dict() == local_state_file.get_service_run_state("REDIS_URL")
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows has a hard time with read-only directories')
@pytest.mark.skipif(conda_api.current_platform() == 'osx-arm64', reason='We cannot install redis server on osx-arm64')
def test_prepare_local_redis_server_times_out(monkeypatch, capsys):
    """Kill redis-server right after it launches; prepare must report a connect timeout."""
    # Make the default Redis port look occupied so the provider autostarts its own server.
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis_and_time_out(dirname):
        project = project_no_dedicated_env(dirname)
        from time import sleep as real_sleep
        # Shared marker: once 'done' is set, the launched server has been killed
        # and the patched sleep becomes a no-op.
        killed = {}
        def mock_sleep_kills_redis(seconds):
            # first time the Redis provider sleeps to wait for the
            # server to appear, we kill the server; after that
            # we make sleep into a no-op so we rapidly time out.
            if 'done' in killed:
                return
            pidfile = os.path.join(dirname, "services", "REDIS_URL", "redis.pid")
            # Poll (with real sleeps) for up to ~1.5s until the pid file shows up.
            count = 0
            while count < 15:
                if os.path.exists(pidfile):
                    break
                real_sleep(0.1)
                count = count + 1
            assert os.path.exists(pidfile)
            with codecs.open(pidfile, 'r', 'utf-8') as f:
                for line in f.readlines():
                    try:
                        import signal
                        os.kill(int(line.strip()), signal.SIGKILL)
                    except Exception:
                        pass
            # be sure it's gone
            real_sleep(0.1)
            killed['done'] = True
        monkeypatch.setattr('time.sleep', mock_sleep_kills_redis)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        out, err = capsys.readouterr()
        assert "redis-server started successfully, but we timed out trying to connect to it on port" in out
        assert "redis-server process failed or timed out, exited with code 0" in err
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis_and_time_out)
def _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch):
can_connect_args_list = []
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
can_connect_args = dict()
can_connect_args['host'] = host
can_connect_args['port'] = port
can_connect_args['timeout_seconds'] = timeout_seconds
can_connect_args_list.append(can_connect_args)
return port != 6379
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
return can_connect_args_list
def test_fail_to_prepare_local_redis_server_no_port_available(monkeypatch, capsys):
    """Prepare must fail with a clear message when every port in the scan range is taken."""
    # The patched probe reports every nonstandard port as occupied.
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        # 73 connection attempts observed; presumably the 70-port scan range
        # (6380-6449) plus extra probes of the default port -- TODO confirm.
        assert 73 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    out, err = capsys.readouterr()
    assert "All ports from 6380 to 6449 were in use, could not start redis-server on one of them." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def test_do_not_start_local_redis_server_in_prod_mode(monkeypatch, capsys):
    """In production mode the provider must not autostart Redis; prepare simply fails."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def no_start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ(), mode=provide.PROVIDE_MODE_PRODUCTION)
        assert not result
        # Only 3 probes happen -- no port scan, so no autostart was attempted.
        assert 3 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, no_start_local_redis)
    out, err = capsys.readouterr()
    assert "Could not connect to system default Redis." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def test_do_not_start_local_redis_server_in_check_mode(monkeypatch, capsys):
    """In check mode the provider must not autostart Redis; prepare simply fails."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def no_start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ(), mode=provide.PROVIDE_MODE_CHECK)
        assert not result
        # Only 3 probes happen -- no port scan, so no autostart was attempted.
        assert 3 == len(can_connect_args_list)
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, no_start_local_redis)
    out, err = capsys.readouterr()
    assert "Could not connect to system default Redis." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def _monkeypatch_can_connect_to_socket_always_fails(monkeypatch):
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
return False
monkeypatch.setattr("anaconda_project.requirements_registry.network_util.can_connect_to_socket",
mock_can_connect_to_socket)
def test_fail_to_prepare_local_redis_server_scope_system(monkeypatch, capsys):
    """With scope 'system' in local state, the provider never autostarts a local server."""
    _monkeypatch_can_connect_to_socket_always_fails(monkeypatch)
    def check_no_autostart(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file(
        {
            DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
""",
            DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    scope: system
"""
        }, check_no_autostart)
    out, err = capsys.readouterr()
    assert out == ""
    # Exact message: system Redis unreachable and no local fallback was attempted.
    assert err == (
        "Could not connect to system default Redis.\n" +
        "missing requirement to run this project: A running Redis server, located by a redis: URL set as REDIS_URL.\n" +
        "  Environment variable REDIS_URL is not set.\n")
def test_redis_server_configure_custom_port_range(monkeypatch, capsys):
    """A port_range override in local state controls which ports get scanned."""
    can_connect_args_list = _monkeypatch_can_connect_to_socket_always_succeeds_on_nonstandard(monkeypatch)
    def start_local_redis(dirname):
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
        # 36 probes observed; presumably the 33-port custom range plus extra
        # probes of the default port -- TODO confirm.
        assert 36 == len(can_connect_args_list)
    with_directory_contents_completing_project_file(
        {
            DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
""",
            DEFAULT_LOCAL_STATE_FILENAME: """
service_options:
  REDIS_URL:
    port_range: 7389-7421
"""
        }, start_local_redis)
    out, err = capsys.readouterr()
    assert "All ports from 7389 to 7421 were in use, could not start redis-server on one of them." in err
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "" == out
def _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, logfile_fail_mode):
    """Common driver: swap redis-server for a script that exits 1, then check error reporting.

    logfile_fail_mode selects how the fake server treats its logfile:
    'logfile_ok' (writes it), 'no_logfile' (never creates it), or
    'is_dir' (creates a directory in its place so reading it fails).
    """
    # this test will fail if you don't have Redis installed, since
    # it actually starts it.
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        logfile = os.path.join(dirname, "services/REDIS_URL/redis.log")
        from subprocess import Popen as real_Popen
        # Stand-in "redis-server": prints to stdout/stderr, manipulates the
        # logfile per fail_mode, then exits nonzero.
        failscript = os.path.join(dirname, "fail.py")
        with codecs.open(failscript, 'w', 'utf-8') as file:
            file.write("""
from __future__ import print_function
import codecs
import sys
import os
print('It did not work stdout')
print('It did not work stderr', file=sys.stderr)
logfile = sys.argv[1]
fail_mode = sys.argv[2]
if fail_mode == 'no_logfile':
    pass
elif fail_mode == 'is_dir':
    os.makedirs(logfile)
else:
    with codecs.open(logfile, 'w', 'utf-8') as f:
        f.write('This is in the logfile')
sys.exit(1)
""")
        def mock_Popen(*args, **kwargs):
            if 'args' not in kwargs:
                # `pip list` goes through this codepath while redis launch
                # happens to specify args= as a kwarg
                assert 'pip' in args[0][0]
                return real_Popen(*args, **kwargs)
            kwargs['args'] = ['python', failscript, logfile, logfile_fail_mode]
            return real_Popen(*args, **kwargs)
        monkeypatch.setattr("subprocess.Popen", mock_Popen)
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    # this doesn't capture "It did not work stdout" because
    # of some pytest detail I don't understand.
    out, err = capsys.readouterr()
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    # the failed process writes this to stderr, but prepare() moves it to stdout
    assert "Starting " in out
    assert "It did not work stderr" in out
    if logfile_fail_mode == 'logfile_ok':
        assert "This is in the logfile" in out
    else:
        assert "This is in the logfile" not in out
    if logfile_fail_mode == 'is_dir':
        assert "Failed to read" in out
    else:
        assert "Failed to read" not in out
def test_fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys):
    """Failing redis-server writes its logfile normally; the log text must surface."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, 'logfile_ok')
def test_fail_to_prepare_local_redis_server_exec_fails_no_logfile(monkeypatch, capsys):
    """Failing redis-server never creates its logfile; no log text should surface."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, 'no_logfile')
def test_fail_to_prepare_local_redis_server_exec_fails_logfile_is_dir(monkeypatch, capsys):
    """Logfile path is a directory; reading it must fail and be reported."""
    _fail_to_prepare_local_redis_server_exec_fails(monkeypatch, capsys, 'is_dir')
def test_fail_to_prepare_local_redis_server_not_on_path(monkeypatch, capsys):
    """If the redis-server executable cannot be found, prepare reports an exec error."""
    from anaconda_project.requirements_registry.network_util import can_connect_to_socket as real_can_connect_to_socket
    _monkeypatch_can_connect_to_socket_on_nonstandard_port_only(monkeypatch, real_can_connect_to_socket)
    def start_local_redis(dirname):
        from subprocess import Popen as real_Popen
        # Replace the launch command with a binary name that does not exist on PATH.
        def mock_Popen(*args, **kwargs):
            if 'args' not in kwargs:
                # `pip list` goes through this codepath while redis launch
                # happens to specify args= as a kwarg
                assert 'pip' in args[0][0]
                return real_Popen(*args, **kwargs)
            kwargs['args'] = ['this-is-not-on-the-path']
            return real_Popen(*args, **kwargs)
        monkeypatch.setattr("subprocess.Popen", mock_Popen)
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert not result
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, start_local_redis)
    # this doesn't capture "It did not work stdout" because
    # of some pytest detail I don't understand.
    out, err = capsys.readouterr()
    assert "REDIS_URL" in err
    assert "missing requirement" in err
    assert "Error executing redis-server: " in err
def test_set_scope_in_local_state(monkeypatch):
    """Exercise RedisProvider config round-trips for every 'source' value, then prepare."""
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
    def prepare_after_setting_scope(dirname):
        local_state = LocalStateFile.load_for_directory(dirname)
        requirement = _redis_requirement()
        provider = RedisProvider()
        environ = minimal_environ()
        # Default configuration searches everywhere for a server.
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_project'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_project'
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_all'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'
        # 'environ' only sticks when the env var is actually set (checked below).
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='environ'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_all'  # default if no env var set
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='environ'))
        environ_with_redis_url = environ.copy()
        environ_with_redis_url['REDIS_URL'] = 'blah'
        config = provider.read_config(requirement, environ_with_redis_url, local_state, 'default',
                                      UserConfigOverrides())
        assert config['source'] == 'environ'  # default when the env var IS set
        # use local variable when env var not set
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='variables', value='foo'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'variables'
        assert config['value'] == 'foo'
        # use local variable when env var _is_ set
        provider.set_config_values_as_strings(requirement, environ_with_redis_url, local_state, 'default',
                                              UserConfigOverrides(), dict(source='variables', value='foo'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'variables'
        assert config['value'] == 'foo'
        # set to use system, which should override using the local state
        provider.set_config_values_as_strings(requirement, environ, local_state, 'default', UserConfigOverrides(),
                                              dict(source='find_system'))
        config = provider.read_config(requirement, environ, local_state, 'default', UserConfigOverrides())
        assert config['source'] == 'find_system'
        # With a reachable (mocked) system Redis, prepare now succeeds.
        project = project_no_dedicated_env(dirname)
        result = _prepare_printing_errors(project, environ=minimal_environ())
        assert result
        assert dict(REDIS_URL="redis://localhost:6379",
                    PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert dict(host='localhost', port=6379, timeout_seconds=0.5) == can_connect_args
    with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, prepare_after_setting_scope)
|
en
| 0.808832
|
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2016, Anaconda, Inc. All rights reserved. # # Licensed under the terms of the BSD 3-Clause License. # The full license is in the file LICENSE.txt, distributed with this software. # ----------------------------------------------------------------------------- # This is kind of an awkward way to do it for historical reasons, # we print out the logs/errors captured by FakeFrontend, instead # of rewriting the tests in here to have a frontend that prints. service_options: REDIS_URL: port_range: 7389-7421 autostart: false # revert to defaults # should have printed an error service_options: REDIS_URL: port_range: %s services: REDIS_URL: redis # now clean it up services: REDIS_URL: redis # now clean it up, but arrange for that to fail services: REDIS_URL: redis # now clean it up, but arrange for that to double-fail services: REDIS_URL: redis REDIS_URL_2: redis # be sure we generate the config html that would use the old one # now try again, and we should re-use the exact same server # port should be the same, and set in the environment # no new pid file # now clean it up services: REDIS_URL: redis # first time the Redis provider sleeps to wait for the # server to appear, we kill the server; after that # we make sleep into a no-op so we rapidly time out. # be sure it's gone services: REDIS_URL: redis services: REDIS_URL: redis services: REDIS_URL: redis services: REDIS_URL: redis services: REDIS_URL: redis service_options: REDIS_URL: scope: system services: REDIS_URL: redis service_options: REDIS_URL: port_range: 7389-7421 # this test will fail if you don't have Redis installed, since # it actually starts it. 
from __future__ import print_function import codecs import sys import os print('It did not work stdout') print('It did not work stderr', file=sys.stderr) logfile = sys.argv[1] fail_mode = sys.argv[2] if fail_mode == 'no_logfile': pass elif fail_mode == 'is_dir': os.makedirs(logfile) else: with codecs.open(logfile, 'w', 'utf-8') as f: f.write('This is in the logfile') sys.exit(1) # `pip list` goes through this codepath while redis launch # happens to specify args= as a kwarg services: REDIS_URL: redis # this doesn't capture "It did not work stdout" because # of some pytest detail I don't understand. # the failed process writes this to stderr, but prepare() moves it to stdout # `pip list` goes through this codepath while redis launch # happens to specify args= as a kwarg services: REDIS_URL: redis # this doesn't capture "It did not work stdout" because # of some pytest detail I don't understand. # default if no env var set # default when the env var IS set # use local variable when env var not set # use local variable when env var _is_ set # set to use system, which should override using the local state services: REDIS_URL: redis
| 1.620263
| 2
|
applications/ThermalDEMApplication/python_scripts/thermal_sphere_strategy.py
|
hbayraktaroglu/Kratos
| 0
|
6628785
|
# Imports
from KratosMultiphysics import *
from KratosMultiphysics.DEMApplication import *
from KratosMultiphysics.ThermalDEMApplication import *
import KratosMultiphysics.DEMApplication.sphere_strategy as SolverStrategy
import KratosMultiphysics.ThermalDEMApplication.default_input_settings as DefaultSettings
# Set base class
BaseStrategy = SolverStrategy.ExplicitStrategy
# Auxiliary functions
def GetBoolParameterIfItExists(parameters, key):
    """Return the boolean stored under *key* in *parameters*, or False when the key is absent."""
    if key not in parameters.keys():
        return False
    return parameters[key].GetBool()
# Strategy class
class ExplicitStrategy(BaseStrategy):
####################################### DERIVED METHODS #######################################
#----------------------------------------------------------------------------------------------
    def __init__(self, all_model_parts, creator_destructor, dem_fem_search, DEM_parameters, procedures):
        """Build the thermal DEM strategy: base init, input validation, flags, C++ utilities."""
        # Initialize base class
        BaseStrategy.__init__(self, all_model_parts, creator_destructor, dem_fem_search, DEM_parameters, procedures)
        # Get and validate input parameters
        self.GetProjectParameters(DEM_parameters)
        self.CheckProjectParameters()
        # Set flags
        self.SetVoronoiPorosityFlags()
        self.SetGraphFlags()
        # Create utilities
        self.CreateCPlusPlusUtilities()
#----------------------------------------------------------------------------------------------
    def AddVariables(self):
        """Add the standard DEM nodal variables plus the thermal ones (TEMPERATURE, HEATFLUX)."""
        # Add standard variables
        BaseStrategy.AddVariables(self)
        # Add thermal variables to all model parts
        self.AddThermalVariables()
#----------------------------------------------------------------------------------------------
    def CreateCPlusPlusStrategy(self):
        """Push standard and thermal options into ProcessInfo, then build the C++ strategy."""
        # Set standard options
        BaseStrategy.SetVariablesAndOptions(self)
        # Set thermal options (set ProcessInfo values)
        self.SetThermalVariablesAndOptions()
        # Create cpp strategy object
        self.CreateCPlusPlusThermalStrategy()
#----------------------------------------------------------------------------------------------
    def ModifyProperties(self, properties, param = 0):
        """Attach thermal scheme / integration-method / heat-transfer-law pointers to *properties*.

        NOTE(review): a nonzero *param* returns immediately, skipping even the base
        class call -- presumably subproperties need no thermal setup; confirm.
        """
        if param:
            return
        # Set standard properties
        BaseStrategy.ModifyProperties(self, properties, param)
        # Set pointers: time integration scheme / numerical integration method / constitutive laws (heat transfer models)
        self.SetThermalIntegrationScheme(properties)
        self.SetNumericalIntegrationMethod(properties)
        self.SetConstitutiveLaw(properties)
#----------------------------------------------------------------------------------------------
    def Initialize(self):
        """Initialize utilities first (they set nodal temperatures), then the base strategy."""
        # Initialize utilities
        # (important to be before the initialization of elements, because temperature is set here)
        self.InitializeCPlusPlusUtilities()
        # Base class initializer
        # (initialize the strategy and the elements, so temperature must be already set at this point)
        BaseStrategy.Initialize(self)
#----------------------------------------------------------------------------------------------
    def InitializeSolutionStep(self):
        """Start a solution step; refresh the Voronoi/porosity tesselation when due."""
        if (self.compute_motion_option):
            BaseStrategy.InitializeSolutionStep(self)
        else:
            # Thermal-only run: bypass the base-class mechanical bookkeeping.
            (self.cplusplus_strategy).InitializeSolutionStep()
        # Perform tesselation-dependent tasks (triangulation or tetrahedralization)
        if (self.IsTimeToUpdateVoronoi() or self.IsTimeToUpdatePorosity()):
            self.tesselation_utils.ExecuteInitializeSolutionStep(self.spheres_model_part)
#----------------------------------------------------------------------------------------------
    def Predict(self):
        """Run the base predictor only when particle motion is being computed."""
        if (self.compute_motion_option):
            BaseStrategy.Predict(self)
#----------------------------------------------------------------------------------------------
def SolveSolutionStep(self):
# Solve step according to motion type
if (self.compute_motion_option):
(self.cplusplus_strategy).SolveSolutionStep()
else:
(self.cplusplus_strategy).SolveSolutionStepStatic()
return True
#----------------------------------------------------------------------------------------------
    def FinalizeSolutionStep(self):
        """Finalize the step and, if enabled, append a row to the temperature graphs."""
        BaseStrategy.FinalizeSolutionStep(self)
        # Write output graphs
        if (self.write_graph):
            self.graph_utils.ExecuteFinalizeSolutionStep(self.spheres_model_part)
#----------------------------------------------------------------------------------------------
    def Finalize(self):
        """Finalize the base strategy and close any open graph files."""
        BaseStrategy.Finalize(self)
        # Close graph files
        if (self.write_graph):
            self.graph_utils.ExecuteFinalize()
####################################### PARTICULAR METHODS #######################################
#----------------------------------------------------------------------------------------------
def GetProjectParameters(self, DEM_parameters):
# Get thermal settings and assign default values (in case it was not previously done)
default_settings = DefaultSettings.GetDefaultInputSettings()
if "thermal_settings" in self.DEM_parameters.keys():
self.thermal_settings = DEM_parameters["thermal_settings"]
else:
self.thermal_settings = Parameters("""{}""")
self.thermal_settings.ValidateAndAssignDefaults(default_settings)
# General options
self.compute_motion_option = self.thermal_settings["compute_motion"].GetBool()
# Frequencies
self.thermal_solve_frequency = self.thermal_settings["thermal_solve_frequency"].GetInt()
self.voronoi_tesselation_frequency = self.thermal_settings["voronoi_tesselation_frequency"].GetInt()
self.porosity_update_frequency = self.thermal_settings["porosity_update_frequency"].GetInt()
# Integration scheme and method
self.thermal_integration_scheme = self.thermal_settings["thermal_integration_scheme"].GetString()
self.numerical_integration_method = self.thermal_settings["numerical_integration_method"].GetString()
# Models for heat transfer
self.direct_conduction_model = self.thermal_settings["direct_conduction_model"].GetString()
self.indirect_conduction_model = self.thermal_settings["indirect_conduction_model"].GetString()
self.nusselt_correlation = self.thermal_settings["nusselt_correlation"].GetString()
self.radiation_model = self.thermal_settings["radiation_model"].GetString()
self.friction_model = self.thermal_settings["friction_model"].GetString()
self.adjusted_contact_model = self.thermal_settings["adjusted_contact_model"].GetString()
self.voronoi_method = self.thermal_settings["voronoi_method"].GetString()
self.porosity_method = self.thermal_settings["porosity_method"].GetString()
# Active heat transfer mechanisms
self.compute_direct_conduction_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_direct_conduction")
self.compute_indirect_conduction_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_indirect_conduction")
self.compute_convection_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_convection")
self.compute_radiation_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_radiation")
self.compute_friction_heat_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_friction_heat")
self.compute_adjusted_contact_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_adjusted_contact")
# Model parameters
self.min_conduction_distance = self.thermal_settings["min_conduction_distance"].GetDouble()
self.max_conduction_distance = self.thermal_settings["max_conduction_distance"].GetDouble()
self.fluid_layer_thickness = self.thermal_settings["fluid_layer_thickness"].GetDouble()
self.isothermal_core_radius = self.thermal_settings["isothermal_core_radius"].GetDouble()
self.max_radiation_distance = self.thermal_settings["max_radiation_distance"].GetDouble()
self.friction_heat_conversion = self.thermal_settings["friction_heat_conversion_ratio"].GetDouble()
self.global_porosity = self.thermal_settings["global_porosity"].GetDouble()
self.alpha_parameter = self.thermal_settings["alpha_shape_parameter"].GetDouble()
self.integral_tolerance = self.thermal_settings["integral_tolerance"].GetDouble()
# Interstitial fluid properties
self.fluid_props = self.thermal_settings["global_fluid_properties"]
self.fluid_density = self.fluid_props["fluid_density"].GetDouble()
self.fluid_viscosity = self.fluid_props["fluid_viscosity"].GetDouble()
self.fluid_thermal_conductivity = self.fluid_props["fluid_thermal_conductivity"].GetDouble()
self.fluid_heat_capacity = self.fluid_props["fluid_heat_capacity"].GetDouble()
self.fluid_temperature = self.fluid_props["fluid_temperature"].GetDouble()
self.fluid_velocity = Vector(3)
self.fluid_velocity[0] = self.fluid_props["fluid_velocity_X"].GetDouble()
self.fluid_velocity[1] = self.fluid_props["fluid_velocity_Y"].GetDouble()
self.fluid_velocity[2] = self.fluid_props["fluid_velocity_Z"].GetDouble()
# Graph writing
self.PostGraphParticleTempMin = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempMin")
self.PostGraphParticleTempMax = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempMax")
self.PostGraphParticleTempAvg = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempAvg")
self.PostGraphParticleTempDev = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempDev")
self.PostGraphModelTempAvg = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphModelTempAvg")
self.PostGraphFluxContributions = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphFluxContributions")
#----------------------------------------------------------------------------------------------
    def CheckProjectParameters(self):
        """Validate thermal settings: reject unknown model names, clamp/normalize numerics.

        Raises:
            Exception: when a selected scheme/model/method name is not implemented
                or a numeric parameter is outside its valid range.
        """
        # Time integration scheme
        if (self.thermal_integration_scheme != "forward_euler"):
            raise Exception('ThermalDEM', 'Time integration scheme \'' + self.thermal_integration_scheme + '\' is not implemented.')
        # Numerical integration method
        if (self.numerical_integration_method != "adaptive_simpson"):
            raise Exception('ThermalDEM', 'Numerical integration method \'' + self.numerical_integration_method + '\' is not implemented.')
        # Heat transfer models
        if (self.direct_conduction_model != "batchelor_obrien" and
            self.direct_conduction_model != "thermal_pipe"     and
            self.direct_conduction_model != "collisional"):
            raise Exception('ThermalDEM', 'Direct thermal conduction model \'' + self.direct_conduction_model + '\' is not implemented.')
        if (self.indirect_conduction_model != "surrounding_layer" and
            self.indirect_conduction_model != "voronoi_a"         and
            self.indirect_conduction_model != "voronoi_b"         and
            self.indirect_conduction_model != "vargas_mccarthy"):
            raise Exception('ThermalDEM', 'Indirect thermal conduction model \'' + self.indirect_conduction_model + '\' is not implemented.')
        if (self.nusselt_correlation != "sphere_hanz_marshall" and
            self.nusselt_correlation != "sphere_whitaker"      and
            self.nusselt_correlation != "sphere_gunn"          and
            self.nusselt_correlation != "sphere_li_mason"):
            raise Exception('ThermalDEM', 'Nusselt number correlation \'' + self.nusselt_correlation + '\' is not implemented.')
        if (self.radiation_model != "continuum_zhou" and
            self.radiation_model != "continuum_krause"):
            raise Exception('ThermalDEM', 'Thermal radiation model \'' + self.radiation_model + '\' is not implemented.')
        if (self.friction_model != "coulomb"):
            raise Exception('ThermalDEM', 'Frictional heat generation model \'' + self.friction_model + '\' is not implemented.')
        if (self.adjusted_contact_model != "zhou" and
            self.adjusted_contact_model != "lu"   and
            self.adjusted_contact_model != "morris"):
            raise Exception('ThermalDEM', 'Adjusted contact model \'' + self.adjusted_contact_model + '\' is not implemented.')
        # Other methods
        if (self.voronoi_method != "tesselation" and
            self.voronoi_method != "porosity"):
            raise Exception('ThermalDEM', 'Voronoi method \'' + self.voronoi_method + '\' is not implemented.')
        if (self.porosity_method != "global"              and
            self.porosity_method != "average_convex_hull" and
            self.porosity_method != "average_alpha_shape"):
            raise Exception('ThermalDEM', 'Porosity method \'' + self.porosity_method + '\' is not implemented.')
        # Model parameters values
        # (frequencies and distances are silently clamped to safe values;
        #  ratios and tolerances raise instead)
        if (self.thermal_solve_frequency <= 0):
            self.thermal_solve_frequency = 1
        if (self.voronoi_tesselation_frequency < 0):
            self.voronoi_tesselation_frequency = 0
        if (self.porosity_update_frequency < 0):
            self.porosity_update_frequency = 0
        if (self.min_conduction_distance <= 0):
            raise Exception('ThermalDEM', '"min_conduction_distance" must be positive.')
        if (self.max_conduction_distance < 0):
            self.max_conduction_distance = 0
        if (self.fluid_layer_thickness < 0):
            self.fluid_layer_thickness = 0
        if (self.isothermal_core_radius < 0):
            self.isothermal_core_radius = 0
        if (self.isothermal_core_radius > 1):
            self.isothermal_core_radius = 1
        if (self.max_radiation_distance < 0 ):
            self.max_radiation_distance = 0
        if (self.friction_heat_conversion < 0 or self.friction_heat_conversion > 1):
            raise Exception('ThermalDEM', '"friction_heat_conversion_ratio" must be between zero and one.')
        if (self.global_porosity < 0 or self.global_porosity >= 1):
            raise Exception('ThermalDEM', '"global_porosity" must be between zero and one.')
        if (self.alpha_parameter < 0):
            raise Exception('ThermalDEM', '"alpha_shape_parameter" must be positive.')
        if (self.integral_tolerance <= 0):
            raise Exception('ThermalDEM', '"integral_tolerance" must be positive.')
        # Fluid properties values
        if (self.fluid_density              <= 0 or
            self.fluid_viscosity            <= 0 or
            self.fluid_thermal_conductivity <= 0 or
            self.fluid_heat_capacity        <= 0):
            raise Exception('ThermalDEM', '"global_fluid_properties" must contain positive values for material properties.')
#----------------------------------------------------------------------------------------------
def SetVoronoiPorosityFlags(self):
# Flag for computing voronoi diagram in a given frequency
if (self.compute_indirect_conduction_option and
(self.indirect_conduction_model == "voronoi_a" or
self.indirect_conduction_model == "voronoi_b") and
self.voronoi_method == "tesselation"):
self.compute_voronoi = True
else:
self.compute_voronoi = False
# Flag for computing porosity in a given frequency
if (self.compute_indirect_conduction_option and
(self.indirect_conduction_model == "voronoi_a" or
self.indirect_conduction_model == "voronoi_b") and
self.voronoi_method == "porosity" and
self.porosity_method != "global"):
self.compute_porosity = True
elif (self.compute_convection_option and
(self.nusselt_correlation == "sphere_gunn" or
self.nusselt_correlation == "sphere_li_mason") and
self.porosity_method != "global"):
self.compute_porosity = True
elif (self.compute_radiation_option and
self.radiation_model == "continuum_zhou" and
self.porosity_method != "global"):
self.compute_porosity = True
else:
self.compute_porosity = False
#----------------------------------------------------------------------------------------------
def SetGraphFlags(self):
if (self.PostGraphParticleTempMin or
self.PostGraphParticleTempMax or
self.PostGraphParticleTempAvg or
self.PostGraphParticleTempDev or
self.PostGraphModelTempAvg or
self.PostGraphFluxContributions):
self.write_graph = True
else:
self.write_graph = False
#----------------------------------------------------------------------------------------------
    def CreateCPlusPlusUtilities(self):
        """Instantiate the C++ utility objects needed by the enabled features."""
        self.thermal_data_utils = SetThermalDataUtilities()
        if (self.compute_voronoi or self.compute_porosity):
            # NOTE(review): if self.dimension is neither 2 nor 3, tesselation_utils is
            # never created -- presumably dimension is validated upstream; confirm.
            if self.dimension == 2:
                self.tesselation_utils = TesselationUtilities2D()
            elif self.dimension == 3:
                self.tesselation_utils = TesselationUtilities3D()
        if (self.write_graph):
            self.graph_utils = GraphUtilities()
#----------------------------------------------------------------------------------------------
def AddThermalVariables(self):
self.spheres_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.cluster_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.inlet_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.fem_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.spheres_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.cluster_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.inlet_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.fem_model_part.AddNodalSolutionStepVariable(HEATFLUX)
#----------------------------------------------------------------------------------------------
    def SetThermalIntegrationScheme(self, properties):
        """Store the thermal time integration scheme object in *properties*.

        A per-material scheme defined in *properties* overrides the global
        project setting.
        """
        # Per-material setting takes precedence over the global one.
        if properties.Has(THERMAL_INTEGRATION_SCHEME_NAME):
            input_name = properties[THERMAL_INTEGRATION_SCHEME_NAME]
        else:
            input_name = self.thermal_integration_scheme
        if input_name == "forward_euler":
            class_name = "ThermalForwardEulerScheme"
        else:
            raise Exception('ThermalDEM', 'Time integration scheme \'' + input_name + '\' is not implemented.')
        # NOTE(review): eval() resolves the class from module globals; safe while
        # class_name is restricted to the literal above.
        try:
            object = eval(class_name)()
        except:
            raise Exception('The class corresponding to the time integration scheme named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
        object.SetThermalIntegrationSchemeInProperties(properties, True)
#----------------------------------------------------------------------------------------------
    def SetNumericalIntegrationMethod(self, properties):
        """Store the numerical integration method object in *properties*.

        A per-material method defined in *properties* overrides the global
        project setting.
        """
        # Per-material setting takes precedence over the global one.
        if properties.Has(NUMERICAL_INTEGRATION_METHOD_NAME):
            input_name = properties[NUMERICAL_INTEGRATION_METHOD_NAME]
        else:
            input_name = self.numerical_integration_method
        if input_name == "adaptive_simpson":
            class_name = "AdaptiveSimpsonQuadrature"
        else:
            raise Exception('ThermalDEM', 'Numerical integration method \'' + input_name + '\' is not implemented.')
        # NOTE(review): eval() resolves the class from module globals; safe while
        # class_name is restricted to the literal above.
        try:
            object = eval(class_name)()
        except:
            raise Exception('The class corresponding to the numerical integration method named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
        object.SetNumericalIntegrationMethodInProperties(properties, True)
#----------------------------------------------------------------------------------------------
def SetConstitutiveLaw(self, properties):
# Direct conduction
if self.direct_conduction_model == "batchelor_obrien":
class_name = "DirectConductionBOB"
elif self.direct_conduction_model == "thermal_pipe":
class_name = "DirectConductionPipe"
elif self.direct_conduction_model == "collisional":
class_name = "DirectConductionCollision"
else:
raise Exception('ThermalDEM', 'Direct thermal conduction model \'' + self.direct_conduction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the direct thermal conduction model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Indirect conduction
if self.indirect_conduction_model == "surrounding_layer":
class_name = "IndirectConductionSurroundLayer"
elif self.indirect_conduction_model == "voronoi_a":
class_name = "IndirectConductionVoronoiA"
elif self.indirect_conduction_model == "voronoi_b":
class_name = "IndirectConductionVoronoiB"
elif self.indirect_conduction_model == "vargas_mccarthy":
class_name = "IndirectConductionVargas"
else:
raise Exception('ThermalDEM', 'Indirect thermal conduction model \'' + self.indirect_conduction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the indirect thermal conduction model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Convection
if self.nusselt_correlation == "sphere_hanz_marshall":
class_name = "NusseltHanzMarshall"
elif self.nusselt_correlation == "sphere_whitaker":
class_name = "NusseltWhitaker"
elif self.nusselt_correlation == "sphere_gunn":
class_name = "NusseltGunn"
elif self.nusselt_correlation == "sphere_li_mason":
class_name = "NusseltLiMason"
else:
raise Exception('ThermalDEM', 'Nusselt number correlation \'' + self.nusselt_correlation + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the nusselt number correlation named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Radiation
if self.radiation_model == "continuum_zhou":
class_name = "RadiationContinuumZhou"
elif self.radiation_model == "continuum_krause":
class_name = "RadiationContinuumKrause"
else:
raise Exception('ThermalDEM', 'Thermal radiation model \'' + self.radiation_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the thermal radiation model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Friction
if self.friction_model == "coulomb":
class_name = "FrictionCoulomb"
else:
raise Exception('ThermalDEM', 'Frictional heat generation model \'' + self.friction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the frictional heat generation model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatGenerationMechanismInProperties(properties, True)
# Real contact
if self.adjusted_contact_model == "zhou":
class_name = "RealContactZhou"
elif self.adjusted_contact_model == "lu":
class_name = "RealContactLu"
elif self.adjusted_contact_model == "morris":
class_name = "RealContactMorris"
else:
raise Exception('ThermalDEM', 'Real contact model \'' + self.adjusted_contact_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the real contact model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetRealContactModelInProperties(properties, True)
#----------------------------------------------------------------------------------------------
    def SetThermalVariablesAndOptions(self):
        """Copy all thermal settings into the spheres model part ProcessInfo.

        This is how the validated Python-side configuration reaches the C++
        strategy and elements.
        """
        # General options
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, MOTION_OPTION, self.compute_motion_option)
        self.spheres_model_part.ProcessInfo.SetValue(THERMAL_FREQUENCY, self.thermal_solve_frequency)
        # Models for heat transfer
        self.spheres_model_part.ProcessInfo.SetValue(DIRECT_CONDUCTION_MODEL_NAME, self.direct_conduction_model)
        self.spheres_model_part.ProcessInfo.SetValue(INDIRECT_CONDUCTION_MODEL_NAME, self.indirect_conduction_model)
        self.spheres_model_part.ProcessInfo.SetValue(CONVECTION_MODEL_NAME, self.nusselt_correlation)
        self.spheres_model_part.ProcessInfo.SetValue(RADIATION_MODEL_NAME, self.radiation_model)
        self.spheres_model_part.ProcessInfo.SetValue(FRICTION_MODEL_NAME, self.friction_model)
        self.spheres_model_part.ProcessInfo.SetValue(REAL_CONTACT_MODEL_NAME, self.adjusted_contact_model)
        self.spheres_model_part.ProcessInfo.SetValue(VORONOI_METHOD_NAME, self.voronoi_method)
        self.spheres_model_part.ProcessInfo.SetValue(POROSITY_METHOD_NAME, self.porosity_method)
        # Active heat transfer mechanisms
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, DIRECT_CONDUCTION_OPTION, self.compute_direct_conduction_option)
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, INDIRECT_CONDUCTION_OPTION, self.compute_indirect_conduction_option)
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, CONVECTION_OPTION, self.compute_convection_option)
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, RADIATION_OPTION, self.compute_radiation_option)
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, FRICTION_HEAT_OPTION, self.compute_friction_heat_option)
        self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, REAL_CONTACT_OPTION, self.compute_adjusted_contact_option)
        # Model parameters
        self.spheres_model_part.ProcessInfo.SetValue(MIN_CONDUCTION_DISTANCE, self.min_conduction_distance)
        self.spheres_model_part.ProcessInfo.SetValue(MAX_CONDUCTION_DISTANCE, self.max_conduction_distance)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_LAYER_THICKNESS, self.fluid_layer_thickness)
        self.spheres_model_part.ProcessInfo.SetValue(ISOTHERMAL_CORE_RADIUS, self.isothermal_core_radius)
        self.spheres_model_part.ProcessInfo.SetValue(MAX_RADIATION_DISTANCE, self.max_radiation_distance)
        self.spheres_model_part.ProcessInfo.SetValue(FRICTION_HEAT_CONVERSION, self.friction_heat_conversion)
        self.spheres_model_part.ProcessInfo.SetValue(AVERAGE_POROSITY, self.global_porosity)
        self.spheres_model_part.ProcessInfo.SetValue(ALPHA_SHAPE_PARAMETER, self.alpha_parameter)
        self.spheres_model_part.ProcessInfo.SetValue(INTEGRAL_TOLERANCE, self.integral_tolerance)
        # Interstitial fluid properties
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_DENSITY, self.fluid_density)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_VISCOSITY, self.fluid_viscosity)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_THERMAL_CONDUCTIVITY, self.fluid_thermal_conductivity)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_HEAT_CAPACITY, self.fluid_heat_capacity)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_TEMPERATURE, self.fluid_temperature)
        self.spheres_model_part.ProcessInfo.SetValue(FLUID_VELOCITY, self.fluid_velocity)
#----------------------------------------------------------------------------------------------
def CreateCPlusPlusThermalStrategy(self):
translational_integration_scheme = self.DEM_parameters["TranslationalIntegrationScheme"].GetString()
if (translational_integration_scheme == 'Velocity_Verlet'):
raise Exception('ThermalDEM', '"Thermal strategy for translational integration scheme \'' + translational_integration_scheme + '\' is not implemented.')
else:
self.cplusplus_strategy = ThermalExplicitSolverStrategy(self.settings,
self.max_delta_time,
self.n_step_search,
self.safety_factor,
self.delta_option,
self.creator_destructor,
self.dem_fem_search,
self.search_strategy,
self.solver_settings)
#----------------------------------------------------------------------------------------------
    def InitializeCPlusPlusUtilities(self):
        """Initialize the C++ utility objects created in CreateCPlusPlusUtilities."""
        self.thermal_data_utils.ExecuteInitialize(self.spheres_model_part,self.fem_model_part)
        if (self.compute_voronoi or self.compute_porosity):
            self.tesselation_utils.ExecuteInitialize(self.spheres_model_part, self.compute_voronoi, self.compute_porosity)
        if (self.write_graph):
            # Each flag selects one output graph to be written during the run.
            self.graph_utils.ExecuteInitialize(self.PostGraphParticleTempMin,
                                               self.PostGraphParticleTempMax,
                                               self.PostGraphParticleTempAvg,
                                               self.PostGraphParticleTempDev,
                                               self.PostGraphModelTempAvg,
                                               self.PostGraphFluxContributions)
#----------------------------------------------------------------------------------------------
def IsTimeToUpdateVoronoi(self):
if (self.compute_voronoi):
step = self.spheres_model_part.ProcessInfo[TIME_STEPS]
freq = self.voronoi_tesselation_frequency
return step == 1 or (freq != 0 and step%freq == 0)
else:
return False
#----------------------------------------------------------------------------------------------
def IsTimeToUpdatePorosity(self):
if (self.compute_porosity):
step = self.spheres_model_part.ProcessInfo[TIME_STEPS]
freq = self.porosity_update_frequency
return step == 1 or (freq != 0 and step%freq == 0)
else:
return False
# ==============================================================================================
# Imports
from KratosMultiphysics import *
from KratosMultiphysics.DEMApplication import *
from KratosMultiphysics.ThermalDEMApplication import *
import KratosMultiphysics.DEMApplication.sphere_strategy as SolverStrategy
import KratosMultiphysics.ThermalDEMApplication.default_input_settings as DefaultSettings
# Set base class: the thermal strategy extends the standard DEM explicit strategy.
BaseStrategy = SolverStrategy.ExplicitStrategy
# Auxiliary functions
def GetBoolParameterIfItExists(parameters, key):
    """Return the boolean stored under *key* in *parameters*, or False when absent."""
    if key not in parameters.keys():
        return False
    return parameters[key].GetBool()
# Strategy class
class ExplicitStrategy(BaseStrategy):
####################################### DERIVED METHODS #######################################
#----------------------------------------------------------------------------------------------
    def __init__(self, all_model_parts, creator_destructor, dem_fem_search, DEM_parameters, procedures):
        """Build the thermal DEM strategy on top of the standard DEM strategy."""
        # Initialize base class
        BaseStrategy.__init__(self, all_model_parts, creator_destructor, dem_fem_search, DEM_parameters, procedures)
        # Get and validate input parameters
        self.GetProjectParameters(DEM_parameters)
        self.CheckProjectParameters()
        # Set flags
        self.SetVoronoiPorosityFlags()
        self.SetGraphFlags()
        # Create utilities
        self.CreateCPlusPlusUtilities()
#----------------------------------------------------------------------------------------------
    def AddVariables(self):
        """Add the standard DEM variables plus the thermal variables to all model parts."""
        # Add standard variables
        BaseStrategy.AddVariables(self)
        # Add thermal variables to all model parts
        self.AddThermalVariables()
#----------------------------------------------------------------------------------------------
    def CreateCPlusPlusStrategy(self):
        """Push options to the ProcessInfo and create the C++ strategy object."""
        # Set standard options
        BaseStrategy.SetVariablesAndOptions(self)
        # Set thermal options (set ProcessInfo values)
        self.SetThermalVariablesAndOptions()
        # Create cpp strategy object
        self.CreateCPlusPlusThermalStrategy()
#----------------------------------------------------------------------------------------------
    def ModifyProperties(self, properties, param = 0):
        """Set thermal model pointers in *properties* in addition to the standard ones.

        NOTE(review): the early return for non-zero *param* appears to mirror a
        base-class convention for sub-properties passes — confirm intent.
        """
        if param:
            return
        # Set standard properties
        BaseStrategy.ModifyProperties(self, properties, param)
        # Set pointers: time integration scheme / numerical integration method / constitutive laws (heat transfer models)
        self.SetThermalIntegrationScheme(properties)
        self.SetNumericalIntegrationMethod(properties)
        self.SetConstitutiveLaw(properties)
#----------------------------------------------------------------------------------------------
    def Initialize(self):
        """Initialize utilities first (they set temperatures), then the base strategy."""
        # Initialize utilities
        # (important to be before the initialization of elements, because temperature is set here)
        self.InitializeCPlusPlusUtilities()
        # Base class initializer
        # (initialize the strategy and the elements, so temperature must be already set at this point)
        BaseStrategy.Initialize(self)
#----------------------------------------------------------------------------------------------
    def InitializeSolutionStep(self):
        """Begin a solution step and rebuild tesselation data when scheduled."""
        # With motion disabled, only the C++ strategy's own step setup is needed.
        if (self.compute_motion_option):
            BaseStrategy.InitializeSolutionStep(self)
        else:
            (self.cplusplus_strategy).InitializeSolutionStep()
        # Perform tesselation-dependent tasks (triangulation or tetrahedralization)
        if (self.IsTimeToUpdateVoronoi() or self.IsTimeToUpdatePorosity()):
            self.tesselation_utils.ExecuteInitializeSolutionStep(self.spheres_model_part)
#----------------------------------------------------------------------------------------------
    def Predict(self):
        """Run the base prediction step only when particle motion is being solved."""
        if (self.compute_motion_option):
            BaseStrategy.Predict(self)
#----------------------------------------------------------------------------------------------
    def SolveSolutionStep(self):
        """Solve one step: full DEM solve, or thermal-only (static) when motion is off."""
        # Solve step according to motion type
        if (self.compute_motion_option):
            (self.cplusplus_strategy).SolveSolutionStep()
        else:
            (self.cplusplus_strategy).SolveSolutionStepStatic()
        return True
#----------------------------------------------------------------------------------------------
    def FinalizeSolutionStep(self):
        """Finalize the step and append a record to the output graphs if enabled."""
        BaseStrategy.FinalizeSolutionStep(self)
        # Write output graphs
        if (self.write_graph):
            self.graph_utils.ExecuteFinalizeSolutionStep(self.spheres_model_part)
#----------------------------------------------------------------------------------------------
    def Finalize(self):
        """Finalize the base strategy and close graph files if they were opened."""
        BaseStrategy.Finalize(self)
        # Close graph files
        if (self.write_graph):
            self.graph_utils.ExecuteFinalize()
####################################### PARTICULAR METHODS #######################################
#----------------------------------------------------------------------------------------------
    def GetProjectParameters(self, DEM_parameters):
        """Read all thermal settings from the project parameters into attributes.

        Missing entries are filled from the defaults returned by
        DefaultSettings.GetDefaultInputSettings().
        """
        # Get thermal settings and assign default values (in case it was not previously done)
        default_settings = DefaultSettings.GetDefaultInputSettings()
        # NOTE(review): mixes self.DEM_parameters (set by the base class) and the
        # DEM_parameters argument — presumably the same object; confirm.
        if "thermal_settings" in self.DEM_parameters.keys():
            self.thermal_settings = DEM_parameters["thermal_settings"]
        else:
            self.thermal_settings = Parameters("""{}""")
        self.thermal_settings.ValidateAndAssignDefaults(default_settings)
        # General options
        self.compute_motion_option = self.thermal_settings["compute_motion"].GetBool()
        # Frequencies
        self.thermal_solve_frequency       = self.thermal_settings["thermal_solve_frequency"].GetInt()
        self.voronoi_tesselation_frequency = self.thermal_settings["voronoi_tesselation_frequency"].GetInt()
        self.porosity_update_frequency     = self.thermal_settings["porosity_update_frequency"].GetInt()
        # Integration scheme and method
        self.thermal_integration_scheme   = self.thermal_settings["thermal_integration_scheme"].GetString()
        self.numerical_integration_method = self.thermal_settings["numerical_integration_method"].GetString()
        # Models for heat transfer
        self.direct_conduction_model   = self.thermal_settings["direct_conduction_model"].GetString()
        self.indirect_conduction_model = self.thermal_settings["indirect_conduction_model"].GetString()
        self.nusselt_correlation       = self.thermal_settings["nusselt_correlation"].GetString()
        self.radiation_model           = self.thermal_settings["radiation_model"].GetString()
        self.friction_model            = self.thermal_settings["friction_model"].GetString()
        self.adjusted_contact_model    = self.thermal_settings["adjusted_contact_model"].GetString()
        self.voronoi_method            = self.thermal_settings["voronoi_method"].GetString()
        self.porosity_method           = self.thermal_settings["porosity_method"].GetString()
        # Active heat transfer mechanisms
        self.compute_direct_conduction_option   = GetBoolParameterIfItExists(self.thermal_settings, "compute_direct_conduction")
        self.compute_indirect_conduction_option = GetBoolParameterIfItExists(self.thermal_settings, "compute_indirect_conduction")
        self.compute_convection_option          = GetBoolParameterIfItExists(self.thermal_settings, "compute_convection")
        self.compute_radiation_option           = GetBoolParameterIfItExists(self.thermal_settings, "compute_radiation")
        self.compute_friction_heat_option       = GetBoolParameterIfItExists(self.thermal_settings, "compute_friction_heat")
        self.compute_adjusted_contact_option    = GetBoolParameterIfItExists(self.thermal_settings, "compute_adjusted_contact")
        # Model parameters
        self.min_conduction_distance  = self.thermal_settings["min_conduction_distance"].GetDouble()
        self.max_conduction_distance  = self.thermal_settings["max_conduction_distance"].GetDouble()
        self.fluid_layer_thickness    = self.thermal_settings["fluid_layer_thickness"].GetDouble()
        self.isothermal_core_radius   = self.thermal_settings["isothermal_core_radius"].GetDouble()
        self.max_radiation_distance   = self.thermal_settings["max_radiation_distance"].GetDouble()
        self.friction_heat_conversion = self.thermal_settings["friction_heat_conversion_ratio"].GetDouble()
        self.global_porosity          = self.thermal_settings["global_porosity"].GetDouble()
        self.alpha_parameter          = self.thermal_settings["alpha_shape_parameter"].GetDouble()
        self.integral_tolerance       = self.thermal_settings["integral_tolerance"].GetDouble()
        # Interstitial fluid properties
        self.fluid_props                = self.thermal_settings["global_fluid_properties"]
        self.fluid_density              = self.fluid_props["fluid_density"].GetDouble()
        self.fluid_viscosity            = self.fluid_props["fluid_viscosity"].GetDouble()
        self.fluid_thermal_conductivity = self.fluid_props["fluid_thermal_conductivity"].GetDouble()
        self.fluid_heat_capacity        = self.fluid_props["fluid_heat_capacity"].GetDouble()
        self.fluid_temperature          = self.fluid_props["fluid_temperature"].GetDouble()
        self.fluid_velocity             = Vector(3)
        self.fluid_velocity[0]          = self.fluid_props["fluid_velocity_X"].GetDouble()
        self.fluid_velocity[1]          = self.fluid_props["fluid_velocity_Y"].GetDouble()
        self.fluid_velocity[2]          = self.fluid_props["fluid_velocity_Z"].GetDouble()
        # Graph writing
        self.PostGraphParticleTempMin   = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempMin")
        self.PostGraphParticleTempMax   = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempMax")
        self.PostGraphParticleTempAvg   = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempAvg")
        self.PostGraphParticleTempDev   = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphParticleTempDev")
        self.PostGraphModelTempAvg      = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphModelTempAvg")
        self.PostGraphFluxContributions = GetBoolParameterIfItExists(self.DEM_parameters, "PostGraphFluxContributions")
#----------------------------------------------------------------------------------------------
    def CheckProjectParameters(self):
        """Validate the thermal settings read by GetProjectParameters.

        Unknown model/method names raise; out-of-range frequencies and some
        geometric parameters are silently clamped to valid values, while other
        invalid values raise.
        """
        # Time integration scheme
        if (self.thermal_integration_scheme != "forward_euler"):
            raise Exception('ThermalDEM', 'Time integration scheme \'' + self.thermal_integration_scheme + '\' is not implemented.')
        # Numerical integration method
        if (self.numerical_integration_method != "adaptive_simpson"):
            raise Exception('ThermalDEM', 'Numerical integration method \'' + self.numerical_integration_method + '\' is not implemented.')
        # Heat transfer models
        if (self.direct_conduction_model != "batchelor_obrien" and
            self.direct_conduction_model != "thermal_pipe"     and
            self.direct_conduction_model != "collisional"):
            raise Exception('ThermalDEM', 'Direct thermal conduction model \'' + self.direct_conduction_model + '\' is not implemented.')
        if (self.indirect_conduction_model != "surrounding_layer" and
            self.indirect_conduction_model != "voronoi_a"         and
            self.indirect_conduction_model != "voronoi_b"         and
            self.indirect_conduction_model != "vargas_mccarthy"):
            raise Exception('ThermalDEM', 'Indirect thermal conduction model \'' + self.indirect_conduction_model + '\' is not implemented.')
        if (self.nusselt_correlation != "sphere_hanz_marshall" and
            self.nusselt_correlation != "sphere_whitaker"      and
            self.nusselt_correlation != "sphere_gunn"          and
            self.nusselt_correlation != "sphere_li_mason"):
            raise Exception('ThermalDEM', 'Nusselt number correlation \'' + self.nusselt_correlation + '\' is not implemented.')
        if (self.radiation_model != "continuum_zhou" and
            self.radiation_model != "continuum_krause"):
            raise Exception('ThermalDEM', 'Thermal radiation model \'' + self.radiation_model + '\' is not implemented.')
        if (self.friction_model != "coulomb"):
            raise Exception('ThermalDEM', 'Frictional heat generation model \'' + self.friction_model + '\' is not implemented.')
        if (self.adjusted_contact_model != "zhou" and
            self.adjusted_contact_model != "lu"   and
            self.adjusted_contact_model != "morris"):
            raise Exception('ThermalDEM', 'Adjusted contact model \'' + self.adjusted_contact_model + '\' is not implemented.')
        # Other methods
        if (self.voronoi_method != "tesselation" and
            self.voronoi_method != "porosity"):
            raise Exception('ThermalDEM', 'Voronoi method \'' + self.voronoi_method + '\' is not implemented.')
        if (self.porosity_method != "global"              and
            self.porosity_method != "average_convex_hull" and
            self.porosity_method != "average_alpha_shape"):
            raise Exception('ThermalDEM', 'Porosity method \'' + self.porosity_method + '\' is not implemented.')
        # Model parameters values
        # NOTE(review): frequencies and some distances are clamped silently,
        # while other invalid values raise — intentional leniency, presumably.
        if (self.thermal_solve_frequency <= 0):
            self.thermal_solve_frequency = 1
        if (self.voronoi_tesselation_frequency < 0):
            self.voronoi_tesselation_frequency = 0
        if (self.porosity_update_frequency < 0):
            self.porosity_update_frequency = 0
        if (self.min_conduction_distance <= 0):
            raise Exception('ThermalDEM', '"min_conduction_distance" must be positive.')
        if (self.max_conduction_distance < 0):
            self.max_conduction_distance = 0
        if (self.fluid_layer_thickness < 0):
            self.fluid_layer_thickness = 0
        if (self.isothermal_core_radius < 0):
            self.isothermal_core_radius = 0
        if (self.isothermal_core_radius > 1):
            self.isothermal_core_radius = 1
        if (self.max_radiation_distance < 0 ):
            self.max_radiation_distance = 0
        if (self.friction_heat_conversion < 0 or self.friction_heat_conversion > 1):
            raise Exception('ThermalDEM', '"friction_heat_conversion_ratio" must be between zero and one.')
        if (self.global_porosity < 0 or self.global_porosity >= 1):
            raise Exception('ThermalDEM', '"global_porosity" must be between zero and one.')
        if (self.alpha_parameter < 0):
            raise Exception('ThermalDEM', '"alpha_shape_parameter" must be positive.')
        if (self.integral_tolerance <= 0):
            raise Exception('ThermalDEM', '"integral_tolerance" must be positive.')
        # Fluid properties values
        if (self.fluid_density              <= 0 or
            self.fluid_viscosity            <= 0 or
            self.fluid_thermal_conductivity <= 0 or
            self.fluid_heat_capacity        <= 0):
            raise Exception('ThermalDEM', '"global_fluid_properties" must contain positive values for material properties.')
#----------------------------------------------------------------------------------------------
def SetVoronoiPorosityFlags(self):
# Flag for computing voronoi diagram in a given frequency
if (self.compute_indirect_conduction_option and
(self.indirect_conduction_model == "voronoi_a" or
self.indirect_conduction_model == "voronoi_b") and
self.voronoi_method == "tesselation"):
self.compute_voronoi = True
else:
self.compute_voronoi = False
# Flag for computing porosity in a given frequency
if (self.compute_indirect_conduction_option and
(self.indirect_conduction_model == "voronoi_a" or
self.indirect_conduction_model == "voronoi_b") and
self.voronoi_method == "porosity" and
self.porosity_method != "global"):
self.compute_porosity = True
elif (self.compute_convection_option and
(self.nusselt_correlation == "sphere_gunn" or
self.nusselt_correlation == "sphere_li_mason") and
self.porosity_method != "global"):
self.compute_porosity = True
elif (self.compute_radiation_option and
self.radiation_model == "continuum_zhou" and
self.porosity_method != "global"):
self.compute_porosity = True
else:
self.compute_porosity = False
#----------------------------------------------------------------------------------------------
def SetGraphFlags(self):
if (self.PostGraphParticleTempMin or
self.PostGraphParticleTempMax or
self.PostGraphParticleTempAvg or
self.PostGraphParticleTempDev or
self.PostGraphModelTempAvg or
self.PostGraphFluxContributions):
self.write_graph = True
else:
self.write_graph = False
#----------------------------------------------------------------------------------------------
    def CreateCPlusPlusUtilities(self):
        """Create C++ utility objects according to the features enabled by the flags."""
        self.thermal_data_utils = SetThermalDataUtilities()
        if (self.compute_voronoi or self.compute_porosity):
            # NOTE(review): if self.dimension is neither 2 nor 3, tesselation_utils
            # is never created — presumably validated upstream; confirm.
            if self.dimension == 2:
                self.tesselation_utils = TesselationUtilities2D()
            elif self.dimension == 3:
                self.tesselation_utils = TesselationUtilities3D()
        if (self.write_graph):
            self.graph_utils = GraphUtilities()
#----------------------------------------------------------------------------------------------
def AddThermalVariables(self):
self.spheres_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.cluster_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.inlet_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.fem_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
self.spheres_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.cluster_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.inlet_model_part.AddNodalSolutionStepVariable(HEATFLUX)
self.fem_model_part.AddNodalSolutionStepVariable(HEATFLUX)
#----------------------------------------------------------------------------------------------
    def SetThermalIntegrationScheme(self, properties):
        """Store the thermal time integration scheme object in *properties*.

        A per-material scheme defined in *properties* overrides the global
        project setting.
        """
        # Per-material setting takes precedence over the global one.
        if properties.Has(THERMAL_INTEGRATION_SCHEME_NAME):
            input_name = properties[THERMAL_INTEGRATION_SCHEME_NAME]
        else:
            input_name = self.thermal_integration_scheme
        if input_name == "forward_euler":
            class_name = "ThermalForwardEulerScheme"
        else:
            raise Exception('ThermalDEM', 'Time integration scheme \'' + input_name + '\' is not implemented.')
        # NOTE(review): eval() resolves the class from module globals; safe while
        # class_name is restricted to the literal above.
        try:
            object = eval(class_name)()
        except:
            raise Exception('The class corresponding to the time integration scheme named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
        object.SetThermalIntegrationSchemeInProperties(properties, True)
#----------------------------------------------------------------------------------------------
    def SetNumericalIntegrationMethod(self, properties):
        """Store the numerical integration method object in *properties*.

        A per-material method defined in *properties* overrides the global
        project setting.
        """
        # Per-material setting takes precedence over the global one.
        if properties.Has(NUMERICAL_INTEGRATION_METHOD_NAME):
            input_name = properties[NUMERICAL_INTEGRATION_METHOD_NAME]
        else:
            input_name = self.numerical_integration_method
        if input_name == "adaptive_simpson":
            class_name = "AdaptiveSimpsonQuadrature"
        else:
            raise Exception('ThermalDEM', 'Numerical integration method \'' + input_name + '\' is not implemented.')
        # NOTE(review): eval() resolves the class from module globals; safe while
        # class_name is restricted to the literal above.
        try:
            object = eval(class_name)()
        except:
            raise Exception('The class corresponding to the numerical integration method named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
        object.SetNumericalIntegrationMethodInProperties(properties, True)
#----------------------------------------------------------------------------------------------
def SetConstitutiveLaw(self, properties):
# Direct conduction
if self.direct_conduction_model == "batchelor_obrien":
class_name = "DirectConductionBOB"
elif self.direct_conduction_model == "thermal_pipe":
class_name = "DirectConductionPipe"
elif self.direct_conduction_model == "collisional":
class_name = "DirectConductionCollision"
else:
raise Exception('ThermalDEM', 'Direct thermal conduction model \'' + self.direct_conduction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the direct thermal conduction model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Indirect conduction
if self.indirect_conduction_model == "surrounding_layer":
class_name = "IndirectConductionSurroundLayer"
elif self.indirect_conduction_model == "voronoi_a":
class_name = "IndirectConductionVoronoiA"
elif self.indirect_conduction_model == "voronoi_b":
class_name = "IndirectConductionVoronoiB"
elif self.indirect_conduction_model == "vargas_mccarthy":
class_name = "IndirectConductionVargas"
else:
raise Exception('ThermalDEM', 'Indirect thermal conduction model \'' + self.indirect_conduction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the indirect thermal conduction model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Convection
if self.nusselt_correlation == "sphere_hanz_marshall":
class_name = "NusseltHanzMarshall"
elif self.nusselt_correlation == "sphere_whitaker":
class_name = "NusseltWhitaker"
elif self.nusselt_correlation == "sphere_gunn":
class_name = "NusseltGunn"
elif self.nusselt_correlation == "sphere_li_mason":
class_name = "NusseltLiMason"
else:
raise Exception('ThermalDEM', 'Nusselt number correlation \'' + self.nusselt_correlation + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the nusselt number correlation named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Radiation
if self.radiation_model == "continuum_zhou":
class_name = "RadiationContinuumZhou"
elif self.radiation_model == "continuum_krause":
class_name = "RadiationContinuumKrause"
else:
raise Exception('ThermalDEM', 'Thermal radiation model \'' + self.radiation_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the thermal radiation model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatExchangeMechanismInProperties(properties, True)
# Friction
if self.friction_model == "coulomb":
class_name = "FrictionCoulomb"
else:
raise Exception('ThermalDEM', 'Frictional heat generation model \'' + self.friction_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the frictional heat generation model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetHeatGenerationMechanismInProperties(properties, True)
# Real contact
if self.adjusted_contact_model == "zhou":
class_name = "RealContactZhou"
elif self.adjusted_contact_model == "lu":
class_name = "RealContactLu"
elif self.adjusted_contact_model == "morris":
class_name = "RealContactMorris"
else:
raise Exception('ThermalDEM', 'Real contact model \'' + self.adjusted_contact_model + '\' is not implemented.')
try:
object = eval(class_name)()
except:
raise Exception('The class corresponding to the real contact model named ' + class_name + ' has not been added to python. Please, select a different name or add the required class.')
object.SetRealContactModelInProperties(properties, True)
#----------------------------------------------------------------------------------------------
def SetThermalVariablesAndOptions(self):
    """Copy the thermal configuration held by this solver object into the
    ProcessInfo of the spheres model part so the C++ layer can read it.

    Covers: general options, heat-transfer model names (resolved on the
    C++ side), the active-mechanism flags, model parameters, and the
    interstitial fluid properties.
    """
    # General options
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, MOTION_OPTION, self.compute_motion_option)
    self.spheres_model_part.ProcessInfo.SetValue(THERMAL_FREQUENCY, self.thermal_solve_frequency)
    # Models for heat transfer (stored by name; instantiated elsewhere)
    self.spheres_model_part.ProcessInfo.SetValue(DIRECT_CONDUCTION_MODEL_NAME, self.direct_conduction_model)
    self.spheres_model_part.ProcessInfo.SetValue(INDIRECT_CONDUCTION_MODEL_NAME, self.indirect_conduction_model)
    self.spheres_model_part.ProcessInfo.SetValue(CONVECTION_MODEL_NAME, self.nusselt_correlation)
    self.spheres_model_part.ProcessInfo.SetValue(RADIATION_MODEL_NAME, self.radiation_model)
    self.spheres_model_part.ProcessInfo.SetValue(FRICTION_MODEL_NAME, self.friction_model)
    self.spheres_model_part.ProcessInfo.SetValue(REAL_CONTACT_MODEL_NAME, self.adjusted_contact_model)
    self.spheres_model_part.ProcessInfo.SetValue(VORONOI_METHOD_NAME, self.voronoi_method)
    self.spheres_model_part.ProcessInfo.SetValue(POROSITY_METHOD_NAME, self.porosity_method)
    # Active heat transfer mechanisms (boolean flags stored as 0/1)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, DIRECT_CONDUCTION_OPTION, self.compute_direct_conduction_option)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, INDIRECT_CONDUCTION_OPTION, self.compute_indirect_conduction_option)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, CONVECTION_OPTION, self.compute_convection_option)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, RADIATION_OPTION, self.compute_radiation_option)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, FRICTION_HEAT_OPTION, self.compute_friction_heat_option)
    self.SetOneOrZeroInProcessInfoAccordingToBoolValue(self.spheres_model_part, REAL_CONTACT_OPTION, self.compute_adjusted_contact_option)
    # Model parameters
    self.spheres_model_part.ProcessInfo.SetValue(MIN_CONDUCTION_DISTANCE, self.min_conduction_distance)
    self.spheres_model_part.ProcessInfo.SetValue(MAX_CONDUCTION_DISTANCE, self.max_conduction_distance)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_LAYER_THICKNESS, self.fluid_layer_thickness)
    self.spheres_model_part.ProcessInfo.SetValue(ISOTHERMAL_CORE_RADIUS, self.isothermal_core_radius)
    self.spheres_model_part.ProcessInfo.SetValue(MAX_RADIATION_DISTANCE, self.max_radiation_distance)
    self.spheres_model_part.ProcessInfo.SetValue(FRICTION_HEAT_CONVERSION, self.friction_heat_conversion)
    self.spheres_model_part.ProcessInfo.SetValue(AVERAGE_POROSITY, self.global_porosity)
    self.spheres_model_part.ProcessInfo.SetValue(ALPHA_SHAPE_PARAMETER, self.alpha_parameter)
    self.spheres_model_part.ProcessInfo.SetValue(INTEGRAL_TOLERANCE, self.integral_tolerance)
    # Interstitial fluid properties
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_DENSITY, self.fluid_density)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_VISCOSITY, self.fluid_viscosity)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_THERMAL_CONDUCTIVITY, self.fluid_thermal_conductivity)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_HEAT_CAPACITY, self.fluid_heat_capacity)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_TEMPERATURE, self.fluid_temperature)
    self.spheres_model_part.ProcessInfo.SetValue(FLUID_VELOCITY, self.fluid_velocity)
#----------------------------------------------------------------------------------------------
def CreateCPlusPlusThermalStrategy(self):
    """Instantiate the C++ explicit thermal solver strategy.

    Raises:
        Exception: if the configured translational integration scheme is
            'Velocity_Verlet', which has no thermal counterpart yet.
    """
    translational_integration_scheme = self.DEM_parameters["TranslationalIntegrationScheme"].GetString()
    if translational_integration_scheme == 'Velocity_Verlet':
        # Fixed: the original message carried a stray leading double quote.
        raise Exception('ThermalDEM', 'Thermal strategy for translational integration scheme \'' + translational_integration_scheme + '\' is not implemented.')
    else:
        self.cplusplus_strategy = ThermalExplicitSolverStrategy(self.settings,
                                                                self.max_delta_time,
                                                                self.n_step_search,
                                                                self.safety_factor,
                                                                self.delta_option,
                                                                self.creator_destructor,
                                                                self.dem_fem_search,
                                                                self.search_strategy,
                                                                self.solver_settings)
#----------------------------------------------------------------------------------------------
def InitializeCPlusPlusUtilities(self):
    """Initialize the C++ helper utilities used during the thermal solve.

    Thermal data tables are always initialized; tesselation utilities only
    when Voronoi/porosity computations are enabled; graph utilities only
    when graph output was requested.
    """
    self.thermal_data_utils.ExecuteInitialize(self.spheres_model_part,self.fem_model_part)
    if (self.compute_voronoi or self.compute_porosity):
        self.tesselation_utils.ExecuteInitialize(self.spheres_model_part, self.compute_voronoi, self.compute_porosity)
    if (self.write_graph):
        # Flags selecting which per-step statistics are written to graphs.
        self.graph_utils.ExecuteInitialize(self.PostGraphParticleTempMin,
                                           self.PostGraphParticleTempMax,
                                           self.PostGraphParticleTempAvg,
                                           self.PostGraphParticleTempDev,
                                           self.PostGraphModelTempAvg,
                                           self.PostGraphFluxContributions)
#----------------------------------------------------------------------------------------------
def IsTimeToUpdateVoronoi(self):
    """Return True when the Voronoi diagram must be (re)built this step.

    The diagram is rebuilt on the first step and then every
    ``voronoi_tesselation_frequency`` steps (a frequency of 0 means
    "first step only").
    """
    if not self.compute_voronoi:
        return False
    current_step = self.spheres_model_part.ProcessInfo[TIME_STEPS]
    update_freq = self.voronoi_tesselation_frequency
    if current_step == 1:
        return True
    return update_freq != 0 and current_step % update_freq == 0
#----------------------------------------------------------------------------------------------
def IsTimeToUpdatePorosity(self):
    """Return True when the porosity must be recomputed this step.

    Porosity is recomputed on the first step and then every
    ``porosity_update_frequency`` steps (a frequency of 0 means
    "first step only").
    """
    if not self.compute_porosity:
        return False
    current_step = self.spheres_model_part.ProcessInfo[TIME_STEPS]
    update_freq = self.porosity_update_frequency
    if current_step == 1:
        return True
    return update_freq != 0 and current_step % update_freq == 0
|
en
| 0.240275
|
# Imports # Set base class # Auxiliary functions # Strategy class ####################################### DERIVED METHODS ####################################### #---------------------------------------------------------------------------------------------- # Initialize base class # Get and validate input parameters # Set flags # Create utilities #---------------------------------------------------------------------------------------------- # Add standard variables # Add thermal variables to all model parts #---------------------------------------------------------------------------------------------- # Set standard options # Set thermal options (set ProcessInfo values) # Create cpp strategy object #---------------------------------------------------------------------------------------------- # Set standard properties # Set pointers: time integration scheme / numerical integration method / constitutive laws (heat transfer models) #---------------------------------------------------------------------------------------------- # Initialize utilities # (important to be before the initialization of elements, because temperature is set here) # Base class initializer # (initialize the strategy and the elements, so temperature must be already set at this point) #---------------------------------------------------------------------------------------------- # Perform tesselation-dependent tasks (triangulation or tetrahedralization) #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- # Solve step according to motion type #---------------------------------------------------------------------------------------------- # Write output graphs #---------------------------------------------------------------------------------------------- # Close graph files ####################################### PARTICULAR METHODS 
####################################### #---------------------------------------------------------------------------------------------- # Get thermal settings and assign default values (in case it was not previously done) {} # General options # Frequencies # Integration scheme and method # Models for heat transfer # Active heat transfer mechanisms # Model parameters # Interstitial fluid properties # Graph writing #---------------------------------------------------------------------------------------------- # Time integration scheme # Numerical integration method # Heat transfer models # Other methods # Model parameters values # Fluid properties values #---------------------------------------------------------------------------------------------- # Flag for computing voronoi diagram in a given frequency # Flag for computing porosity in a given frequency #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- # Direct conduction # Indirect conduction # Convection # Radiation # Friction # Real contact #---------------------------------------------------------------------------------------------- # General options # Models for heat transfer # Active heat transfer mechanisms # Model parameters # Interstitial fluid properties #---------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------- 
#---------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------
| 1.927109
| 2
|
tests/test_mint.py
|
LandRegistry/mint-alpha
| 0
|
6628786
|
<gh_stars>0
import unittest
import mock
import requests
import json
from themint import server
class MintTestCase(unittest.TestCase):
    """HTTP-level tests for the mint Flask service.

    Uses Flask's built-in test client, so no real server process is
    started.  NOTE(review): test_for_correct_response_upon_successful_post
    fetches sample data from GitHub over the network — it fails offline.
    """

    def setUp(self):
        # TESTING makes Flask propagate errors instead of rendering 500 pages.
        server.app.config['TESTING'] = True
        self.app = server.app.test_client()

    def test_server(self):
        # Root endpoint doubles as a liveness check.
        self.assertEqual((self.app.get('/')).status, '200 OK')

    def test_get_not_allowed(self):
        # The titles resource accepts POST only.
        self.assertEqual((self.app.get('/titles/DN1234')).status, '405 METHOD NOT ALLOWED')

    def post_to_mint(self, data):
        # Helper: POST *data* as JSON to the titles endpoint.
        #does not 'mint' anything provided the test environment variables remain blank.
        # NOTE(review): json.dumps(..., encoding=...) is Python 2 only — it
        # raises TypeError on Python 3; confirm the target runtime.
        headers = {'content-type': 'application/json; charset=utf-8'}
        return self.app.post('/titles/dn1234', data=json.dumps(data, encoding='utf-8'), headers=headers)

    def test_error_upon_incorrect_json_schema(self):
        # Empty payload fails schema validation.
        data = {}
        res = self.post_to_mint(data)
        self.assertEqual(res.status, '400 BAD REQUEST')

    def test_400_upon_unknown_error(self):
        #validation fails because an integer passed which can't be iterated over
        #Ideally response json should be checked here to ensure it contains 'Error when minting new'
        data = 1
        res = self.post_to_mint(data)
        self.assertEqual(res.status, '400 BAD REQUEST')

    def test_for_correct_response_upon_successful_post(self):
        # Pulls a known-good title payload from the test-data repository.
        response = requests.get("https://raw.githubusercontent.com/LandRegistry/generate-test-data/master/sample_titles/title-full.json")
        data = response.json()
        self.assertEqual(self.post_to_mint(data).status, '201 CREATED')

    @mock.patch('redis.Redis.info')
    def test_health(self, mock_info):
        # Redis info() is mocked out so /health succeeds without a Redis server.
        self.assertEqual((self.app.get('/health')).status, '200 OK')
|
import unittest
import mock
import requests
import json
from themint import server
class MintTestCase(unittest.TestCase):
    """End-to-end tests for the mint Flask app via the Werkzeug test client.

    NOTE(review): one test downloads its fixture from GitHub at run time,
    so it requires network access.
    """

    def setUp(self):
        # Enable Flask test mode; no real HTTP server is needed.
        server.app.config['TESTING'] = True
        self.app = server.app.test_client()

    def test_server(self):
        # Basic liveness: root endpoint responds 200.
        self.assertEqual((self.app.get('/')).status, '200 OK')

    def test_get_not_allowed(self):
        # GET is rejected on the titles resource (POST-only).
        self.assertEqual((self.app.get('/titles/DN1234')).status, '405 METHOD NOT ALLOWED')

    def post_to_mint(self, data):
        # Shared helper that serializes *data* and POSTs it as JSON.
        #does not 'mint' anything provided the test environment variables remain blank.
        # NOTE(review): the 'encoding' kwarg of json.dumps was removed in
        # Python 3 — this helper assumes a Python 2 runtime; verify.
        headers = {'content-type': 'application/json; charset=utf-8'}
        return self.app.post('/titles/dn1234', data=json.dumps(data, encoding='utf-8'), headers=headers)

    def test_error_upon_incorrect_json_schema(self):
        # An empty object violates the title schema -> 400.
        data = {}
        res = self.post_to_mint(data)
        self.assertEqual(res.status, '400 BAD REQUEST')

    def test_400_upon_unknown_error(self):
        #validation fails because an integer passed which can't be iterated over
        #Ideally response json should be checked here to ensure it contains 'Error when minting new'
        data = 1
        res = self.post_to_mint(data)
        self.assertEqual(res.status, '400 BAD REQUEST')

    def test_for_correct_response_upon_successful_post(self):
        # Valid sample title payload should be accepted with 201.
        response = requests.get("https://raw.githubusercontent.com/LandRegistry/generate-test-data/master/sample_titles/title-full.json")
        data = response.json()
        self.assertEqual(self.post_to_mint(data).status, '201 CREATED')

    @mock.patch('redis.Redis.info')
    def test_health(self, mock_info):
        # Mocking redis.Redis.info lets the health endpoint report healthy.
        self.assertEqual((self.app.get('/health')).status, '200 OK')
|
en
| 0.850602
|
#does not 'mint' anything provided the test environment variables remain blank. #validation fails because an integer passed which can't be iterated over #Ideally response json should be checked here to ensure it contains 'Error when minting new'
| 3.041978
| 3
|
pkgcore/test/merge/test_engine.py
|
pombreda/pkgcore
| 1
|
6628787
|
# Copyright: 2007-2010 <NAME> <<EMAIL>>
# License: GPL2/BSD
import os
from snakeoil.osutils import pjoin
from snakeoil.test.mixins import tempdir_decorator
from pkgcore.fs import livefs
from pkgcore.fs.contents import contentsSet
from pkgcore.merge import engine
from pkgcore.test import TestCase
from pkgcore.test.fs.fs_util import fsFile, fsDir, fsSymlink
from pkgcore.test.merge.util import fake_engine
class fake_pkg(object):
    """Minimal package stand-in: exposes only the attributes the merge
    engine reads (``contents`` and ``label``)."""

    def __init__(self, contents, label=None):
        self.contents = contents
        self.label = label

    def __str__(self):
        return "fake_pkg: " + str(self.label)
class Test_MergeEngineCsets(TestCase):
    """Tests for the contents-set (cset) generators of merge.engine.MergeEngine."""

    # Shared immutable fixture: regular files, directories, a relative
    # symlink and a dangling ("broken") symlink.
    simple_cset = list(fsFile(x) for x in ("/foon", "/usr/dar", "/blah"))
    simple_cset.extend(fsDir(x) for x in ("/usr", "/usr/lib"))
    simple_cset.append(fsSymlink("/usr/lib/blah", "../../blah"))
    simple_cset.append(fsSymlink("/broken-symlink", "dar"))
    simple_cset = contentsSet(simple_cset, mutable=False)

    kls = engine.MergeEngine

    def assertCsetEqual(self, cset1, cset2):
        # Coerce plain iterables so comparison uses contentsSet semantics.
        if not isinstance(cset1, contentsSet):
            cset1 = contentsSet(cset1)
        if not isinstance(cset2, contentsSet):
            cset2 = contentsSet(cset2)
        self.assertEqual(cset1, cset2, reflective=False)

    def assertCsetNotEqual(self, cset1, cset2):
        if not isinstance(cset1, contentsSet):
            cset1 = contentsSet(cset1)
        if not isinstance(cset2, contentsSet):
            cset2 = contentsSet(cset2)
        self.assertNotEqual(cset1, cset2, reflective=False)

    def run_cset(self, target, engine, *args):
        # Call the named cset generator unbound, the way MergeEngine does
        # internally: (engine, csets-mapping, *extra args).
        return getattr(self.kls, target)(engine, engine.csets, *args)

    def test_generate_offset_cset(self):
        engine = fake_engine(csets={"new_cset":self.simple_cset},
            offset='/')
        def run(engine, cset):
            return self.run_cset('generate_offset_cset', engine,
                lambda e, c:c[cset])
        # Offset '/' is a no-op; a non-root offset prefixes every entry.
        self.assertCsetEqual(self.simple_cset, run(engine, 'new_cset'))
        engine.offset = '/foon/'
        run(engine, 'new_cset')
        self.assertCsetEqual(self.simple_cset.insert_offset(engine.offset),
            run(engine, 'new_cset'))

    def test_get_pkg_contents(self):
        new_cset = self.kls.get_pkg_contents(None, None, fake_pkg(self.simple_cset))
        self.assertCsetEqual(self.simple_cset, new_cset)
        # must differ; shouldn't be modifying the original cset
        self.assertNotIdentical(self.simple_cset, new_cset)

    def test_get_remove_cset(self):
        # Install image holds only the non-file entries, so every regular
        # file of the old cset should be scheduled for removal.
        files = contentsSet(self.simple_cset.iterfiles(invert=True))
        engine = fake_engine(csets={'install':files,
            'old_cset':self.simple_cset})
        self.assertCsetEqual(self.simple_cset.iterfiles(),
            self.run_cset('get_remove_cset', engine))

    def test_get_replace_cset(self):
        # Entries shared by install and old cset form the replace cset.
        files = contentsSet(self.simple_cset.iterfiles(invert=True))
        engine = fake_engine(csets={'install':files,
            'old_cset':self.simple_cset})
        self.assertCsetEqual(files,
            self.run_cset('get_replace_cset', engine))

    @tempdir_decorator
    def test_rewrite_awareness(self):
        # With /usr/lib a symlink to lib64 on disk, an install to
        # /usr/lib/donkey must be resolved to /usr/lib64/donkey.
        src = contentsSet(self.simple_cset)
        src.add(fsFile("/usr/lib/donkey"))
        trg = src.difference(["/usr/lib/donkey"])
        trg.add(fsFile("/usr/lib64/donkey"))
        trg = trg.insert_offset(self.dir)
        os.mkdir(pjoin(self.dir, 'usr'))
        os.mkdir(pjoin(self.dir, 'usr', 'lib64'))
        os.symlink('lib64', pjoin(self.dir, 'usr', 'lib'))
        pkg = fake_pkg(src)
        engine = self.kls.install(self.dir, pkg, offset=self.dir)
        result = engine.csets['resolved_install']
        self.assertEqual(sorted(result.iterfiles()), sorted(trg.iterfiles()))

    @tempdir_decorator
    def test_symlink_awareness(self):
        # A file under the in-cset symlink /usr/lib/blah -> ../../blah
        # should land under /blah in the new cset.
        src = contentsSet(self.simple_cset)
        src.add(fsFile("/usr/lib/blah/donkey"))
        trg = src.difference(["/usr/lib/blah/donkey"])
        trg.add(fsFile("/blah/donkey"))
        trg = trg.insert_offset(self.dir)
        pkg = fake_pkg(src)
        engine = self.kls.install(self.dir, pkg, offset=self.dir)
        result = engine.csets['new_cset']
        self.assertEqual(sorted(result.iterfiles()), sorted(trg.iterfiles()))
    # Known gap in contentsSet; the test is kept but skipped.
    test_symlink_awareness.skip = "contentset should handle this"

    @tempdir_decorator
    def test__get_livefs_intersect_cset(self):
        old_cset = self.simple_cset.insert_offset(self.dir)
        # have to add it; scan adds the root node
        old_cset.add(fsDir(self.dir))
        os.mkdir(pjoin(self.dir, "usr"))
        open(pjoin(self.dir, "usr", "dar"), 'w').close()
        open(pjoin(self.dir, 'foon'), 'w').close()
        # note that this *is* a sym in the cset; adding this specific
        # check so that if the code differs, the test breaks, and the tests
        # get updated (additionally, folks may not be aware of the potential)
        open(pjoin(self.dir, 'broken-symlink'), 'w').close()
        engine = fake_engine(csets={'test':old_cset})
        existent = livefs.scan(self.dir)
        generated = self.run_cset('_get_livefs_intersect_cset', engine,
            'test')
        self.assertEqual(generated, existent)
|
# Copyright: 2007-2010 <NAME> <<EMAIL>>
# License: GPL2/BSD
import os
from snakeoil.osutils import pjoin
from snakeoil.test.mixins import tempdir_decorator
from pkgcore.fs import livefs
from pkgcore.fs.contents import contentsSet
from pkgcore.merge import engine
from pkgcore.test import TestCase
from pkgcore.test.fs.fs_util import fsFile, fsDir, fsSymlink
from pkgcore.test.merge.util import fake_engine
class fake_pkg(object):
    """Lightweight fake of a package object for merge-engine tests.

    Only ``contents`` and ``label`` are ever accessed by the engine.
    """

    def __init__(self, contents, label=None):
        self.contents = contents
        self.label = label

    def __str__(self):
        return "fake_pkg: " + str(self.label)
class Test_MergeEngineCsets(TestCase):
    """Exercises MergeEngine's cset-generation hooks against a small fixture."""

    # Class-level fixture frozen into an immutable contentsSet: files,
    # dirs, one relative symlink, one dangling symlink.
    simple_cset = list(fsFile(x) for x in ("/foon", "/usr/dar", "/blah"))
    simple_cset.extend(fsDir(x) for x in ("/usr", "/usr/lib"))
    simple_cset.append(fsSymlink("/usr/lib/blah", "../../blah"))
    simple_cset.append(fsSymlink("/broken-symlink", "dar"))
    simple_cset = contentsSet(simple_cset, mutable=False)

    kls = engine.MergeEngine

    def assertCsetEqual(self, cset1, cset2):
        # Normalize both sides to contentsSet before comparing.
        if not isinstance(cset1, contentsSet):
            cset1 = contentsSet(cset1)
        if not isinstance(cset2, contentsSet):
            cset2 = contentsSet(cset2)
        self.assertEqual(cset1, cset2, reflective=False)

    def assertCsetNotEqual(self, cset1, cset2):
        if not isinstance(cset1, contentsSet):
            cset1 = contentsSet(cset1)
        if not isinstance(cset2, contentsSet):
            cset2 = contentsSet(cset2)
        self.assertNotEqual(cset1, cset2, reflective=False)

    def run_cset(self, target, engine, *args):
        # Dispatch to a cset generator the same way MergeEngine does.
        return getattr(self.kls, target)(engine, engine.csets, *args)

    def test_generate_offset_cset(self):
        engine = fake_engine(csets={"new_cset":self.simple_cset},
            offset='/')
        def run(engine, cset):
            return self.run_cset('generate_offset_cset', engine,
                lambda e, c:c[cset])
        # Root offset leaves paths untouched; other offsets prefix them.
        self.assertCsetEqual(self.simple_cset, run(engine, 'new_cset'))
        engine.offset = '/foon/'
        run(engine, 'new_cset')
        self.assertCsetEqual(self.simple_cset.insert_offset(engine.offset),
            run(engine, 'new_cset'))

    def test_get_pkg_contents(self):
        new_cset = self.kls.get_pkg_contents(None, None, fake_pkg(self.simple_cset))
        self.assertCsetEqual(self.simple_cset, new_cset)
        # must differ; shouldn't be modifying the original cset
        self.assertNotIdentical(self.simple_cset, new_cset)

    def test_get_remove_cset(self):
        # Files missing from the install image are flagged for removal.
        files = contentsSet(self.simple_cset.iterfiles(invert=True))
        engine = fake_engine(csets={'install':files,
            'old_cset':self.simple_cset})
        self.assertCsetEqual(self.simple_cset.iterfiles(),
            self.run_cset('get_remove_cset', engine))

    def test_get_replace_cset(self):
        # Entries present in both install and old cset are replacements.
        files = contentsSet(self.simple_cset.iterfiles(invert=True))
        engine = fake_engine(csets={'install':files,
            'old_cset':self.simple_cset})
        self.assertCsetEqual(files,
            self.run_cset('get_replace_cset', engine))

    @tempdir_decorator
    def test_rewrite_awareness(self):
        # On-disk /usr/lib -> lib64 symlink should redirect the install
        # of /usr/lib/donkey to /usr/lib64/donkey.
        src = contentsSet(self.simple_cset)
        src.add(fsFile("/usr/lib/donkey"))
        trg = src.difference(["/usr/lib/donkey"])
        trg.add(fsFile("/usr/lib64/donkey"))
        trg = trg.insert_offset(self.dir)
        os.mkdir(pjoin(self.dir, 'usr'))
        os.mkdir(pjoin(self.dir, 'usr', 'lib64'))
        os.symlink('lib64', pjoin(self.dir, 'usr', 'lib'))
        pkg = fake_pkg(src)
        engine = self.kls.install(self.dir, pkg, offset=self.dir)
        result = engine.csets['resolved_install']
        self.assertEqual(sorted(result.iterfiles()), sorted(trg.iterfiles()))

    @tempdir_decorator
    def test_symlink_awareness(self):
        # The in-cset /usr/lib/blah symlink should resolve a nested file
        # to its /blah target.
        src = contentsSet(self.simple_cset)
        src.add(fsFile("/usr/lib/blah/donkey"))
        trg = src.difference(["/usr/lib/blah/donkey"])
        trg.add(fsFile("/blah/donkey"))
        trg = trg.insert_offset(self.dir)
        pkg = fake_pkg(src)
        engine = self.kls.install(self.dir, pkg, offset=self.dir)
        result = engine.csets['new_cset']
        self.assertEqual(sorted(result.iterfiles()), sorted(trg.iterfiles()))
    # Marked skipped: contentsSet does not handle this case yet.
    test_symlink_awareness.skip = "contentset should handle this"

    @tempdir_decorator
    def test__get_livefs_intersect_cset(self):
        old_cset = self.simple_cset.insert_offset(self.dir)
        # have to add it; scan adds the root node
        old_cset.add(fsDir(self.dir))
        os.mkdir(pjoin(self.dir, "usr"))
        open(pjoin(self.dir, "usr", "dar"), 'w').close()
        open(pjoin(self.dir, 'foon'), 'w').close()
        # note that this *is* a sym in the cset; adding this specific
        # check so that if the code differs, the test breaks, and the tests
        # get updated (additionally, folks may not be aware of the potential)
        open(pjoin(self.dir, 'broken-symlink'), 'w').close()
        engine = fake_engine(csets={'test':old_cset})
        existent = livefs.scan(self.dir)
        generated = self.run_cset('_get_livefs_intersect_cset', engine,
            'test')
        self.assertEqual(generated, existent)
|
en
| 0.777942
|
# Copyright: 2007-2010 <NAME> <<EMAIL>> # License: GPL2/BSD # must differ; shouldn't be modifying the original cset # have to add it; scan adds the root node # note that this *is* a sym in the cset; adding this specific # check so that if the code differs, the test breaks, and the tests # get updated (additionally, folks may not be aware of the potential)
| 2.100241
| 2
|
tools/downloader/quantizer.py
|
Aya-ZIbra/open_model_zoo
| 4
|
6628788
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import string
import subprocess
import sys
import tempfile
from pathlib import Path
import common
OMZ_ROOT = Path(__file__).resolve().parents[2]
def quantize(model, precision, args, output_dir, pot_path, pot_env):
    """Quantize one model to *precision* by driving POT as a subprocess.

    Writes a pot-config.json under the model's output directory, runs the
    Post-Training Optimization Toolkit with *pot_env* merged into the
    environment, then moves the resulting IR files into place.

    Returns True on success (or when --dry_run is set), False otherwise.
    """
    # Source precision the quantized model is derived from (e.g. FP16-INT8 <- FP16).
    input_precision = common.KNOWN_QUANTIZED_PRECISIONS[precision]

    pot_config = {
        'compression': {
            'algorithms': [
                {
                    'name': 'DefaultQuantization',
                    'params': {
                        'preset': 'performance',
                        'stat_subset_size': 300,
                    },
                },
            ],
        },
        'engine': {
            # Reuse the model's accuracy-checker config as the POT engine config.
            'config': str(OMZ_ROOT / 'tools/accuracy_checker/configs' / (model.name + '.yml')),
        },
        'model': {
            'model': str(args.model_dir / model.subdirectory / input_precision / (model.name + '.xml')),
            'weights': str(args.model_dir / model.subdirectory / input_precision / (model.name + '.bin')),
            'model_name': model.name,
        }
    }

    if args.target_device:
        pot_config['compression']['target_device'] = args.target_device

    print('========= {}Quantizing {} from {} to {}'.format(
        '(DRY RUN) ' if args.dry_run else '', model.name, input_precision, precision))

    model_output_dir = output_dir / model.subdirectory / precision
    pot_config_path = model_output_dir / 'pot-config.json'

    print('Creating {}...'.format(pot_config_path))
    pot_config_path.parent.mkdir(parents=True, exist_ok=True)
    with pot_config_path.open('w') as pot_config_file:
        json.dump(pot_config, pot_config_file, indent=4)
        pot_config_file.write('\n')

    pot_output_dir = model_output_dir / 'pot-output'
    pot_output_dir.mkdir(parents=True, exist_ok=True)

    pot_cmd = [str(args.python), '--', str(pot_path),
        '--config={}'.format(pot_config_path),
        '--direct-dump',
        '--output-dir={}'.format(pot_output_dir),
    ]

    print('Quantization command: {}'.format(common.command_string(pot_cmd)))
    print('Quantization environment: {}'.format(
        ' '.join('{}={}'.format(k, common.quote_arg(v))
            for k, v in sorted(pot_env.items()))))

    success = True
    if not args.dry_run:
        print('', flush=True)
        # Inherit the current environment, overridden by the POT-specific vars.
        success = subprocess.run(pot_cmd, env={**os.environ, **pot_env}).returncode == 0
        print('')

    if not success: return False

    if not args.dry_run:
        print('Moving quantized model to {}...'.format(model_output_dir))
        for ext in ['.xml', '.bin']:
            # --direct-dump writes the IR under 'optimized'; relocate it.
            (pot_output_dir / 'optimized' / (model.name + ext)).replace(
                model_output_dir / (model.name + ext))
        print('')

    return True
def main():
    """Parse CLI arguments and quantize every selected, quantizable model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir', type=Path, help='root of the dataset directory tree')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='quantize only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='quantize only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='quantize all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with')
    parser.add_argument('--pot', type=Path, help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument('--dry_run', action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='quantize only to the specified precisions')
    parser.add_argument('--target_device', help='target device for the quantized model')
    args = parser.parse_args()

    pot_path = args.pot
    if pot_path is None:
        try:
            # Fall back to the POT entry point of an installed OpenVINO toolkit.
            pot_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/post_training_optimization_toolkit/main.py'
        except KeyError:
            sys.exit('Unable to locate Post-Training Optimization Toolkit. '
                + 'Use --pot or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

    models = common.load_models_from_args(parser, args)

    # We can't mark it as required, because it's not required when --print_all is specified.
    # So we have to check it manually.
    if not args.dataset_dir:
        sys.exit('--dataset_dir must be specified.')

    if args.precisions is None:
        requested_precisions = common.KNOWN_QUANTIZED_PRECISIONS.keys()
    else:
        requested_precisions = set(args.precisions.split(','))

    unknown_precisions = requested_precisions - common.KNOWN_QUANTIZED_PRECISIONS.keys()
    if unknown_precisions:
        sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

    output_dir = args.output_dir or args.model_dir

    failed_models = []

    # Annotations are regenerated into a throwaway directory per run.
    with tempfile.TemporaryDirectory() as temp_dir:
        annotation_dir = Path(temp_dir) / 'annotations'
        annotation_dir.mkdir()

        pot_env = {
            'ANNOTATIONS_DIR': str(annotation_dir),
            'DATA_DIR': str(args.dataset_dir),
        }

        for model in models:
            if not model.quantizable:
                print('========= Skipping {} (quantization not supported)'.format(model.name))
                print('')
                continue

            for precision in sorted(requested_precisions):
                # Stop at the first failing precision for a given model.
                if not quantize(model, precision, args, output_dir, pot_path, pot_env):
                    failed_models.append(model.name)
                    break

    if failed_models:
        print('FAILED:')
        print(*sorted(failed_models), sep='\n')
        sys.exit(1)

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import string
import subprocess
import sys
import tempfile
from pathlib import Path
import common
OMZ_ROOT = Path(__file__).resolve().parents[2]
def quantize(model, precision, args, output_dir, pot_path, pot_env):
    """Run POT to quantize *model* to *precision*.

    Emits the POT JSON config, invokes the toolkit as a subprocess with
    *pot_env* layered over the current environment, and relocates the
    produced IR (.xml/.bin) into the output tree.  Returns True on
    success or dry run, False if the POT subprocess failed.
    """
    # Precision of the source IR the quantized model is built from.
    input_precision = common.KNOWN_QUANTIZED_PRECISIONS[precision]

    pot_config = {
        'compression': {
            'algorithms': [
                {
                    'name': 'DefaultQuantization',
                    'params': {
                        'preset': 'performance',
                        'stat_subset_size': 300,
                    },
                },
            ],
        },
        'engine': {
            # POT reuses the accuracy-checker YAML as its engine config.
            'config': str(OMZ_ROOT / 'tools/accuracy_checker/configs' / (model.name + '.yml')),
        },
        'model': {
            'model': str(args.model_dir / model.subdirectory / input_precision / (model.name + '.xml')),
            'weights': str(args.model_dir / model.subdirectory / input_precision / (model.name + '.bin')),
            'model_name': model.name,
        }
    }

    if args.target_device:
        pot_config['compression']['target_device'] = args.target_device

    print('========= {}Quantizing {} from {} to {}'.format(
        '(DRY RUN) ' if args.dry_run else '', model.name, input_precision, precision))

    model_output_dir = output_dir / model.subdirectory / precision
    pot_config_path = model_output_dir / 'pot-config.json'

    print('Creating {}...'.format(pot_config_path))
    pot_config_path.parent.mkdir(parents=True, exist_ok=True)
    with pot_config_path.open('w') as pot_config_file:
        json.dump(pot_config, pot_config_file, indent=4)
        pot_config_file.write('\n')

    pot_output_dir = model_output_dir / 'pot-output'
    pot_output_dir.mkdir(parents=True, exist_ok=True)

    pot_cmd = [str(args.python), '--', str(pot_path),
        '--config={}'.format(pot_config_path),
        '--direct-dump',
        '--output-dir={}'.format(pot_output_dir),
    ]

    print('Quantization command: {}'.format(common.command_string(pot_cmd)))
    print('Quantization environment: {}'.format(
        ' '.join('{}={}'.format(k, common.quote_arg(v))
            for k, v in sorted(pot_env.items()))))

    success = True
    if not args.dry_run:
        print('', flush=True)
        # POT-specific vars override the inherited process environment.
        success = subprocess.run(pot_cmd, env={**os.environ, **pot_env}).returncode == 0
        print('')

    if not success: return False

    if not args.dry_run:
        print('Moving quantized model to {}...'.format(model_output_dir))
        for ext in ['.xml', '.bin']:
            # With --direct-dump the IR lands in 'optimized'; move it up.
            (pot_output_dir / 'optimized' / (model.name + ext)).replace(
                model_output_dir / (model.name + ext))
        print('')

    return True
def main():
    """Parse command-line options and quantize every selected model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir', type=Path, help='root of the dataset directory tree')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='quantize only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='quantize only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='quantize all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with')
    parser.add_argument('--pot', type=Path, help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument('--dry_run', action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='quantize only to the specified precisions')
    parser.add_argument('--target_device', help='target device for the quantized model')
    args = parser.parse_args()

    # Locate the POT entry point: explicit --pot wins, otherwise derive it
    # from the OpenVINO installation pointed to by the environment.
    pot_script = args.pot
    if pot_script is None:
        try:
            pot_script = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/post_training_optimization_toolkit/main.py'
        except KeyError:
            sys.exit('Unable to locate Post-Training Optimization Toolkit. '
                + 'Use --pot or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

    models = common.load_models_from_args(parser, args)

    # --dataset_dir cannot be marked required=True because --print_all must
    # work without it, so validate manually after model selection.
    if not args.dataset_dir:
        sys.exit('--dataset_dir must be specified.')

    if args.precisions is None:
        precisions = common.KNOWN_QUANTIZED_PRECISIONS.keys()
    else:
        precisions = set(args.precisions.split(','))
        unknown = precisions - common.KNOWN_QUANTIZED_PRECISIONS.keys()
        if unknown:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown))))

    out_root = args.output_dir or args.model_dir
    failures = []

    with tempfile.TemporaryDirectory() as scratch:
        # Accuracy Checker annotations are regenerated per run and discarded.
        annotations = Path(scratch) / 'annotations'
        annotations.mkdir()

        environment = {
            'ANNOTATIONS_DIR': str(annotations),
            'DATA_DIR': str(args.dataset_dir),
        }

        for model in models:
            if not model.quantizable:
                print('========= Skipping {} (quantization not supported)'.format(model.name))
                print('')
                continue

            for precision in sorted(precisions):
                if not quantize(model, precision, args, out_root, pot_script, environment):
                    failures.append(model.name)
                    break  # first failing precision fails the whole model

    if failures:
        print('FAILED:')
        print(*sorted(failures), sep='\n')
        sys.exit(1)
if __name__ == '__main__':  # run only when executed as a script, not on import
    main()
|
en
| 0.88301
|
#!/usr/bin/env python3 # Copyright (c) 2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # We can't mark it as required, because it's not required when --print_all is specified. # So we have to check it manually.
| 2.130897
| 2
|
projects/migrations/0036_project_ownername.py
|
peppasd/LIT
| 2
|
6628789
|
<filename>projects/migrations/0036_project_ownername.py
# Generated by Django 3.0.8 on 2020-07-23 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the ``ownerName`` CharField to the Project model."""

    # Must apply after the previous migration in the 'projects' app.
    dependencies = [
        ('projects', '0035_auto_20200723_1652'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='ownerName',
            # Default '' lets the column be added to existing rows without a prompt.
            field=models.CharField(default='', max_length=160),
        ),
    ]
|
<filename>projects/migrations/0036_project_ownername.py
# Generated by Django 3.0.8 on 2020-07-23 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the ``ownerName`` CharField to the Project model."""

    # Must apply after the previous migration in the 'projects' app.
    dependencies = [
        ('projects', '0035_auto_20200723_1652'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='ownerName',
            # Default '' lets the column be added to existing rows without a prompt.
            field=models.CharField(default='', max_length=160),
        ),
    ]
|
en
| 0.785731
|
# Generated by Django 3.0.8 on 2020-07-23 16:32
| 1.455463
| 1
|
evennia/settings_default.py
|
davidrideout/evennia
| 0
|
6628790
|
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/server/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from django.contrib.messages import constants as messages
from django.urls import reverse_lazy
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Short one-sentence blurb describing your game. Shown under the title
# on the website and could be used in online listings of your game etc.
GAME_SLOGAN = "The Python MUD/MU* creation system"
# The url address to your server, like mymudgame.com. This should be the publicly
# visible location. This is used e.g. on the web site to show how you connect to the
# game over telnet. Default is localhost (only on your machine).
SERVER_HOSTNAME = "localhost"
# Lockdown mode will cut off the game from any external connections
# and only allow connections from localhost. Requires a cold reboot.
LOCKDOWN_MODE = False
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ["0.0.0.0"]
# Activate Telnet+SSL protocol (SecureSocketLibrary) for supporting clients
SSL_ENABLED = False
# Ports to use for Telnet+SSL
SSL_PORTS = [4003]
# Telnet+SSL Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ["0.0.0.0"]
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [4004]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ["0.0.0.0"]
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(4001, 4005)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ["0.0.0.0"]
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ["127.0.0.1"]
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version if the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient. Note that this value will
# be dynamically encoded in the webclient html page to allow the webclient to call
# home. If the external encoded value needs to be different than this, due to
# working through a proxy or docker port-remapping, the environment variable
# WEBCLIENT_CLIENT_PROXY_PORT can be used to override this port only for the
# front-facing client's sake.
WEBSOCKET_CLIENT_PORT = 4002
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = "0.0.0.0"
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws[s]://hostname[:port]". If left at None,
# the client will itself figure out this url based on the server's hostname.
# e.g. ws://external.example.com or wss://external.example.com:443
WEBSOCKET_CLIENT_URL = None
# This determine's whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# The Server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = "localhost"
AMP_PORT = 4006
AMP_INTERFACE = "127.0.0.1"
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the server/conf/settings.py file)
# This is dynamically created- there is generally no need to change this!
if EVENNIA_DIR.lower() == os.getcwd().lower() or (
sys.argv[1] == "test" if len(sys.argv) > 1 else False
):
# unittesting mode
GAME_DIR = os.getcwd()
else:
# Fallback location (will be replaced by the actual game dir at runtime)
GAME_DIR = os.path.join(EVENNIA_DIR, "game_template")
for i in range(10):
gpath = os.getcwd()
if "server" in os.listdir(gpath):
if os.path.isfile(os.path.join("server", "conf", "settings.py")):
GAME_DIR = gpath
break
os.chdir(os.pardir)
# Place to put log files, how often to rotate the log and how big each log file
# may become before rotating.
LOG_DIR = os.path.join(GAME_DIR, "server", "logs")
SERVER_LOG_FILE = os.path.join(LOG_DIR, "server.log")
SERVER_LOG_DAY_ROTATION = 7
SERVER_LOG_MAX_SIZE = 1000000
PORTAL_LOG_FILE = os.path.join(LOG_DIR, "portal.log")
PORTAL_LOG_DAY_ROTATION = 7
PORTAL_LOG_MAX_SIZE = 1000000
# The http log is usually only for debugging since it's very spammy
HTTP_LOG_FILE = os.path.join(LOG_DIR, "http_requests.log")
# if this is set to the empty string, lockwarnings will be turned off.
LOCKWARNING_LOG_FILE = os.path.join(LOG_DIR, "lockwarnings.log")
# Number of lines to append to rotating channel logs when they rotate
CHANNEL_LOG_NUM_TAIL_LINES = 20
# Max size (in bytes) of channel log files before they rotate
CHANNEL_LOG_ROTATE_SIZE = 1000000
# Unused by default, but used by e.g. the MapSystem contrib. A place for storing
# semi-permanent data and avoid it being rebuilt over and over. It is created
# on-demand only.
CACHE_DIR = os.path.join(GAME_DIR, "server", ".cache")
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = "UTC"
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = ["evennia.web.utils.backends.CaseInsensitiveModelBackend"]
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = "en-us"
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = -1
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer. Note that "idle" will *always* work, even if a different
# command-name is given here; this is because the webclient needs a default
# to send to avoid proxy timeouts.
IDLE_COMMAND = "idle"
# The set of encodings tried. An Account object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your accounts are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
# Telnet default encoding, unless specified by the client, will be ENCODINGS[0].
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# MXP support means the ability to show clickable links in the client. Clicking
# the link will execute a game command. It's a way to add mouse input to the game.
MXP_ENABLED = True
# If this is set, MXP can only be sent by the server and not added from the
# client side. Disabling this is a potential security risk because it could
# allow malevolent players to lure others to execute commands they did not
# intend to.
MXP_OUTGOING_ONLY = True
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
# custom, extra commands to add to the `evennia` launcher. This is a dict
# of {'cmdname': 'path.to.callable', ...}, where the callable will be passed
# any extra args given on the command line. For example `evennia cmdname foo bar`.
EXTRA_LAUNCHER_COMMANDS = {}
# Determine how large of a string can be sent to the server in number
# of characters. If they attempt to enter a string over this character
# limit, we stop them and send a message. To make unlimited, set to
# 0 or less.
MAX_CHAR_LIMIT = 6000
# The warning to echo back to users if they enter a very large string
MAX_CHAR_LIMIT_WARNING = (
"You entered a string that was too long. " "Please break it up into multiple parts."
)
# If this is true, errors and tracebacks from the engine will be
# echoed as text in-game as well as to the log. This can speed up
# debugging. OBS: Showing full tracebacks to regular users could be a
# security problem -turn this off in a production game!
IN_GAME_ERRORS = True
# Broadcast "Server restart"-like messages to all sessions.
BROADCAST_SERVER_RESTART_MESSAGES = True
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - <PASSWORD> (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.getenv("TEST_DB_PATH", os.path.join(GAME_DIR, "server", "evennia.db3")),
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
}
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
# When removing or renaming models, such models stored in Attributes may
# become orphaned and will return as None. If the change is a rename (that
# is, there is a 1:1 pk mapping between the old and the new), the unserializer
# can convert old to new when retrieving them. This is a list of tuples
# (old_natural_key, new_natural_key). Note that Django ContentTypes'
# natural_keys are themselves tuples (appname, modelname). Creation-dates will
# not be checked for models specified here. If new_natural_key does not exist,
# `None` will be returned and stored back as if no replacement was set.
ATTRIBUTE_STORED_MODEL_RENAME = [
(("players", "playerdb"), ("accounts", "accountdb")),
(("typeclasses", "defaultplayer"), ("typeclasses", "defaultaccount")),
]
# Default type of autofield (required by Django)
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
######################################################################
# Evennia webclient options
######################################################################
# default webclient options (without user changing it)
WEBCLIENT_OPTIONS = {
# Gags prompts in output window and puts them on the input bar
"gagprompt": True,
# Shows help files in a new popup window instead of in-pane
"helppopup": False,
# Shows notifications of new messages as popup windows
"notification_popup": False,
# Plays a sound for notifications of new messages
"notification_sound": False
}
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects
# are found, they can by default be separated as 1-box, 2-box. Below you
# can change the regular expression used. The regex must have one
# have two capturing groups (?P<number>...) and (?P<name>...) - the default
# parser expects this. It should also involve a number starting from 1.
# When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE
# to properly describe the syntax.
SEARCH_MULTIMATCH_REGEX = r"(?P<name>[^-]*)-(?P<number>[0-9]+)(?P<args>.*)"
# To display multimatch errors in various listings we must display
# the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand.
# The template will be populated with data and expects the following markup:
# {number} - the order of the multimatch, starting from 1; {name} - the
# name (key) of the multimatched entity; {aliases} - eventual
# aliases for the entity; {info} - extra info like #dbrefs for staff. Don't
# forget a line break if you want one match per line.
SEARCH_MULTIMATCH_TEMPLATE = " {name}-{number}{aliases}{info}\n"
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches. This allows full control
# over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default).
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# Single characters to ignore at the beginning of a command. When set, e.g.
# cmd, @cmd and +cmd will all find a command "cmd" or one named "@cmd" etc. If
# you have defined two different commands cmd and @cmd you can still enter
# @cmd to exactly target the second one. Single-character commands consisting
# of only a prefix character will not be stripped. Set to the empty
# string ("") to turn off prefix ignore.
CMD_IGNORE_PREFIXES = "@&/+"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# Delay to use before sending the evennia.syscmdkeys.CMD_LOGINSTART Command
# when a new session connects (this defaults the unloggedin-look for showing
# the connection screen). The delay is useful mainly for telnet, to allow
# client/server to establish client capabilities like color/mxp etc before
# sending any text. A value of 0.3 should be enough. While a good idea, it may
# cause issues with menu-logins and autoconnects since the menu will not have
# started when the autoconnects starts sending menu commands.
DELAY_CMD_LOGINSTART = 0.3
# A module that must exist - this holds the instructions Evennia will use to
# first prepare the database for use (create user #1 and Limbo etc). Only override if
# you really know what # you are doing. If replacing, it must contain a function
# handle_setup(stepname=None). The function will start being called with no argument
# and is expected to maintain a named sequence of steps. Once each step is completed, it
# should be saved with ServerConfig.objects.conf('last_initial_setup_step', stepname)
# on a crash, the system will continue by calling handle_setup with the last completed
# step. The last step in the sequence must be named 'done'. Once this key is saved,
# initialization will not run again.
INITIAL_SETUP_MODULE = "evennia.server.initial_setup"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function start_
# plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many accounts you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Module for web plugins.
WEB_PLUGINS_MODULE = "server.conf.web_plugins"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs")
# Module holding handlers for managing incoming data from the client. These
# will be loaded in order, meaning functions in later modules may overload
# previous ones if having the same name.
INPUT_FUNC_MODULES = ["evennia.server.inputfuncs", "server.conf.inputfuncs"]
# Modules that contain prototypes for use with the spawner mechanism.
PROTOTYPE_MODULES = ["world.prototypes"]
# Modules containining Prototype functions able to be embedded in prototype
# definitions from in-game.
PROT_FUNC_MODULES = ["evennia.prototypes.protfuncs"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
# Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of
# tuples mapping the exact tag (not a regex!) to the ANSI convertion, like
# `(r"%c%r", ansi.ANSI_RED)` (the evennia.utils.ansi module contains all
# ANSI escape sequences). Default is to use `|` and `|[` -prefixes.
COLOR_ANSI_EXTRA_MAP = []
# Extend the available regexes for adding XTERM256 colors in-game. This is given
# as a list of regexes, where each regex must contain three anonymous groups for
# holding integers 0-5 for the red, green and blue components Default is
# is r'\|([0-5])([0-5])([0-5])', which allows e.g. |500 for red.
# XTERM256 foreground color replacement
COLOR_XTERM256_EXTRA_FG = []
# XTERM256 background color replacement. Default is \|\[([0-5])([0-5])([0-5])'
COLOR_XTERM256_EXTRA_BG = []
# Extend the available regexes for adding XTERM256 grayscale values in-game. Given
# as a list of regexes, where each regex must contain one anonymous group containing
# a single letter a-z to mark the level from white to black. Default is r'\|=([a-z])',
# which allows e.g. |=k for a medium gray.
# XTERM256 grayscale foreground
COLOR_XTERM256_EXTRA_GFG = []
# XTERM256 grayscale background. Default is \|\[=([a-z])'
COLOR_XTERM256_EXTRA_GBG = []
# ANSI does not support bright backgrounds, so Evennia fakes this by mapping it to
# XTERM256 backgrounds where supported. This is a list of tuples that maps the wanted
# ansi tag (not a regex!) to a valid XTERM256 background tag, such as `(r'{[r', r'{[500')`.
COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = []
# If set True, the above color settings *replace* the default |-style color markdown
# rather than extend it.
COLOR_NO_DEFAULT = False
######################################################################
# Default command sets and commands
######################################################################
# Command set used on session before account has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# (Note that changing these three following cmdset paths will only affect NEW
# created characters/objects, not those already in play. So if you want to
# change this and have it apply to every object, it's recommended you do it
# before having created a lot of objects (or simply reset the database after
# the change for simplicity)).
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in account with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for accounts without a character (ooc)
CMDSET_ACCOUNT = "commands.default_cmdsets.AccountCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "evennia.contrib"]
# Fallbacks for cmdset paths that fail to load. Note that if you change the path for your
# default cmdsets, you will also need to copy CMDSET_FALLBACKS after your change in your
# settings file for it to detect the change.
CMDSET_FALLBACKS = {
CMDSET_CHARACTER: "evennia.commands.default.cmdset_character.CharacterCmdSet",
CMDSET_ACCOUNT: "evennia.commands.default.cmdset_account.AccountCmdSet",
CMDSET_SESSION: "evennia.commands.default.cmdset_session.SessionCmdSet",
CMDSET_UNLOGGEDIN: "evennia.commands.default.cmdset_unloggedin.UnloggedinCmdSet",
}
# Parent class for all default commands. Changing this class will
# modify all default commands, so do so carefully.
COMMAND_DEFAULT_CLASS = "evennia.commands.default.muxcommand.MuxCommand"
# Command.arg_regex is a regular expression desribing how the arguments
# to the command must be structured for the command to match a given user
# input. By default the command-name should end with a space or / (since the
# default commands uses MuxCommand and /switches).
COMMAND_DEFAULT_ARG_REGEX = r'^[ /]+.*$|$'
# By default, Command.msg will only send data to the Session calling
# the Command in the first place. If set, Command.msg will instead return
# data to all Sessions connected to the Account/Character associated with
# calling the Command. This may be more intuitive for users in certain
# multisession modes.
COMMAND_DEFAULT_MSG_ALL_SESSIONS = False
# The default lockstring of a command.
COMMAND_DEFAULT_LOCKS = ""
######################################################################
# Typeclasses and other paths
######################################################################
# These are paths that will be prefixed to the paths given if the
# immediately entered path fail to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = [
"typeclasses",
"evennia",
"evennia.contrib",
"evennia.contrib.game_systems",
"evennia.contrib.base_systems",
"evennia.contrib.full_systems",
"evennia.contrib.tutorials",
"evennia.contrib.utils",
]
# Typeclass for account objects (linked to a character) (fallback)
BASE_ACCOUNT_TYPECLASS = "typeclasses.accounts.Account"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to an account (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2, 3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
# These are fallbacks for BASE typeclasses failing to load. Usually needed only
# during doc building. The system expects these to *always* load correctly, so
# only modify if you are making fundamental changes to how objects/accounts
# work and know what you are doing
FALLBACK_ACCOUNT_TYPECLASS = "evennia.accounts.accounts.DefaultAccount"
FALLBACK_OBJECT_TYPECLASS = "evennia.objects.objects.DefaultObject"
FALLBACK_CHARACTER_TYPECLASS = "evennia.objects.objects.DefaultCharacter"
FALLBACK_ROOM_TYPECLASS = "evennia.objects.objects.DefaultRoom"
FALLBACK_EXIT_TYPECLASS = "evennia.objects.objects.DefaultExit"
FALLBACK_CHANNEL_TYPECLASS = "evennia.comms.comms.DefaultChannel"
FALLBACK_SCRIPT_TYPECLASS = "evennia.scripts.scripts.DefaultScript"
######################################################################
# Options and validators
######################################################################
# Options available on Accounts. Each such option is described by a
# class available from evennia.OPTION_CLASSES, in turn making use
# of validators from evennia.VALIDATOR_FUNCS to validate input when
# the user changes an option. The options are accessed through the
# `Account.options` handler.
# ("Description", 'Option Class name in evennia.OPTION_CLASS_MODULES', 'Default Value')
OPTIONS_ACCOUNT_DEFAULT = {
"border_color": ("Headers, footers, table borders, etc.", "Color", "n"),
"header_star_color": ("* inside Header lines.", "Color", "n"),
"header_text_color": ("Text inside Header lines.", "Color", "w"),
"header_fill": ("Fill for Header lines.", "Text", "="),
"separator_star_color": ("* inside Separator lines.", "Color", "n"),
"separator_text_color": ("Text inside Separator lines.", "Color", "w"),
"separator_fill": ("Fill for Separator Lines.", "Text", "-"),
"footer_star_color": ("* inside Footer lines.", "Color", "n"),
"footer_text_color": ("Text inside Footer Lines.", "Color", "n"),
"footer_fill": ("Fill for Footer Lines.", "Text", "="),
"column_names_color": ("Table column header text.", "Color", "w"),
"help_category_color": ("Help category names.", "Color", "n"),
"help_entry_color": ("Help entry names.", "Color", "n"),
"timezone": ("Timezone for dates. @tz for a list.", "Timezone", "UTC"),
}
# Modules holding Option classes, responsible for serializing the option and
# calling validator functions on it. Same-named functions in modules added
# later in this list will override those added earlier.
OPTION_CLASS_MODULES = ["evennia.utils.optionclasses"]
# Module holding validator functions. These are used as a resource for
# validating options, but can also be used as input validators in general.
# Same-named functions in modules added later in this list will override those
# added earlier.
VALIDATOR_FUNC_MODULES = ["evennia.utils.validatorfuncs"]
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = [
"world",
"evennia.contrib",
"evennia.contrib.tutorials",
]
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# The starting point of your game time (the epoch), in seconds.
# In Python a value of 0 means Jan 1 1970 (use negatives for earlier
# start date). This will affect the returns from the utils.gametime
# module. If None, the server's first start-time is used as the epoch.
TIME_GAME_EPOCH = None
# Normally, game time will only increase when the server runs. If this is True,
# game time will not pause when the server reloads or goes offline. This setting
# together with a time factor of 1 should keep the game in sync with
# the real time (add a different epoch to shift time)
TIME_IGNORE_DOWNTIMES = False
######################################################################
# Help system
######################################################################
# Help output from CmdHelp are wrapped in an EvMore call
# (excluding webclient with separate help popups). If continuous scroll
# is preferred, change 'HELP_MORE' to False. EvMORE uses CLIENT_DEFAULT_HEIGHT
HELP_MORE_ENABLED = True
# The help category of a command if not specified.
COMMAND_DEFAULT_HELP_CATEGORY = "general"
# The help category of a db or file-based help entry if not specified
DEFAULT_HELP_CATEGORY = "general"
# File-based help entries. These are modules containing dicts defining help
# entries. They can be used together with in-database entries created in-game.
FILE_HELP_ENTRY_MODULES = ["world.help_entries"]
# if topics listed in help should be clickable
# clickable links only work on clients that support MXP.
HELP_CLICKABLE_TOPICS = True
######################################################################
# FuncParser
#
# Strings parsed with the FuncParser can contain 'callables' on the
# form $funcname(args,kwargs), which will lead to actual Python functions
# being executed.
######################################################################
# This changes the start-symbol for the funcparser callable. Note that
# this will make a lot of documentation invalid and there may also be
# other unexpected side effects, so change with caution.
FUNCPARSER_START_CHAR = '$'
# The symbol to use to escape Func
FUNCPARSER_ESCAPE_CHAR = '\\'
# This is the global max nesting-level for nesting functions in
# the funcparser. This protects against infinite loops.
FUNCPARSER_MAX_NESTING = 20
# Activate funcparser for all outgoing strings. The current Session
# will be passed into the parser (used to be called inlinefuncs)
FUNCPARSER_PARSE_OUTGOING_MESSAGES_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
FUNCPARSER_OUTGOING_MESSAGES_MODULES = ["evennia.utils.funcparser", "server.conf.inlinefuncs"]
# Prototype values are also parsed with FuncParser. These modules
# define which $func callables are available to use in prototypes.
FUNCPARSER_PROTOTYPE_PARSING_MODULES = ["evennia.prototypes.protfuncs",
"server.conf.prototypefuncs"]
######################################################################
# Global Scripts
######################################################################
# Global scripts started here will be available through
# 'evennia.GLOBAL_SCRIPTS.key'. The scripts will survive a reload and be
# recreated automatically if deleted. Each entry must have the script keys,
# whereas all other fields in the specification are optional. If 'typeclass' is
# not given, BASE_SCRIPT_TYPECLASS will be assumed. Note that if you change
# typeclass for the same key, a new Script will replace the old one on
# `evennia.GLOBAL_SCRIPTS`.
GLOBAL_SCRIPTS = {
# 'key': {'typeclass': 'typeclass.path.here',
# 'repeats': -1, 'interval': 50, 'desc': 'Example script'},
}
######################################################################
# Default Account setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character created to the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allow per account.
# 0 - single session, one account, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one account, one character, each session getting
# the same data
# 2 - multiple sessions, one account, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed by the default ooc char-creation command
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions, which accepts both plural and singular (Admin & Admins)
PERMISSION_HIERARCHY = [
"Guest", # note-only used if GUEST_ENABLED=True
"Player",
"Helper",
"Builder",
"Admin",
"Developer",
]
# The default permission given to all new accounts
PERMISSION_ACCOUNT_DEFAULT = "Player"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
# telnet standard height is 24; does anyone use such low-res displays anymore?
CLIENT_DEFAULT_HEIGHT = 45
# Set rate limits per-IP on account creations and login attempts. Set limits
# to None to disable.
CREATION_THROTTLE_LIMIT = 2
CREATION_THROTTLE_TIMEOUT = 10 * 60
LOGIN_THROTTLE_LIMIT = 5
LOGIN_THROTTLE_TIMEOUT = 5 * 60
# Certain characters, like html tags, line breaks and tabs are stripped
# from user input for commands using the `evennia.utils.strip_unsafe_input` helper
# since they can be exploitative. This list defines Account-level permissions
# (and higher) that bypass this stripping. It is used as a fallback if a
# specific list of perms are not given to the helper function.
INPUT_CLEANUP_BYPASS_PERMISSIONS = ['Builder']
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest". Note that
# you need to edit your login screen to inform about this possibility.
GUEST_ENABLED = False
# Typeclass for guest account objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.accounts.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# accounts/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s + 1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# The mudinfo channel is a read-only channel used by Evennia to replay status
# messages, connection info etc to staff. The superuser will automatically be
# subscribed to this channel. If set to None, the channel is disabled and
# status messages will only be logged (not recommended).
CHANNEL_MUDINFO = {
"key": "MudInfo",
"aliases": "",
"desc": "Connection log",
"locks": "control:perm(Developer);listen:perm(Admin);send:false()",
}
# Optional channel (same form as CHANNEL_MUDINFO) that will receive connection
# messages like ("<account> has (dis)connected"). While the MudInfo channel
# will also receieve this info, this channel is meant for non-staffers. If
# None, this information will only be logged.
CHANNEL_CONNECTINFO = None
# New accounts will auto-sub to the default channels given below (but they can
# unsub at any time). Traditionally, at least 'public' should exist. Entries
# will be (re)created on the next reload, but removing or updating a same-key
# channel from this list will NOT automatically change/remove it in the game,
# that needs to be done manually. Note: To create other, non-auto-subbed
# channels, create them manually in server/conf/at_initial_setup.py.
DEFAULT_CHANNELS = [
{
"key": "Public",
"aliases": ("pub",),
"desc": "Public discussion",
"locks": "control:perm(Admin);listen:all();send:all()",
"typeclass": BASE_CHANNEL_TYPECLASS,
}
]
######################################################################
# External Connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients.
# The Evennia Game Index is a dynamic listing of Evennia games. You can add your game
# to this list also if it is in closed pre-alpha development.
GAME_INDEX_ENABLED = False
# This dict
GAME_INDEX_LISTING = {
"game_name": "Mygame", # usually SERVERNAME
"game_status": "pre-alpha", # pre-alpha, alpha, beta or launched
"short_description": "", # could be GAME_SLOGAN
"long_description": "",
"listing_contact": "", # email
"telnet_hostname": "", # mygame.com
"telnet_port": "", # 1234
"game_website": "", # http://mygame.com
"web_client_url": "", # http://mygame.com/webclient
}
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
# IRC requires that you have twisted.words installed.
IRC_ENABLED = False
# RSS allows to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60 * 10 # 10 minutes
# Grapevine (grapevine.haus) is a network for listing MUDs as well as allow
# users of said MUDs to communicate with each other on shared channels. To use,
# your game must first be registered by logging in and creating a game entry at
# https://grapevine.haus. Evennia links grapevine channels to in-game channels
# with the @grapevine2chan command, available once this flag is set
# Grapevine requires installing the pyopenssl library (pip install pyopenssl)
GRAPEVINE_ENABLED = False
# Grapevine channels to allow connection to. See https://grapevine.haus/chat
# for the available channels. Only channels in this list can be linked to in-game
# channels later.
GRAPEVINE_CHANNELS = ["gossip", "testing"]
# Grapevine authentication. Register your game at https://grapevine.haus to get
# them. These are secret and should thus be overridden in secret_settings file
GRAPEVINE_CLIENT_ID = ""
GRAPEVINE_CLIENT_SECRET = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather prefer nobody receives emails, leave this commented out or empty.
ADMINS = () # '<NAME>', '<EMAIL>'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# This is a public point of contact for players or the public to contact
# a staff member or administrator of the site. It is publicly posted.
STAFF_CONTACT_EMAIL = None
# If using Sites/Pages from the web admin, this value must be set to the
# database-id of the Site (domain) we want to use with this game's Pages.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = "sessionid"
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# How to display time stamps in e.g. the admin
SHORT_DATETIME_FORMAT = 'Y-m-d H:i:s.u'
DATETIME_FORMAT = 'Y-m-d H:i:s' # ISO 8601 but without T and timezone
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = "web.urls"
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = "/"
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = reverse_lazy("login")
# Where to redirect users who wish to logout.
LOGOUT_URL = reverse_lazy("logout")
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = "/media/"
# Absolute path to the directory that holds file uploads from web apps.
MEDIA_ROOT = os.path.join(GAME_DIR, "server", ".media")
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Admin-related files are searched under STATIC_URL/admin.
STATIC_URL = "/static/"
# Absolute path to directory where the static data will be gathered into to be
# served by webserver.
STATIC_ROOT = os.path.join(GAME_DIR, "server", ".static")
# Location of static data to overload the defaults from
# evennia/web/static.
STATICFILES_DIRS = [os.path.join(GAME_DIR, "web", "static")]
# Patterns of files in the static directories. Used here to make sure that
# its readme file is preserved but unused.
STATICFILES_IGNORE_PATTERNS = ["README.md"]
# The name of the currently selected web template. This corresponds to the
# directory names shown in the templates directory.
WEBSITE_TEMPLATE = "website"
WEBCLIENT_TEMPLATE = "webclient"
# We setup the location of the website template as well as the admin site.
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(GAME_DIR, "web", "templates"),
os.path.join(GAME_DIR, "web", "templates", WEBSITE_TEMPLATE),
os.path.join(GAME_DIR, "web", "templates", WEBCLIENT_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "templates"),
os.path.join(EVENNIA_DIR, "web", "templates", WEBSITE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "templates", WEBCLIENT_TEMPLATE),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.i18n",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.media",
"django.template.context_processors.debug",
"django.contrib.messages.context_processors.messages",
"sekizai.context_processors.sekizai",
"evennia.web.utils.general_context.general_context",
],
# While true, show "pretty" error messages for template syntax errors.
"debug": DEBUG,
},
}
]
# Django cache settings
# https://docs.djangoproject.com/en/dev/topics/cache/#setting-up-the-cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'throttle': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'TIMEOUT': 60 * 5,
'OPTIONS': {
'MAX_ENTRIES': 2000
}
}
}
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware", # 1.4?
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.admindocs.middleware.XViewMiddleware",
"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
"evennia.web.utils.middleware.SharedLoginMiddleware",
]
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admindocs",
"django.contrib.flatpages",
"django.contrib.sites",
"django.contrib.staticfiles",
"evennia.web.utils.adminsite.EvenniaAdminApp", # replaces django.contrib.admin
"django.contrib.messages",
"rest_framework",
"django_filters",
"sekizai",
"evennia.utils.idmapper",
"evennia.server",
"evennia.typeclasses",
"evennia.accounts",
"evennia.objects",
"evennia.comms",
"evennia.help",
"evennia.scripts",
"evennia.web",
]
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "accounts.AccountDB"
# Password validation plugins
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 8},
},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
{"NAME": "evennia.server.validators.EvenniaPasswordValidator"},
]
# Username validation plugins
AUTH_USERNAME_VALIDATORS = [
{"NAME": "django.contrib.auth.validators.ASCIIUsernameValidator"},
{"NAME": "django.core.validators.MinLengthValidator", "OPTIONS": {"limit_value": 3},},
{"NAME": "django.core.validators.MaxLengthValidator", "OPTIONS": {"limit_value": 30},},
{"NAME": "evennia.server.validators.EvenniaUsernameAvailabilityValidator"},
]
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = "evennia.server.tests.testrunner.EvenniaTestSuiteRunner"
# Messages and Bootstrap don't classify events the same way; this setting maps
# messages.error() to Bootstrap 'danger' classes.
MESSAGE_TAGS = {messages.ERROR: "danger"}
# Django REST Framework settings
REST_FRAMEWORK = {
# django_filters allows you to specify search fields for models in an API View
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
# whether to paginate results and how many per page
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 25,
# require logged in users to call API so that access checks can work on them
"DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.IsAuthenticated",],
# These are the different ways people can authenticate for API requests - via
# session or with user/password. Other ways are possible, such as via tokens
# or oauth, but require additional dependencies.
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
# default permission checks used by the EvenniaPermission class
"DEFAULT_CREATE_PERMISSION": "builder",
"DEFAULT_LIST_PERMISSION": "builder",
"DEFAULT_VIEW_LOCKS": ["examine"],
"DEFAULT_DESTROY_LOCKS": ["delete"],
"DEFAULT_UPDATE_LOCKS": ["control", "edit"],
# No throttle class set by default. Setting one also requires a cache backend to be specified.
}
# To enable the REST api, turn this to True
REST_API_ENABLED = False
######################################################################
# Networking Replaceables
######################################################################
# This allows for replacing the very core of the infrastructure holding Evennia
# together with your own variations. You should usually never have to touch
# this, and if so, you really need to know what you are doing.
# The Base Session Class is used as a parent class for all Protocols such as
# Telnet and SSH.) Changing this could be really dangerous. It will cascade
# to tons of classes. You generally shouldn't need to touch protocols.
BASE_SESSION_CLASS = "evennia.server.session.Session"
# Telnet Protocol inherits from whatever above BASE_SESSION_CLASS is specified.
# It is used for all telnet connections, and is also inherited by the SSL Protocol
# (which is just TLS + Telnet).
TELNET_PROTOCOL_CLASS = "evennia.server.portal.telnet.TelnetProtocol"
SSL_PROTOCOL_CLASS = "evennia.server.portal.ssl.SSLProtocol"
# Websocket Client Protocol. This inherits from BASE_SESSION_CLASS. It is used
# for all webclient connections.
WEBSOCKET_PROTOCOL_CLASS = "evennia.server.portal.webclient.WebSocketClient"
# Protocol for the SSH interface. This inherits from BASE_SESSION_CLASS.
SSH_PROTOCOL_CLASS = "evennia.server.portal.ssh.SshProtocol"
# Server-side session class used. This will inherit from BASE_SESSION_CLASS.
# This one isn't as dangerous to replace.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# The Server SessionHandler manages all ServerSessions, handling logins,
# ensuring the login process happens smoothly, handling expected and
# unexpected disconnects. You shouldn't need to touch it, but you can.
# Replace it to implement altered game logic.
SERVER_SESSION_HANDLER_CLASS = "evennia.server.sessionhandler.ServerSessionHandler"
# The Portal SessionHandler manages all incoming connections regardless of
# the protocol in use. It is responsible for keeping them going and informing
# the Server Session Handler of the connections and synchronizing them across the
# AMP connection. You shouldn't ever need to change this. But you can.
PORTAL_SESSION_HANDLER_CLASS = "evennia.server.portal.portalsessionhandler.PortalSessionHandler"
# These are members / properties / attributes kept on both Server and
# Portal Sessions. They are sync'd at various points, such as logins and
# reloads. If you add to this, you may need to adjust the class __init__
# so the additions have somewhere to go. These must be simple things that
# can be pickled - stuff you could serialize to JSON is best.
SESSION_SYNC_ATTRS = (
"protocol_key",
"address",
"suid",
"sessid",
"uid",
"csessid",
"uname",
"logged_in",
"puid",
"conn_time",
"cmd_last",
"cmd_last_visible",
"cmd_total",
"protocol_flags",
"server_data",
"cmdset_storage_string",
)
# The following are used for the communications between the Portal and Server.
# Very dragons territory.
AMP_SERVER_PROTOCOL_CLASS = "evennia.server.portal.amp_server.AMPServerProtocol"
AMP_CLIENT_PROTOCOL_CLASS = "evennia.server.amp_client.AMPServerClientProtocol"
# don't change this manually, it can be checked from code to know if
# being run from a unit test (set by the evennia.utils.test_resources.BaseEvenniaTest
# and BaseEvenniaTestCase unit testing parents)
_TEST_ENVIRONMENT = False
######################################################################
# Django extensions
######################################################################
# Django extesions are useful third-party tools that are not
# always included in the default django distro.
try:
import django_extensions # noqa
INSTALLED_APPS += ["django_extensions"]
except ImportError:
# Django extensions are not installed in all distros.
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = "changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS"
|
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/server/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from django.contrib.messages import constants as messages
from django.urls import reverse_lazy
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Short one-sentence blurb describing your game. Shown under the title
# on the website and could be used in online listings of your game etc.
GAME_SLOGAN = "The Python MUD/MU* creation system"
# The url address to your server, like mymudgame.com. This should be the publicly
# visible location. This is used e.g. on the web site to show how you connect to the
# game over telnet. Default is localhost (only on your machine).
SERVER_HOSTNAME = "localhost"
# Lockdown mode will cut off the game from any external connections
# and only allow connections from localhost. Requires a cold reboot.
LOCKDOWN_MODE = False
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ["0.0.0.0"]
# Activate Telnet+SSL protocol (SecureSocketLibrary) for supporting clients
SSL_ENABLED = False
# Ports to use for Telnet+SSL
SSL_PORTS = [4003]
# Telnet+SSL Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ["0.0.0.0"]
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [4004]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ["0.0.0.0"]
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(4001, 4005)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ["0.0.0.0"]
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ["127.0.0.1"]
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version if the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient. Note that this value will
# be dynamically encoded in the webclient html page to allow the webclient to call
# home. If the external encoded value needs to be different than this, due to
# working through a proxy or docker port-remapping, the environment variable
# WEBCLIENT_CLIENT_PROXY_PORT can be used to override this port only for the
# front-facing client's sake.
WEBSOCKET_CLIENT_PORT = 4002
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = "0.0.0.0"
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws[s]://hostname[:port]". If left at None,
# the client will itself figure out this url based on the server's hostname.
# e.g. ws://external.example.com or wss://external.example.com:443
WEBSOCKET_CLIENT_URL = None
# This determine's whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# The Server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = "localhost"
AMP_PORT = 4006
AMP_INTERFACE = "127.0.0.1"
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the server/conf/settings.py file)
# This is dynamically created- there is generally no need to change this!
if EVENNIA_DIR.lower() == os.getcwd().lower() or (
    sys.argv[1] == "test" if len(sys.argv) > 1 else False
):
    # unittesting mode
    GAME_DIR = os.getcwd()
else:
    # Fallback location (will be replaced by the actual game dir at runtime)
    GAME_DIR = os.path.join(EVENNIA_DIR, "game_template")
    # Walk up at most 10 parent levels looking for a dir containing
    # server/conf/settings.py. On success the walk leaves the process cwd
    # at GAME_DIR (unchanged from the original behavior).
    _game_dir_search_start = os.getcwd()
    for _ in range(10):
        gpath = os.getcwd()
        if "server" in os.listdir(gpath):
            if os.path.isfile(os.path.join("server", "conf", "settings.py")):
                GAME_DIR = gpath
                break
        os.chdir(os.pardir)
    else:
        # No game dir found within 10 levels - restore the original working
        # directory instead of leaving the process cwd up to 10 levels above
        # where it started (side effect of the os.chdir walk above).
        os.chdir(_game_dir_search_start)
# Place to put log files, how often to rotate the log and how big each log file
# may become before rotating.
LOG_DIR = os.path.join(GAME_DIR, "server", "logs")
SERVER_LOG_FILE = os.path.join(LOG_DIR, "server.log")
SERVER_LOG_DAY_ROTATION = 7
SERVER_LOG_MAX_SIZE = 1000000
PORTAL_LOG_FILE = os.path.join(LOG_DIR, "portal.log")
PORTAL_LOG_DAY_ROTATION = 7
PORTAL_LOG_MAX_SIZE = 1000000
# The http log is usually only for debugging since it's very spammy
HTTP_LOG_FILE = os.path.join(LOG_DIR, "http_requests.log")
# if this is set to the empty string, lockwarnings will be turned off.
LOCKWARNING_LOG_FILE = os.path.join(LOG_DIR, "lockwarnings.log")
# Number of lines to append to rotating channel logs when they rotate
CHANNEL_LOG_NUM_TAIL_LINES = 20
# Max size (in bytes) of channel log files before they rotate
CHANNEL_LOG_ROTATE_SIZE = 1000000
# Unused by default, but used by e.g. the MapSystem contrib. A place for storing
# semi-permanent data and avoid it being rebuilt over and over. It is created
# on-demand only.
CACHE_DIR = os.path.join(GAME_DIR, "server", ".cache")
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = "UTC"
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = ["evennia.web.utils.backends.CaseInsensitiveModelBackend"]
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = "en-us"
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = -1
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer. Note that "idle" will *always* work, even if a different
# command-name is given here; this is because the webclient needs a default
# to send to avoid proxy timeouts.
IDLE_COMMAND = "idle"
# The set of encodings tried. An Account object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your accounts are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
# Telnet default encoding, unless specified by the client, will be ENCODINGS[0].
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# MXP support means the ability to show clickable links in the client. Clicking
# the link will execute a game command. It's a way to add mouse input to the game.
MXP_ENABLED = True
# If this is set, MXP can only be sent by the server and not added from the
# client side. Disabling this is a potential security risk because it could
# allow malevolent players to lure others to execute commands they did not
# intend to.
MXP_OUTGOING_ONLY = True
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
# custom, extra commands to add to the `evennia` launcher. This is a dict
# of {'cmdname': 'path.to.callable', ...}, where the callable will be passed
# any extra args given on the command line. For example `evennia cmdname foo bar`.
EXTRA_LAUNCHER_COMMANDS = {}
# Determine how large of a string can be sent to the server in number
# of characters. If they attempt to enter a string over this character
# limit, we stop them and send a message. To make unlimited, set to
# 0 or less.
MAX_CHAR_LIMIT = 6000
# Feedback echoed to a user whose input exceeded MAX_CHAR_LIMIT characters.
MAX_CHAR_LIMIT_WARNING = "You entered a string that was too long. Please break it up into multiple parts."
# If this is true, errors and tracebacks from the engine will be
# echoed as text in-game as well as to the log. This can speed up
# debugging. OBS: Showing full tracebacks to regular users could be a
# security problem -turn this off in a production game!
IN_GAME_ERRORS = True
# Broadcast "Server restart"-like messages to all sessions.
BROADCAST_SERVER_RESTART_MESSAGES = True
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - database password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
    "default": {
        # sqlite3 only uses ENGINE and NAME; the remaining keys are
        # placeholders needed by the other backends (mysql/postgresql/oracle).
        "ENGINE": "django.db.backends.sqlite3",
        # The TEST_DB_PATH environment variable, when set (e.g. by a test
        # runner), overrides the default db file under <gamedir>/server/.
        "NAME": os.getenv("TEST_DB_PATH", os.path.join(GAME_DIR, "server", "evennia.db3")),
        "USER": "",
        "PASSWORD": "",
        "HOST": "",
        "PORT": "",
    }
}
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
# When removing or renaming models, such models stored in Attributes may
# become orphaned and will return as None. If the change is a rename (that
# is, there is a 1:1 pk mapping between the old and the new), the unserializer
# can convert old to new when retrieving them. This is a list of tuples
# (old_natural_key, new_natural_key). Note that Django ContentTypes'
# natural_keys are themselves tuples (appname, modelname). Creation-dates will
# not be checked for models specified here. If new_natural_key does not exist,
# `None` will be returned and stored back as if no replacement was set.
ATTRIBUTE_STORED_MODEL_RENAME = [
(("players", "playerdb"), ("accounts", "accountdb")),
(("typeclasses", "defaultplayer"), ("typeclasses", "defaultaccount")),
]
# Default type of autofield (required by Django)
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
######################################################################
# Evennia webclient options
######################################################################
# default webclient options (without user changing it)
WEBCLIENT_OPTIONS = {
# Gags prompts in output window and puts them on the input bar
"gagprompt": True,
# Shows help files in a new popup window instead of in-pane
"helppopup": False,
# Shows notifications of new messages as popup windows
"notification_popup": False,
# Plays a sound for notifications of new messages
"notification_sound": False
}
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects
# are found, they can by default be separated as 1-box, 2-box. Below you
# can change the regular expression used. The regex must have
# two capturing groups (?P<number>...) and (?P<name>...) - the default
# parser expects this. It should also involve a number starting from 1.
# When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE
# to properly describe the syntax.
SEARCH_MULTIMATCH_REGEX = r"(?P<name>[^-]*)-(?P<number>[0-9]+)(?P<args>.*)"
# To display multimatch errors in various listings we must display
# the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand.
# The template will be populated with data and expects the following markup:
# {number} - the order of the multimatch, starting from 1; {name} - the
# name (key) of the multimatched entity; {aliases} - eventual
# aliases for the entity; {info} - extra info like #dbrefs for staff. Don't
# forget a line break if you want one match per line.
SEARCH_MULTIMATCH_TEMPLATE = " {name}-{number}{aliases}{info}\n"
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches. This allows full control
# over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default).
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# Single characters to ignore at the beginning of a command. When set, e.g.
# cmd, @cmd and +cmd will all find a command "cmd" or one named "@cmd" etc. If
# you have defined two different commands cmd and @cmd you can still enter
# @cmd to exactly target the second one. Single-character commands consisting
# of only a prefix character will not be stripped. Set to the empty
# string ("") to turn off prefix ignore.
CMD_IGNORE_PREFIXES = "@&/+"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# Delay to use before sending the evennia.syscmdkeys.CMD_LOGINSTART Command
# when a new session connects (this defaults the unloggedin-look for showing
# the connection screen). The delay is useful mainly for telnet, to allow
# client/server to establish client capabilities like color/mxp etc before
# sending any text. A value of 0.3 should be enough. While a good idea, it may
# cause issues with menu-logins and autoconnects since the menu will not have
# started when the autoconnects starts sending menu commands.
DELAY_CMD_LOGINSTART = 0.3
# A module that must exist - this holds the instructions Evennia will use to
# first prepare the database for use (create user #1 and Limbo etc). Only override if
# you really know what you are doing. If replacing, it must contain a function
# handle_setup(stepname=None). The function will start being called with no argument
# and is expected to maintain a named sequence of steps. Once each step is completed, it
# should be saved with ServerConfig.objects.conf('last_initial_setup_step', stepname)
# on a crash, the system will continue by calling handle_setup with the last completed
# step. The last step in the sequence must be named 'done'. Once this key is saved,
# initialization will not run again.
INITIAL_SETUP_MODULE = "evennia.server.initial_setup"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function start_
# plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many accounts you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Module for web plugins.
WEB_PLUGINS_MODULE = "server.conf.web_plugins"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs")
# Module holding handlers for managing incoming data from the client. These
# will be loaded in order, meaning functions in later modules may overload
# previous ones if having the same name.
INPUT_FUNC_MODULES = ["evennia.server.inputfuncs", "server.conf.inputfuncs"]
# Modules that contain prototypes for use with the spawner mechanism.
PROTOTYPE_MODULES = ["world.prototypes"]
# Modules containining Prototype functions able to be embedded in prototype
# definitions from in-game.
PROT_FUNC_MODULES = ["evennia.prototypes.protfuncs"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
# Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of
# tuples mapping the exact tag (not a regex!) to the ANSI conversion, like
# `(r"%c%r", ansi.ANSI_RED)` (the evennia.utils.ansi module contains all
# ANSI escape sequences). Default is to use `|` and `|[` -prefixes.
COLOR_ANSI_EXTRA_MAP = []
# Extend the available regexes for adding XTERM256 colors in-game. This is given
# as a list of regexes, where each regex must contain three anonymous groups for
# holding integers 0-5 for the red, green and blue components Default is
# is r'\|([0-5])([0-5])([0-5])', which allows e.g. |500 for red.
# XTERM256 foreground color replacement
COLOR_XTERM256_EXTRA_FG = []
# XTERM256 background color replacement. Default is \|\[([0-5])([0-5])([0-5])'
COLOR_XTERM256_EXTRA_BG = []
# Extend the available regexes for adding XTERM256 grayscale values in-game. Given
# as a list of regexes, where each regex must contain one anonymous group containing
# a single letter a-z to mark the level from white to black. Default is r'\|=([a-z])',
# which allows e.g. |=k for a medium gray.
# XTERM256 grayscale foreground
COLOR_XTERM256_EXTRA_GFG = []
# XTERM256 grayscale background. Default is \|\[=([a-z])'
COLOR_XTERM256_EXTRA_GBG = []
# ANSI does not support bright backgrounds, so Evennia fakes this by mapping it to
# XTERM256 backgrounds where supported. This is a list of tuples that maps the wanted
# ansi tag (not a regex!) to a valid XTERM256 background tag, such as `(r'{[r', r'{[500')`.
COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = []
# If set True, the above color settings *replace* the default |-style color markdown
# rather than extend it.
COLOR_NO_DEFAULT = False
######################################################################
# Default command sets and commands
######################################################################
# Command set used on session before account has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# (Note that changing these three following cmdset paths will only affect NEW
# created characters/objects, not those already in play. So if you want to
# change this and have it apply to every object, it's recommended you do it
# before having created a lot of objects (or simply reset the database after
# the change for simplicity)).
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in account with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for accounts without a character (ooc)
CMDSET_ACCOUNT = "commands.default_cmdsets.AccountCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "evennia.contrib"]
# Fallbacks for cmdset paths that fail to load. Note that if you change the path for your
# default cmdsets, you will also need to copy CMDSET_FALLBACKS after your change in your
# settings file for it to detect the change.
CMDSET_FALLBACKS = {
CMDSET_CHARACTER: "evennia.commands.default.cmdset_character.CharacterCmdSet",
CMDSET_ACCOUNT: "evennia.commands.default.cmdset_account.AccountCmdSet",
CMDSET_SESSION: "evennia.commands.default.cmdset_session.SessionCmdSet",
CMDSET_UNLOGGEDIN: "evennia.commands.default.cmdset_unloggedin.UnloggedinCmdSet",
}
# Parent class for all default commands. Changing this class will
# modify all default commands, so do so carefully.
COMMAND_DEFAULT_CLASS = "evennia.commands.default.muxcommand.MuxCommand"
# Command.arg_regex is a regular expression describing how the arguments
# to the command must be structured for the command to match a given user
# input. By default the command-name should end with a space or / (since the
# default commands uses MuxCommand and /switches).
COMMAND_DEFAULT_ARG_REGEX = r'^[ /]+.*$|$'
# By default, Command.msg will only send data to the Session calling
# the Command in the first place. If set, Command.msg will instead return
# data to all Sessions connected to the Account/Character associated with
# calling the Command. This may be more intuitive for users in certain
# multisession modes.
COMMAND_DEFAULT_MSG_ALL_SESSIONS = False
# The default lockstring of a command.
COMMAND_DEFAULT_LOCKS = ""
######################################################################
# Typeclasses and other paths
######################################################################
# These are paths that will be prefixed to the paths given if the
# immediately entered path fail to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = [
"typeclasses",
"evennia",
"evennia.contrib",
"evennia.contrib.game_systems",
"evennia.contrib.base_systems",
"evennia.contrib.full_systems",
"evennia.contrib.tutorials",
"evennia.contrib.utils",
]
# Typeclass for account objects (linked to a character) (fallback)
BASE_ACCOUNT_TYPECLASS = "typeclasses.accounts.Account"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to an account (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2, 3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
# These are fallbacks for BASE typeclasses failing to load. Usually needed only
# during doc building. The system expects these to *always* load correctly, so
# only modify if you are making fundamental changes to how objects/accounts
# work and know what you are doing
FALLBACK_ACCOUNT_TYPECLASS = "evennia.accounts.accounts.DefaultAccount"
FALLBACK_OBJECT_TYPECLASS = "evennia.objects.objects.DefaultObject"
FALLBACK_CHARACTER_TYPECLASS = "evennia.objects.objects.DefaultCharacter"
FALLBACK_ROOM_TYPECLASS = "evennia.objects.objects.DefaultRoom"
FALLBACK_EXIT_TYPECLASS = "evennia.objects.objects.DefaultExit"
FALLBACK_CHANNEL_TYPECLASS = "evennia.comms.comms.DefaultChannel"
FALLBACK_SCRIPT_TYPECLASS = "evennia.scripts.scripts.DefaultScript"
######################################################################
# Options and validators
######################################################################
# Options available on Accounts. Each such option is described by a
# class available from evennia.OPTION_CLASSES, in turn making use
# of validators from evennia.VALIDATOR_FUNCS to validate input when
# the user changes an option. The options are accessed through the
# `Account.options` handler.
# ("Description", 'Option Class name in evennia.OPTION_CLASS_MODULES', 'Default Value')
OPTIONS_ACCOUNT_DEFAULT = {
"border_color": ("Headers, footers, table borders, etc.", "Color", "n"),
"header_star_color": ("* inside Header lines.", "Color", "n"),
"header_text_color": ("Text inside Header lines.", "Color", "w"),
"header_fill": ("Fill for Header lines.", "Text", "="),
"separator_star_color": ("* inside Separator lines.", "Color", "n"),
"separator_text_color": ("Text inside Separator lines.", "Color", "w"),
"separator_fill": ("Fill for Separator Lines.", "Text", "-"),
"footer_star_color": ("* inside Footer lines.", "Color", "n"),
"footer_text_color": ("Text inside Footer Lines.", "Color", "n"),
"footer_fill": ("Fill for Footer Lines.", "Text", "="),
"column_names_color": ("Table column header text.", "Color", "w"),
"help_category_color": ("Help category names.", "Color", "n"),
"help_entry_color": ("Help entry names.", "Color", "n"),
"timezone": ("Timezone for dates. @tz for a list.", "Timezone", "UTC"),
}
# Modules holding Option classes, responsible for serializing the option and
# calling validator functions on it. Same-named functions in modules added
# later in this list will override those added earlier.
OPTION_CLASS_MODULES = ["evennia.utils.optionclasses"]
# Module holding validator functions. These are used as a resource for
# validating options, but can also be used as input validators in general.
# Same-named functions in modules added later in this list will override those
# added earlier.
VALIDATOR_FUNC_MODULES = ["evennia.utils.validatorfuncs"]
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files). Entries are
# dotted python-paths, searched in order.
BASE_BATCHPROCESS_PATHS = [
    "world",
    "evennia.contrib",
    "evennia.contrib.tutorials",
]
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# The starting point of your game time (the epoch), in seconds.
# In Python a value of 0 means Jan 1 1970 (use negatives for earlier
# start date). This will affect the returns from the utils.gametime
# module. If None, the server's first start-time is used as the epoch.
TIME_GAME_EPOCH = None
# Normally, game time will only increase when the server runs. If this is True,
# game time will not pause when the server reloads or goes offline. This setting
# together with a time factor of 1 should keep the game in sync with
# the real time (add a different epoch to shift time)
TIME_IGNORE_DOWNTIMES = False
######################################################################
# Help system
######################################################################
# Help output from CmdHelp are wrapped in an EvMore call
# (excluding webclient with separate help popups). If continuous scroll
# is preferred, change 'HELP_MORE' to False. EvMORE uses CLIENT_DEFAULT_HEIGHT
HELP_MORE_ENABLED = True
# The help category of a command if not specified.
COMMAND_DEFAULT_HELP_CATEGORY = "general"
# The help category of a db or file-based help entry if not specified
DEFAULT_HELP_CATEGORY = "general"
# File-based help entries. These are modules containing dicts defining help
# entries. They can be used together with in-database entries created in-game.
FILE_HELP_ENTRY_MODULES = ["world.help_entries"]
# If topics listed in help should be clickable.
# Clickable links only work on clients that support MXP.
HELP_CLICKABLE_TOPICS = True
######################################################################
# FuncParser
#
# Strings parsed with the FuncParser can contain 'callables' on the
# form $funcname(args,kwargs), which will lead to actual Python functions
# being executed.
######################################################################
# This changes the start-symbol for the funcparser callable. Note that
# this will make a lot of documentation invalid and there may also be
# other unexpected side effects, so change with caution.
FUNCPARSER_START_CHAR = '$'
# The symbol used to escape a funcparser callable so that it is passed
# through verbatim instead of being parsed and executed.
FUNCPARSER_ESCAPE_CHAR = '\\'
# This is the global max nesting-level for nesting functions in
# the funcparser. This protects against infinite loops.
FUNCPARSER_MAX_NESTING = 20
# Activate funcparser for all outgoing strings. The current Session
# will be passed into the parser (used to be called inlinefuncs)
FUNCPARSER_PARSE_OUTGOING_MESSAGES_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
FUNCPARSER_OUTGOING_MESSAGES_MODULES = ["evennia.utils.funcparser", "server.conf.inlinefuncs"]
# Prototype values are also parsed with FuncParser. These modules
# define which $func callables are available to use in prototypes.
FUNCPARSER_PROTOTYPE_PARSING_MODULES = ["evennia.prototypes.protfuncs",
                                        "server.conf.prototypefuncs"]
######################################################################
# Global Scripts
######################################################################
# Global scripts started here will be available through
# 'evennia.GLOBAL_SCRIPTS.key'. The scripts will survive a reload and be
# recreated automatically if deleted. Each entry must have the script keys,
# whereas all other fields in the specification are optional. If 'typeclass' is
# not given, BASE_SCRIPT_TYPECLASS will be assumed. Note that if you change
# typeclass for the same key, a new Script will replace the old one on
# `evennia.GLOBAL_SCRIPTS`.
GLOBAL_SCRIPTS = {
    # 'key': {'typeclass': 'typeclass.path.here',
    #         'repeats': -1, 'interval': 50, 'desc': 'Example script'},
}
######################################################################
# Default Account setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character created to the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allow per account.
#  0 - single session, one account, one character, when a new session is
#      connected, the old one is disconnected
#  1 - multiple sessions, one account, one character, each session getting
#      the same data
#  2 - multiple sessions, one account, many characters, one session per
#      character (disconnects multiplets)
#  3 - like mode 2, except multiple sessions can puppet one character, each
#      session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed by the default ooc char-creation command
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions, which accepts both plural and singular (Admin & Admins)
PERMISSION_HIERARCHY = [
    "Guest",  # note: only used if GUEST_ENABLED=True
    "Player",
    "Helper",
    "Builder",
    "Admin",
    "Developer",
]
# The default permission given to all new accounts
PERMISSION_ACCOUNT_DEFAULT = "Player"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
# telnet standard height is 24; does anyone use such low-res displays anymore?
CLIENT_DEFAULT_HEIGHT = 45
# Set rate limits per-IP on account creations and login attempts. Set limits
# to None to disable. Timeouts are in seconds.
CREATION_THROTTLE_LIMIT = 2
CREATION_THROTTLE_TIMEOUT = 10 * 60
LOGIN_THROTTLE_LIMIT = 5
LOGIN_THROTTLE_TIMEOUT = 5 * 60
# Certain characters, like html tags, line breaks and tabs are stripped
# from user input for commands using the `evennia.utils.strip_unsafe_input` helper
# since they can be exploitative. This list defines Account-level permissions
# (and higher) that bypass this stripping. It is used as a fallback if a
# specific list of perms are not given to the helper function.
INPUT_CLEANUP_BYPASS_PERMISSIONS = ['Builder']
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest". Note that
# you need to edit your login screen to inform about this possibility.
GUEST_ENABLED = False
# Typeclass for guest account objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.accounts.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests (defaults to DEFAULT_HOME).
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters (defaults to START_LOCATION).
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# accounts/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = [f"Guest{num}" for num in range(1, 10)]
######################################################################
# In-game Channels created from server start
######################################################################
# The mudinfo channel is a read-only channel used by Evennia to replay status
# messages, connection info etc to staff. The superuser will automatically be
# subscribed to this channel. If set to None, the channel is disabled and
# status messages will only be logged (not recommended).
CHANNEL_MUDINFO = {
    "key": "MudInfo",
    "aliases": "",
    "desc": "Connection log",
    "locks": "control:perm(Developer);listen:perm(Admin);send:false()",
}
# Optional channel (same form as CHANNEL_MUDINFO) that will receive connection
# messages like ("<account> has (dis)connected"). While the MudInfo channel
# will also receive this info, this channel is meant for non-staffers. If
# None, this information will only be logged.
CHANNEL_CONNECTINFO = None
# New accounts will auto-sub to the default channels given below (but they can
# unsub at any time). Traditionally, at least 'public' should exist. Entries
# will be (re)created on the next reload, but removing or updating a same-key
# channel from this list will NOT automatically change/remove it in the game,
# that needs to be done manually. Note: To create other, non-auto-subbed
# channels, create them manually in server/conf/at_initial_setup.py.
DEFAULT_CHANNELS = [
    {
        "key": "Public",
        "aliases": ("pub",),
        "desc": "Public discussion",
        "locks": "control:perm(Admin);listen:all();send:all()",
        "typeclass": BASE_CHANNEL_TYPECLASS,
    }
]
######################################################################
# External Connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients.
# The Evennia Game Index is a dynamic listing of Evennia games. You can add your game
# to this list also if it is in closed pre-alpha development.
GAME_INDEX_ENABLED = False
# This dict describes your game for the Evennia Game Index listing
# (only used if GAME_INDEX_ENABLED is True).
GAME_INDEX_LISTING = {
    "game_name": "Mygame",  # usually SERVERNAME
    "game_status": "pre-alpha",  # pre-alpha, alpha, beta or launched
    "short_description": "",  # could be GAME_SLOGAN
    "long_description": "",
    "listing_contact": "",  # email
    "telnet_hostname": "",  # mygame.com
    "telnet_port": "",  # 1234
    "game_website": "",  # http://mygame.com
    "web_client_url": "",  # http://mygame.com/webclient
}
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
# IRC requires that you have twisted.words installed.
IRC_ENABLED = False
# RSS allows to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60 * 10  # 10 minutes
# Grapevine (grapevine.haus) is a network for listing MUDs as well as allow
# users of said MUDs to communicate with each other on shared channels. To use,
# your game must first be registered by logging in and creating a game entry at
# https://grapevine.haus. Evennia links grapevine channels to in-game channels
# with the @grapevine2chan command, available once this flag is set
# Grapevine requires installing the pyopenssl library (pip install pyopenssl)
GRAPEVINE_ENABLED = False
# Grapevine channels to allow connection to. See https://grapevine.haus/chat
# for the available channels. Only channels in this list can be linked to in-game
# channels later.
GRAPEVINE_CHANNELS = ["gossip", "testing"]
# Grapevine authentication. Register your game at https://grapevine.haus to get
# them. These are secret and should thus be overridden in secret_settings file
GRAPEVINE_CLIENT_ID = ""
GRAPEVINE_CLIENT_SECRET = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather prefer nobody receives emails, leave this commented out or empty.
ADMINS = ()  # e.g. (("Your Name", "your_email@example.com"),)
# Recipients of broken-link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# This is a public point of contact for players or the public to contact
# a staff member or administrator of the site. It is publicly posted.
STAFF_CONTACT_EMAIL = None
# If using Sites/Pages from the web admin, this value must be set to the
# database-id of the Site (domain) we want to use with this game's Pages.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = "sessionid"
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# How to display time stamps in e.g. the admin
SHORT_DATETIME_FORMAT = 'Y-m-d H:i:s.u'
DATETIME_FORMAT = 'Y-m-d H:i:s'  # ISO 8601 but without T and timezone
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = "web.urls"
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = "/"
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = reverse_lazy("login")
# Where to redirect users who wish to logout.
LOGOUT_URL = reverse_lazy("logout")
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = "/media/"
# Absolute path to the directory that holds file uploads from web apps.
MEDIA_ROOT = os.path.join(GAME_DIR, "server", ".media")
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Admin-related files are searched under STATIC_URL/admin.
STATIC_URL = "/static/"
# Absolute path to directory where the static data will be gathered into to be
# served by webserver.
STATIC_ROOT = os.path.join(GAME_DIR, "server", ".static")
# Location of static data to overload the defaults from
# evennia/web/static.
STATICFILES_DIRS = [os.path.join(GAME_DIR, "web", "static")]
# File patterns in the static directories that should be skipped when
# collecting static files (e.g. placeholder README files).
STATICFILES_IGNORE_PATTERNS = ["README.md"]
# The name of the currently selected web template. This corresponds to the
# directory names shown in the templates directory.
WEBSITE_TEMPLATE = "website"
WEBCLIENT_TEMPLATE = "webclient"
# We setup the location of the website template as well as the admin site.
# Game-dir template dirs come first so they can override the Evennia defaults.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            os.path.join(GAME_DIR, "web", "templates"),
            os.path.join(GAME_DIR, "web", "templates", WEBSITE_TEMPLATE),
            os.path.join(GAME_DIR, "web", "templates", WEBCLIENT_TEMPLATE),
            os.path.join(EVENNIA_DIR, "web", "templates"),
            os.path.join(EVENNIA_DIR, "web", "templates", WEBSITE_TEMPLATE),
            os.path.join(EVENNIA_DIR, "web", "templates", WEBCLIENT_TEMPLATE),
        ],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.i18n",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.media",
                "django.template.context_processors.debug",
                "django.contrib.messages.context_processors.messages",
                "sekizai.context_processors.sekizai",
                "evennia.web.utils.general_context.general_context",
            ],
            # While true, show "pretty" error messages for template syntax errors.
            "debug": DEBUG,
        },
    }
]
# Django cache settings
# https://docs.djangoproject.com/en/dev/topics/cache/#setting-up-the-cache
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    # Separate cache used by the login/creation throttles; entries expire
    # after 5 minutes.
    'throttle': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'TIMEOUT': 60 * 5,
        'OPTIONS': {
            'MAX_ENTRIES': 2000
        }
    }
}
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE = [
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.admindocs.middleware.XViewMiddleware",
    "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
    "evennia.web.utils.middleware.SharedLoginMiddleware",
]
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.admindocs",
    "django.contrib.flatpages",
    "django.contrib.sites",
    "django.contrib.staticfiles",
    "evennia.web.utils.adminsite.EvenniaAdminApp",  # replaces django.contrib.admin
    "django.contrib.messages",
    "rest_framework",
    "django_filters",
    "sekizai",
    "evennia.utils.idmapper",
    "evennia.server",
    "evennia.typeclasses",
    "evennia.accounts",
    "evennia.objects",
    "evennia.comms",
    "evennia.help",
    "evennia.scripts",
    "evennia.web",
]
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "accounts.AccountDB"
# Password validation plugins
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
        "OPTIONS": {"min_length": 8},
    },
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
    {"NAME": "evennia.server.validators.EvenniaPasswordValidator"},
]
# Username validation plugins
AUTH_USERNAME_VALIDATORS = [
    {"NAME": "django.contrib.auth.validators.ASCIIUsernameValidator"},
    {"NAME": "django.core.validators.MinLengthValidator", "OPTIONS": {"limit_value": 3},},
    {"NAME": "django.core.validators.MaxLengthValidator", "OPTIONS": {"limit_value": 30},},
    {"NAME": "evennia.server.validators.EvenniaUsernameAvailabilityValidator"},
]
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = "evennia.server.tests.testrunner.EvenniaTestSuiteRunner"
# Messages and Bootstrap don't classify events the same way; this setting maps
# messages.error() to Bootstrap 'danger' classes.
MESSAGE_TAGS = {messages.ERROR: "danger"}
# Django REST Framework settings
REST_FRAMEWORK = {
    # django_filters allows you to specify search fields for models in an API View
    "DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
    # whether to paginate results and how many per page
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 25,
    # require logged in users to call API so that access checks can work on them
    "DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.IsAuthenticated",],
    # These are the different ways people can authenticate for API requests - via
    # session or with user/password. Other ways are possible, such as via tokens
    # or oauth, but require additional dependencies.
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "rest_framework.authentication.BasicAuthentication",
        "rest_framework.authentication.SessionAuthentication",
    ],
    # default permission checks used by the EvenniaPermission class
    "DEFAULT_CREATE_PERMISSION": "builder",
    "DEFAULT_LIST_PERMISSION": "builder",
    "DEFAULT_VIEW_LOCKS": ["examine"],
    "DEFAULT_DESTROY_LOCKS": ["delete"],
    "DEFAULT_UPDATE_LOCKS": ["control", "edit"],
    # No throttle class set by default. Setting one also requires a cache backend to be specified.
}
# To enable the REST api, turn this to True
REST_API_ENABLED = False
######################################################################
# Networking Replaceables
######################################################################
# This allows for replacing the very core of the infrastructure holding Evennia
# together with your own variations. You should usually never have to touch
# this, and if so, you really need to know what you are doing.
# The Base Session Class is used as a parent class for all Protocols such as
# Telnet and SSH. Changing this could be really dangerous. It will cascade
# to tons of classes. You generally shouldn't need to touch protocols.
BASE_SESSION_CLASS = "evennia.server.session.Session"
# Telnet Protocol inherits from whatever above BASE_SESSION_CLASS is specified.
# It is used for all telnet connections, and is also inherited by the SSL Protocol
# (which is just TLS + Telnet).
TELNET_PROTOCOL_CLASS = "evennia.server.portal.telnet.TelnetProtocol"
SSL_PROTOCOL_CLASS = "evennia.server.portal.ssl.SSLProtocol"
# Websocket Client Protocol. This inherits from BASE_SESSION_CLASS. It is used
# for all webclient connections.
WEBSOCKET_PROTOCOL_CLASS = "evennia.server.portal.webclient.WebSocketClient"
# Protocol for the SSH interface. This inherits from BASE_SESSION_CLASS.
SSH_PROTOCOL_CLASS = "evennia.server.portal.ssh.SshProtocol"
# Server-side session class used. This will inherit from BASE_SESSION_CLASS.
# This one isn't as dangerous to replace.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# The Server SessionHandler manages all ServerSessions, handling logins,
# ensuring the login process happens smoothly, handling expected and
# unexpected disconnects. You shouldn't need to touch it, but you can.
# Replace it to implement altered game logic.
SERVER_SESSION_HANDLER_CLASS = "evennia.server.sessionhandler.ServerSessionHandler"
# The Portal SessionHandler manages all incoming connections regardless of
# the protocol in use. It is responsible for keeping them going and informing
# the Server Session Handler of the connections and synchronizing them across the
# AMP connection. You shouldn't ever need to change this. But you can.
PORTAL_SESSION_HANDLER_CLASS = "evennia.server.portal.portalsessionhandler.PortalSessionHandler"
# These are members / properties / attributes kept on both Server and
# Portal Sessions. They are sync'd at various points, such as logins and
# reloads. If you add to this, you may need to adjust the class __init__
# so the additions have somewhere to go. These must be simple things that
# can be pickled - stuff you could serialize to JSON is best.
SESSION_SYNC_ATTRS = (
    "protocol_key",
    "address",
    "suid",
    "sessid",
    "uid",
    "csessid",
    "uname",
    "logged_in",
    "puid",
    "conn_time",
    "cmd_last",
    "cmd_last_visible",
    "cmd_total",
    "protocol_flags",
    "server_data",
    "cmdset_storage_string",
)
# The following are used for the communications between the Portal and Server.
# Here be dragons - only change these if you know exactly what you are doing.
AMP_SERVER_PROTOCOL_CLASS = "evennia.server.portal.amp_server.AMPServerProtocol"
AMP_CLIENT_PROTOCOL_CLASS = "evennia.server.amp_client.AMPServerClientProtocol"
# don't change this manually, it can be checked from code to know if
# being run from a unit test (set by the evennia.utils.test_resources.BaseEvenniaTest
# and BaseEvenniaTestCase unit testing parents)
_TEST_ENVIRONMENT = False
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default django distro.
try:
    import django_extensions  # noqa
except ImportError:
    # Django extensions are not installed in all distros; skip quietly.
    pass
else:
    # Only register the app when the import actually succeeded.
    INSTALLED_APPS += ["django_extensions"]
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
#
# SECURITY: never run a production game with this default value - anyone
# who knows it can forge signed cookies/sessions.
SECRET_KEY = "changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS"
|
en
| 0.800471
|
Master configuration file for Evennia. NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE! All settings changes should be done by copy-pasting the variable and its value to <gamedir>/server/conf/settings.py. Hint: Don't copy&paste over more from this file than you actually want to change. Anything you don't copy&paste will thus retain its default value - which may change as Evennia is developed. This way you can always be sure of what you have changed and what is default behaviour. ###################################################################### # Evennia base server config ###################################################################### # This is the name of your game. Make it catchy! # Short one-sentence blurb describing your game. Shown under the title # on the website and could be used in online listings of your game etc. # The url address to your server, like mymudgame.com. This should be the publicly # visible location. This is used e.g. on the web site to show how you connect to the # game over telnet. Default is localhost (only on your machine). # Lockdown mode will cut off the game from any external connections # and only allow connections from localhost. Requires a cold reboot. # Activate telnet service # A list of ports the Evennia telnet server listens on Can be one or many. # Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6. # Activate Telnet+SSL protocol (SecureSocketLibrary) for supporting clients # Ports to use for Telnet+SSL # Telnet+SSL Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6. # OOB (out-of-band) telnet communication allows Evennia to communicate # special commands and data with enabled Telnet clients. This is used # to create custom client interfaces over a telnet connection. To make # full use of OOB, you need to prepare functions to handle the data # server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this # to work. 
# Activate SSH protocol communication (SecureShell) # Ports to use for SSH # Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6. # Start the evennia django+twisted webserver so you can # browse the evennia website and the admin interface # (Obs - further web configuration can be found below # in the section 'Config for Django web features') # This is a security setting protecting against host poisoning # attacks. It defaults to allowing all. In production, make # sure to change this to your actual host addresses/IPs. # The webserver sits behind a Portal proxy. This is a list # of tuples (proxyport,serverport) used. The proxyports are what # the Portal proxy presents to the world. The serverports are # the internal ports the proxy uses to forward data to the Server-side # webserver (these should not be publicly open) # Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6. # IP addresses that may talk to the server in a reverse proxy configuration, # like NginX. # The webserver uses threadpool for handling requests. This will scale # with server load. Set the minimum and maximum number of threads it # may use as (min, max) (must be > 0) # Start the evennia webclient. This requires the webserver to be running and # offers the fallback ajax-based webclient backbone for browsers not supporting # the websocket one. # Activate Websocket support for modern browsers. If this is on, the # default webclient will use this and only use the ajax version if the browser # is too old to support websockets. Requires WEBCLIENT_ENABLED. # Server-side websocket port to open for the webclient. Note that this value will # be dynamically encoded in the webclient html page to allow the webclient to call # home. 
If the external encoded value needs to be different than this, due to # working through a proxy or docker port-remapping, the environment variable # WEBCLIENT_CLIENT_PROXY_PORT can be used to override this port only for the # front-facing client's sake. # Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6. # Actual URL for webclient component to reach the websocket. You only need # to set this if you know you need it, like using some sort of proxy setup. # If given it must be on the form "ws[s]://hostname[:port]". If left at None, # the client will itself figure out this url based on the server's hostname. # e.g. ws://external.example.com or wss://external.example.com:443 # This determine's whether Evennia's custom admin page is used, or if the # standard Django admin is used. # The Server opens an AMP port so that the portal can # communicate with it. This is an internal functionality of Evennia, usually # operating between two processes on the same machine. You usually don't need to # change this unless you cannot use the default AMP port/host for # whatever reason. # Path to the lib directory containing the bulk of the codebase's code. # Path to the game directory (containing the server/conf/settings.py file) # This is dynamically created- there is generally no need to change this! # unittesting mode # Fallback location (will be replaced by the actual game dir at runtime) # Place to put log files, how often to rotate the log and how big each log file # may become before rotating. # The http log is usually only for debugging since it's very spammy # if this is set to the empty string, lockwarnings will be turned off. # Number of lines to append to rotating channel logs when they rotate # Max size (in bytes) of channel log files before they rotate # Unused by default, but used by e.g. the MapSystem contrib. A place for storing # semi-permanent data and avoid it being rebuilt over and over. It is created # on-demand only. 
# Local time zone for this installation. All choices can be found here: # http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE # Activate time zone in datetimes # Authentication backends. This is the code used to authenticate a user. # Language code for this installation. All choices can be found here: # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes # How long time (in seconds) a user may idle before being logged # out. This can be set as big as desired. A user may avoid being # thrown off by sending the empty system command 'idle' to the server # at regular intervals. Set <=0 to deactivate idle timeout completely. # The idle command can be sent to keep your session active without actually # having to spam normal commands regularly. It gives no feedback, only updates # the idle timer. Note that "idle" will *always* work, even if a different # command-name is given here; this is because the webclient needs a default # to send to avoid proxy timeouts. # The set of encodings tried. An Account object may set an attribute "encoding" on # itself to match the client used. If not set, or wrong encoding is # given, this list is tried, in order, aborting on the first match. # Add sets for languages/regions your accounts are likely to use. # (see http://en.wikipedia.org/wiki/Character_encoding) # Telnet default encoding, unless specified by the client, will be ENCODINGS[0]. # Regular expression applied to all output to a given session in order # to strip away characters (usually various forms of decorations) for the benefit # of users with screen readers. Note that ANSI/MXP doesn't need to # be stripped this way, that is handled automatically. # MXP support means the ability to show clickable links in the client. Clicking # the link will execute a game command. It's a way to add mouse input to the game. # If this is set, MXP can only be sent by the server and not added from the # client side. 
Disabling this is a potential security risk because it could # allow malevolent players to lure others to execute commands they did not # intend to. # Database objects are cached in what is known as the idmapper. The idmapper # caching results in a massive speedup of the server (since it dramatically # limits the number of database accesses needed) and also allows for # storing temporary data on objects. It is however also the main memory # consumer of Evennia. With this setting the cache can be capped and # flushed when it reaches a certain size. Minimum is 50 MB but it is # not recommended to set this to less than 100 MB for a distribution # system. # Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157): # mem(MB) | objs in cache || mem(MB) | objs in cache # 50 | ~1000 || 800 | ~49 000 # 100 | ~4000 || 1200 | ~75 000 # 200 | ~10 000 || 1600 | ~100 000 # 500 | ~30 000 || 2000 | ~125 000 # Note that the estimated memory usage is not exact (and the cap is only # checked every 5 minutes), so err on the side of caution if # running on a server with limited memory. Also note that Python # will not necessarily return the memory to the OS when the idmapper # flashes (the memory will be freed and made available to the Python # process only). How many objects need to be in memory at any given # time depends very much on your game so some experimentation may # be necessary (use @server to see how many objects are in the idmapper # cache at any time). Setting this to None disables the cache cap. # (MB) # This determines how many connections per second the Portal should # accept, as a DoS countermeasure. If the rate exceeds this number, incoming # connections will be queued to this rate, so none will be lost. # Must be set to a value > 0. # Determine how many commands per second a given Session is allowed # to send to the Portal via a connected protocol. Too high rate will # drop the command and echo a warning. 
Note that this will also cap # OOB messages so don't set it too low if you expect a lot of events # from the client! To turn the limiter off, set to <= 0. # The warning to echo back to users if they send commands too fast # custom, extra commands to add to the `evennia` launcher. This is a dict # of {'cmdname': 'path.to.callable', ...}, where the callable will be passed # any extra args given on the command line. For example `evennia cmdname foo bar`. # Determine how large of a string can be sent to the server in number # of characters. If they attempt to enter a string over this character # limit, we stop them and send a message. To make unlimited, set to # 0 or less. # The warning to echo back to users if they enter a very large string # If this is true, errors and tracebacks from the engine will be # echoed as text in-game as well as to the log. This can speed up # debugging. OBS: Showing full tracebacks to regular users could be a # security problem -turn this off in a production game! # Broadcast "Server restart"-like messages to all sessions. ###################################################################### # Evennia Database config ###################################################################### # Database config syntax: # ENGINE - path to the the database backend. Possible choices are: # 'django.db.backends.sqlite3', (default) # 'django.db.backends.mysql', # 'django.db.backends.postgresql', # 'django.db.backends.oracle' (untested). # NAME - database name, or path to the db file for sqlite3 # USER - db admin (unused in sqlite3) # PASSWORD - <PASSWORD> (unused in sqlite3) # HOST - empty string is localhost (unused in sqlite3) # PORT - empty string defaults to localhost (unused in sqlite3) # How long the django-database connection should be kept open, in seconds. # If you get errors about the database having gone away after long idle # periods, shorten this value (e.g. 
MySQL defaults to a timeout of 8 hrs) # When removing or renaming models, such models stored in Attributes may # become orphaned and will return as None. If the change is a rename (that # is, there is a 1:1 pk mapping between the old and the new), the unserializer # can convert old to new when retrieving them. This is a list of tuples # (old_natural_key, new_natural_key). Note that Django ContentTypes' # natural_keys are themselves tuples (appname, modelname). Creation-dates will # not be checked for models specified here. If new_natural_key does not exist, # `None` will be returned and stored back as if no replacement was set. # Default type of autofield (required by Django) ###################################################################### # Evennia webclient options ###################################################################### # default webclient options (without user changing it) # Gags prompts in output window and puts them on the input bar # Shows help files in a new popup window instead of in-pane # Shows notifications of new messages as popup windows # Plays a sound for notifications of new messages ###################################################################### # Evennia pluggable modules ###################################################################### # Plugin modules extend Evennia in various ways. In the cases with no # existing default, there are examples of many of these modules # in contrib/examples. # The command parser module to use. See the default module for which # functions it must implement # On a multi-match when search objects or commands, the user has the # ability to search again with an index marker that differentiates # the results. If multiple "box" objects # are found, they can by default be separated as 1-box, 2-box. Below you # can change the regular expression used. The regex must have one # have two capturing groups (?P<number>...) and (?P<name>...) - the default # parser expects this. 
It should also involve a number starting from 1. # When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE # to properly describe the syntax. # To display multimatch errors in various listings we must display # the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand. # The template will be populated with data and expects the following markup: # {number} - the order of the multimatch, starting from 1; {name} - the # name (key) of the multimatched entity; {aliases} - eventual # aliases for the entity; {info} - extra info like #dbrefs for staff. Don't # forget a line break if you want one match per line. # The handler that outputs errors when using any API-level search # (not manager methods). This function should correctly report errors # both for command- and object-searches. This allows full control # over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default). # Single characters to ignore at the beginning of a command. When set, e.g. # cmd, @cmd and +cmd will all find a command "cmd" or one named "@cmd" etc. If # you have defined two different commands cmd and @cmd you can still enter # @cmd to exactly target the second one. Single-character commands consisting # of only a prefix character will not be stripped. Set to the empty # string ("") to turn off prefix ignore. # The module holding text strings for the connection screen. # This module should contain one or more variables # with strings defining the look of the screen. # Delay to use before sending the evennia.syscmdkeys.CMD_LOGINSTART Command # when a new session connects (this defaults the unloggedin-look for showing # the connection screen). The delay is useful mainly for telnet, to allow # client/server to establish client capabilities like color/mxp etc before # sending any text. A value of 0.3 should be enough. 
While a good idea, it may # cause issues with menu-logins and autoconnects since the menu will not have # started when the autoconnects starts sending menu commands. # A module that must exist - this holds the instructions Evennia will use to # first prepare the database for use (create user #1 and Limbo etc). Only override if # you really know what # you are doing. If replacing, it must contain a function # handle_setup(stepname=None). The function will start being called with no argument # and is expected to maintain a named sequence of steps. Once each step is completed, it # should be saved with ServerConfig.objects.conf('last_initial_setup_step', stepname) # on a crash, the system will continue by calling handle_setup with the last completed # step. The last step in the sequence must be named 'done'. Once this key is saved, # initialization will not run again. # An optional module that, if existing, must hold a function # named at_initial_setup(). This hook method can be used to customize # the server's initial setup sequence (the very first startup of the system). # The check will fail quietly if module doesn't exist or fails to load. # Module containing your custom at_server_start(), at_server_reload() and # at_server_stop() methods. These methods will be called every time # the server starts, reloads and resets/stops respectively. # List of one or more module paths to modules containing a function start_ # plugin_services(application). This module will be called with the main # Evennia Server application when the Server is initiated. # It will be called last in the startup sequence. # List of one or more module paths to modules containing a function # start_plugin_services(application). This module will be called with the # main Evennia Portal application when the Portal is initiated. # It will be called last in the startup sequence. # Module holding MSSP meta data. 
This is used by MUD-crawlers to determine # what type of game you are running, how many accounts you have etc. # Module for web plugins. # Tuple of modules implementing lock functions. All callable functions # inside these modules will be available as lock functions. # Module holding handlers for managing incoming data from the client. These # will be loaded in order, meaning functions in later modules may overload # previous ones if having the same name. # Modules that contain prototypes for use with the spawner mechanism. # Modules containining Prototype functions able to be embedded in prototype # definitions from in-game. # Module holding settings/actions for the dummyrunner program (see the # dummyrunner for more information) # Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of # tuples mapping the exact tag (not a regex!) to the ANSI convertion, like # `(r"%c%r", ansi.ANSI_RED)` (the evennia.utils.ansi module contains all # ANSI escape sequences). Default is to use `|` and `|[` -prefixes. # Extend the available regexes for adding XTERM256 colors in-game. This is given # as a list of regexes, where each regex must contain three anonymous groups for # holding integers 0-5 for the red, green and blue components Default is # is r'\|([0-5])([0-5])([0-5])', which allows e.g. |500 for red. # XTERM256 foreground color replacement # XTERM256 background color replacement. Default is \|\[([0-5])([0-5])([0-5])' # Extend the available regexes for adding XTERM256 grayscale values in-game. Given # as a list of regexes, where each regex must contain one anonymous group containing # a single letter a-z to mark the level from white to black. Default is r'\|=([a-z])', # which allows e.g. |=k for a medium gray. # XTERM256 grayscale foreground # XTERM256 grayscale background. Default is \|\[=([a-z])' # ANSI does not support bright backgrounds, so Evennia fakes this by mapping it to # XTERM256 backgrounds where supported. 
This is a list of tuples that maps the wanted # ansi tag (not a regex!) to a valid XTERM256 background tag, such as `(r'{[r', r'{[500')`. # If set True, the above color settings *replace* the default |-style color markdown # rather than extend it. ###################################################################### # Default command sets and commands ###################################################################### # Command set used on session before account has logged in # (Note that changing these three following cmdset paths will only affect NEW # created characters/objects, not those already in play. So if you want to # change this and have it apply to every object, it's recommended you do it # before having created a lot of objects (or simply reset the database after # the change for simplicity)). # Command set used on the logged-in session # Default set for logged in account with characters (fallback) # Command set for accounts without a character (ooc) # Location to search for cmdsets if full path not given # Fallbacks for cmdset paths that fail to load. Note that if you change the path for your # default cmdsets, you will also need to copy CMDSET_FALLBACKS after your change in your # settings file for it to detect the change. # Parent class for all default commands. Changing this class will # modify all default commands, so do so carefully. # Command.arg_regex is a regular expression desribing how the arguments # to the command must be structured for the command to match a given user # input. By default the command-name should end with a space or / (since the # default commands uses MuxCommand and /switches). # By default, Command.msg will only send data to the Session calling # the Command in the first place. If set, Command.msg will instead return # data to all Sessions connected to the Account/Character associated with # calling the Command. This may be more intuitive for users in certain # multisession modes. 
# The default lockstring of a command. ###################################################################### # Typeclasses and other paths ###################################################################### # These are paths that will be prefixed to the paths given if the # immediately entered path fail to find a typeclass. It allows for # shorter input strings. They must either base off the game directory # or start from the evennia library. # Typeclass for account objects (linked to a character) (fallback) # Typeclass and base for all objects (fallback) # Typeclass for character objects linked to an account (fallback) # Typeclass for rooms (fallback) # Typeclass for Exit objects (fallback). # Typeclass for Channel (fallback). # Typeclass for Scripts (fallback). You usually don't need to change this # but create custom variations of scripts on a per-case basis instead. # The default home location used for all objects. This is used as a # fallback if an object's normal home location is deleted. Default # is Limbo (#2). # The start position for new characters. Default is Limbo (#2). # MULTISESSION_MODE = 0, 1 - used by default unloggedin create command # MULTISESSION_MODE = 2, 3 - used by default character_create command # Lookups of Attributes, Tags, Nicks, Aliases can be aggressively # cached to avoid repeated database hits. This often gives noticeable # performance gains since they are called so often. Drawback is that # if you are accessing the database from multiple processes (such as # from a website -not- running Evennia's own webserver) data may go # out of sync between the processes. Keep on unless you face such # issues. # These are fallbacks for BASE typeclasses failing to load. Usually needed only # during doc building. 
The system expects these to *always* load correctly, so # only modify if you are making fundamental changes to how objects/accounts # work and know what you are doing ###################################################################### # Options and validators ###################################################################### # Options available on Accounts. Each such option is described by a # class available from evennia.OPTION_CLASSES, in turn making use # of validators from evennia.VALIDATOR_FUNCS to validate input when # the user changes an option. The options are accessed through the # `Account.options` handler. # ("Description", 'Option Class name in evennia.OPTION_CLASS_MODULES', 'Default Value') # Modules holding Option classes, responsible for serializing the option and # calling validator functions on it. Same-named functions in modules added # later in this list will override those added earlier. # Module holding validator functions. These are used as a resource for # validating options, but can also be used as input validators in general. # Same-named functions in modules added later in this list will override those # added earlier. ###################################################################### # Batch processors ###################################################################### # Python path to a directory to be searched for batch scripts # for the batch processors (.ev and/or .py files). ###################################################################### # Game Time setup ###################################################################### # You don't actually have to use this, but it affects the routines in # evennia.utils.gametime.py and allows for a convenient measure to # determine the current in-game time. You can of course interpret # "week", "month" etc as your own in-game time units as desired. # The time factor dictates if the game world runs faster (timefactor>1) # or slower (timefactor<1) than the real world. 
# The starting point of your game time (the epoch), in seconds. # In Python a value of 0 means Jan 1 1970 (use negatives for earlier # start date). This will affect the returns from the utils.gametime # module. If None, the server's first start-time is used as the epoch. # Normally, game time will only increase when the server runs. If this is True, # game time will not pause when the server reloads or goes offline. This setting # together with a time factor of 1 should keep the game in sync with # the real time (add a different epoch to shift time) ###################################################################### # Help system ###################################################################### # Help output from CmdHelp are wrapped in an EvMore call # (excluding webclient with separate help popups). If continuous scroll # is preferred, change 'HELP_MORE' to False. EvMORE uses CLIENT_DEFAULT_HEIGHT # The help category of a command if not specified. # The help category of a db or file-based help entry if not specified # File-based help entries. These are modules containing dicts defining help # entries. They can be used together with in-database entries created in-game. # if topics listed in help should be clickable # clickable links only work on clients that support MXP. ###################################################################### # FuncParser # # Strings parsed with the FuncParser can contain 'callables' on the # form $funcname(args,kwargs), which will lead to actual Python functions # being executed. ###################################################################### # This changes the start-symbol for the funcparser callable. Note that # this will make a lot of documentation invalid and there may also be # other unexpected side effects, so change with caution. # The symbol to use to escape Func # This is the global max nesting-level for nesting functions in # the funcparser. This protects against infinite loops. 
# Activate funcparser for all outgoing strings. The current Session # will be passed into the parser (used to be called inlinefuncs) # Only functions defined globally (and not starting with '_') in # these modules will be considered valid inlinefuncs. The list # is loaded from left-to-right, same-named functions will overload # Prototype values are also parsed with FuncParser. These modules # define which $func callables are available to use in prototypes. ###################################################################### # Global Scripts ###################################################################### # Global scripts started here will be available through # 'evennia.GLOBAL_SCRIPTS.key'. The scripts will survive a reload and be # recreated automatically if deleted. Each entry must have the script keys, # whereas all other fields in the specification are optional. If 'typeclass' is # not given, BASE_SCRIPT_TYPECLASS will be assumed. Note that if you change # typeclass for the same key, a new Script will replace the old one on # `evennia.GLOBAL_SCRIPTS`. # 'key': {'typeclass': 'typeclass.path.here', # 'repeats': -1, 'interval': 50, 'desc': 'Example script'}, ###################################################################### # Default Account setup and access ###################################################################### # Different Multisession modes allow a player (=account) to connect to the # game simultaneously with multiple clients (=sessions). In modes 0,1 there is # only one character created to the same name as the account at first login. # In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS # value (below) defines how many characters the default char_create command # allow per account. 
# 0 - single session, one account, one character, when a new session is # connected, the old one is disconnected # 1 - multiple sessions, one account, one character, each session getting # the same data # 2 - multiple sessions, one account, many characters, one session per # character (disconnects multiplets) # 3 - like mode 2, except multiple sessions can puppet one character, each # session getting the same data. # The maximum number of characters allowed by the default ooc char-creation command # The access hierarchy, in climbing order. A higher permission in the # hierarchy includes access of all levels below it. Used by the perm()/pperm() # lock functions, which accepts both plural and singular (Admin & Admins) # note-only used if GUEST_ENABLED=True # The default permission given to all new accounts # Default sizes for client window (in number of characters), if client # is not supplying this on its own # telnet standard height is 24; does anyone use such low-res displays anymore? # Set rate limits per-IP on account creations and login attempts. Set limits # to None to disable. # Certain characters, like html tags, line breaks and tabs are stripped # from user input for commands using the `evennia.utils.strip_unsafe_input` helper # since they can be exploitative. This list defines Account-level permissions # (and higher) that bypass this stripping. It is used as a fallback if a # specific list of perms are not given to the helper function. ###################################################################### # Guest accounts ###################################################################### # This enables guest logins, by default via "connect guest". Note that # you need to edit your login screen to inform about this possibility. # Typeclass for guest account objects (linked to a character) # The permission given to guests # The default home location used for guests. # The start position used for guest characters. 
# The naming convention used for creating new guest # accounts/characters. The size of this list also determines how many # guests may be on the game at once. The default is a maximum of nine # guests, named Guest1 through Guest9. ###################################################################### # In-game Channels created from server start ###################################################################### # The mudinfo channel is a read-only channel used by Evennia to replay status # messages, connection info etc to staff. The superuser will automatically be # subscribed to this channel. If set to None, the channel is disabled and # status messages will only be logged (not recommended). # Optional channel (same form as CHANNEL_MUDINFO) that will receive connection # messages like ("<account> has (dis)connected"). While the MudInfo channel # will also receieve this info, this channel is meant for non-staffers. If # None, this information will only be logged. # New accounts will auto-sub to the default channels given below (but they can # unsub at any time). Traditionally, at least 'public' should exist. Entries # will be (re)created on the next reload, but removing or updating a same-key # channel from this list will NOT automatically change/remove it in the game, # that needs to be done manually. Note: To create other, non-auto-subbed # channels, create them manually in server/conf/at_initial_setup.py. ###################################################################### # External Connections ###################################################################### # Note: You do *not* have to make your MUD open to # the public to use the external connections, they # operate as long as you have an internet connection, # just like stand-alone chat clients. # The Evennia Game Index is a dynamic listing of Evennia games. You can add your game # to this list also if it is in closed pre-alpha development. 
# This dict # usually SERVERNAME # pre-alpha, alpha, beta or launched # could be GAME_SLOGAN # email # mygame.com # 1234 # http://mygame.com # http://mygame.com/webclient # Evennia can connect to external IRC channels and # echo what is said on the channel to IRC and vice # versa. Obs - make sure the IRC network allows bots. # When enabled, command @irc2chan will be available in-game # IRC requires that you have twisted.words installed. # RSS allows to connect RSS feeds (from forum updates, blogs etc) to # an in-game channel. The channel will be updated when the rss feed # updates. Use @rss2chan in game to connect if this setting is # active. OBS: RSS support requires the python-feedparser package to # be installed (through package manager or from the website # http://code.google.com/p/feedparser/) # 10 minutes # Grapevine (grapevine.haus) is a network for listing MUDs as well as allow # users of said MUDs to communicate with each other on shared channels. To use, # your game must first be registered by logging in and creating a game entry at # https://grapevine.haus. Evennia links grapevine channels to in-game channels # with the @grapevine2chan command, available once this flag is set # Grapevine requires installing the pyopenssl library (pip install pyopenssl) # Grapevine channels to allow connection to. See https://grapevine.haus/chat # for the available channels. Only channels in this list can be linked to in-game # channels later. # Grapevine authentication. Register your game at https://grapevine.haus to get # them. These are secret and should thus be overridden in secret_settings file ###################################################################### # Django web features ###################################################################### # While DEBUG is False, show a regular server error page on the web # stuff, email the traceback to the people in the ADMINS tuple # below. If True, show a detailed traceback for the web # browser to display. 
Note however that this will leak memory when # active, so make sure to turn it off for a production server! # Emails are sent to these people if the above DEBUG value is False. If you'd # rather prefer nobody receives emails, leave this commented out or empty. # '<NAME>', '<EMAIL>'),) # These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True. # This is a public point of contact for players or the public to contact # a staff member or administrator of the site. It is publicly posted. # If using Sites/Pages from the web admin, this value must be set to the # database-id of the Site (domain) we want to use with this game's Pages. # The age for sessions. # Default: 1209600 (2 weeks, in seconds) # Session cookie domain # Default: None # The name of the cookie to use for sessions. # Default: 'sessionid' # Should the session expire when the browser closes? # Default: False # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. # Where to find locales (no need to change this, most likely) # How to display time stamps in e.g. the admin # ISO 8601 but without T and timezone # This should be turned off unless you want to do tests with Django's # development webserver (normally Evennia runs its own server) # The master urlconf file that contains all of the sub-branches to the # applications. Change this to add your own URLs to the website. # Where users are redirected after logging in via contrib.auth.login. # Where to redirect users when using the @login_required decorator. # Where to redirect users who wish to logout. # URL that handles the media served from MEDIA_ROOT. # Example: "http://media.lawrence.com" # Absolute path to the directory that holds file uploads from web apps. # URL prefix for admin media -- CSS, JavaScript and images. Make sure # to use a trailing slash. Admin-related files are searched under STATIC_URL/admin. 
# Absolute path to directory where the static data will be gathered into to be # served by webserver. # Location of static data to overload the defaults from # evennia/web/static. # Patterns of files in the static directories. Used here to make sure that # its readme file is preserved but unused. # The name of the currently selected web template. This corresponds to the # directory names shown in the templates directory. # We setup the location of the website template as well as the admin site. # While true, show "pretty" error messages for template syntax errors. # Django cache settings # https://docs.djangoproject.com/en/dev/topics/cache/#setting-up-the-cache # MiddleWare are semi-transparent extensions to Django's functionality. # see http://www.djangoproject.com/documentation/middleware/ for a more detailed # explanation. # 1.4? ###################################################################### # Evennia components ###################################################################### # Global and Evennia-specific apps. This ties everything together so we can # refer to app models and perform DB syncs. # replaces django.contrib.admin # The user profile extends the User object with more functionality; # This should usually not be changed. # Password validation plugins # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators # Username validation plugins # Use a custom test runner that just tests Evennia-specific apps. # Messages and Bootstrap don't classify events the same way; this setting maps # messages.error() to Bootstrap 'danger' classes. # Django REST Framework settings # django_filters allows you to specify search fields for models in an API View # whether to paginate results and how many per page # require logged in users to call API so that access checks can work on them # These are the different ways people can authenticate for API requests - via # session or with user/password. 
Other ways are possible, such as via tokens # or oauth, but require additional dependencies. # default permission checks used by the EvenniaPermission class # No throttle class set by default. Setting one also requires a cache backend to be specified. # To enable the REST api, turn this to True ###################################################################### # Networking Replaceables ###################################################################### # This allows for replacing the very core of the infrastructure holding Evennia # together with your own variations. You should usually never have to touch # this, and if so, you really need to know what you are doing. # The Base Session Class is used as a parent class for all Protocols such as # Telnet and SSH.) Changing this could be really dangerous. It will cascade # to tons of classes. You generally shouldn't need to touch protocols. # Telnet Protocol inherits from whatever above BASE_SESSION_CLASS is specified. # It is used for all telnet connections, and is also inherited by the SSL Protocol # (which is just TLS + Telnet). # Websocket Client Protocol. This inherits from BASE_SESSION_CLASS. It is used # for all webclient connections. # Protocol for the SSH interface. This inherits from BASE_SESSION_CLASS. # Server-side session class used. This will inherit from BASE_SESSION_CLASS. # This one isn't as dangerous to replace. # The Server SessionHandler manages all ServerSessions, handling logins, # ensuring the login process happens smoothly, handling expected and # unexpected disconnects. You shouldn't need to touch it, but you can. # Replace it to implement altered game logic. # The Portal SessionHandler manages all incoming connections regardless of # the protocol in use. It is responsible for keeping them going and informing # the Server Session Handler of the connections and synchronizing them across the # AMP connection. You shouldn't ever need to change this. But you can. 
# These are members / properties / attributes kept on both Server and # Portal Sessions. They are sync'd at various points, such as logins and # reloads. If you add to this, you may need to adjust the class __init__ # so the additions have somewhere to go. These must be simple things that # can be pickled - stuff you could serialize to JSON is best. # The following are used for the communications between the Portal and Server. # Very dragons territory. # don't change this manually, it can be checked from code to know if # being run from a unit test (set by the evennia.utils.test_resources.BaseEvenniaTest # and BaseEvenniaTestCase unit testing parents) ###################################################################### # Django extensions ###################################################################### # Django extesions are useful third-party tools that are not # always included in the default django distro. # noqa # Django extensions are not installed in all distros. ####################################################################### # SECRET_KEY ####################################################################### # This is the signing key for the cookies generated by Evennia's # web interface. # # It is a fallback for the SECRET_KEY setting in settings.py, which # is randomly seeded when settings.py is first created. If copying # from here, make sure to change it! #&*($&*(#*(&SDFKJJKLS*(@#KJAS"
| 1.943794
| 2
|
openmlpimp/utils/misc.py
|
kw-corne/openml-pimp
| 12
|
6628791
|
import arff
import openml
import openmlpimp
import sklearn
from time import gmtime, strftime
def get_time():
    """Return the current UTC time as a bracketed '[YYYY-MM-DD HH:MM:SS]' stamp."""
    now = gmtime()
    return strftime("[%Y-%m-%d %H:%M:%S]", now)
def fixed_parameters_to_suffix(fixed_parameters):
    """Build a deterministic folder suffix from a dict of fixed parameters.

    Returns '/vanilla' when no parameters are fixed (None or empty dict);
    otherwise a path component of 'key_value' pairs sorted by key and
    joined with '__', e.g. {'b': 1, 'a': 2} -> '/a_2__b_1'.
    """
    if not fixed_parameters:
        return '/vanilla'
    pairs = ('%s_%s' % (key, fixed_parameters[key]) for key in sorted(fixed_parameters))
    return '/' + '__'.join(pairs)
def do_run(task, optimizer, output_dir, internet_access=True, publish=False):
    """Evaluate `optimizer` on an OpenML `task` and export run artifacts.

    :param task: openml.tasks.OpenMLTask to run on.
    :param optimizer: a scikit-learn (hyperparameter search) estimator.
    :param output_dir: directory receiving run.xml / predictions.arff / trace.arff.
    :param internet_access: if True, evaluate via the OpenML API; if False,
        evaluate locally through openml-python internals (no server contact).
    :param publish: if True (requires internet_access), upload the run to the
        OpenML server and dump its description/predictions/trace to disk.
    :return: the resulting openml.runs.OpenMLRun.
    """
    if internet_access:
        run = openml.runs.run_model_on_task(task, optimizer)
        _print_score(task, run)
        if publish:
            run = run.publish()
            # NOTE(review): the description XML presumably needs server-side
            # ids that only exist after publishing -- confirm before moving
            # this export out of the `publish` branch.
            with open(output_dir + '/run.xml', 'w') as fp:
                fp.write(run._create_description_xml())
            _write_run_files(run, output_dir)
        return run
    else:
        # Offline path: evaluate through openml-python internals; dataset and
        # flow ids are unknown because nothing is fetched from the server.
        res = openml.runs.functions._run_task_get_arffcontent(optimizer, task, task.class_labels)
        run = openml.runs.OpenMLRun(task_id=task.task_id, dataset_id=None, flow_id=None, model=optimizer)
        run.data_content, run.trace_content, run.trace_attributes, run.fold_evaluations, _ = res
        _print_score(task, run)
        _write_run_files(run, output_dir)
        return run


def _print_score(task, run):
    """Print the run's mean accuracy together with the dataset name."""
    score = run.get_metric_fn(sklearn.metrics.accuracy_score)
    print('%s [SCORE] Data: %s; Accuracy: %0.2f' % (
        openmlpimp.utils.get_time(), task.get_dataset().name, score.mean()))


def _write_run_files(run, output_dir):
    """Serialize predictions (and the optimization trace, if any) as ARFF."""
    with open(output_dir + '/predictions.arff', 'w') as fp:
        fp.write(arff.dumps(run._generate_arff_dict()))
    if run.trace_content is not None:
        with open(output_dir + '/trace.arff', 'w') as fp:
            fp.write(arff.dumps(run._generate_trace_arff_dict()))
def name_mapping(classifier, name, replace_underscores=True):
    """Map an internal hyperparameter name to a human-readable label.

    `name` may be pipeline-qualified (e.g. 'classifier__max_depth'); the
    component after the first '__' is treated as the parameter itself.
    Classifier-specific aliases ('iterations', 'complexity', ...) are applied
    first; otherwise 'max'/'min' prefixes become 'max.'/'min.' and the
    underscores are replaced by spaces unless replace_underscores is False.
    """
    components = name.split('__')
    param = components[1] if len(components) > 1 else name

    if name in ('imputation__strategy', 'strategy'):
        return 'imputation'

    if classifier == 'adaboost':
        if param == 'n_estimators':
            return 'iterations'
        if len(components) == 3 and components[2] == 'max_depth':
            return 'max. depth' if replace_underscores else 'max._depth'
    elif classifier == 'libsvm_svc':
        if param == 'C':
            return 'complexity'
        if param == 'tol':
            return 'tolerance'

    pretty = ['max.' if piece == 'max' else 'min.' if piece == 'min' else piece
              for piece in param.split('_')]
    separator = ' ' if replace_underscores else '_'
    return separator.join(pretty)
|
import arff
import openml
import openmlpimp
import sklearn
from time import gmtime, strftime
def get_time():
    """Return the current UTC time as a bracketed '[YYYY-MM-DD HH:MM:SS]' stamp."""
    now = gmtime()
    return strftime("[%Y-%m-%d %H:%M:%S]", now)
def fixed_parameters_to_suffix(fixed_parameters):
    """Build a deterministic folder suffix from a dict of fixed parameters.

    Returns '/vanilla' when no parameters are fixed (None or empty dict);
    otherwise a path component of 'key_value' pairs sorted by key and
    joined with '__', e.g. {'b': 1, 'a': 2} -> '/a_2__b_1'.
    """
    if not fixed_parameters:
        return '/vanilla'
    pairs = ('%s_%s' % (key, fixed_parameters[key]) for key in sorted(fixed_parameters))
    return '/' + '__'.join(pairs)
def do_run(task, optimizer, output_dir, internet_access=True, publish=False):
    """Evaluate `optimizer` on an OpenML `task` and export run artifacts.

    :param task: openml.tasks.OpenMLTask to run on.
    :param optimizer: a scikit-learn (hyperparameter search) estimator.
    :param output_dir: directory receiving run.xml / predictions.arff / trace.arff.
    :param internet_access: if True, evaluate via the OpenML API; if False,
        evaluate locally through openml-python internals (no server contact).
    :param publish: if True (requires internet_access), upload the run to the
        OpenML server and dump its description/predictions/trace to disk.
    :return: the resulting openml.runs.OpenMLRun.
    """
    if internet_access:
        run = openml.runs.run_model_on_task(task, optimizer)
        _print_score(task, run)
        if publish:
            run = run.publish()
            # NOTE(review): the description XML presumably needs server-side
            # ids that only exist after publishing -- confirm before moving
            # this export out of the `publish` branch.
            with open(output_dir + '/run.xml', 'w') as fp:
                fp.write(run._create_description_xml())
            _write_run_files(run, output_dir)
        return run
    else:
        # Offline path: evaluate through openml-python internals; dataset and
        # flow ids are unknown because nothing is fetched from the server.
        res = openml.runs.functions._run_task_get_arffcontent(optimizer, task, task.class_labels)
        run = openml.runs.OpenMLRun(task_id=task.task_id, dataset_id=None, flow_id=None, model=optimizer)
        run.data_content, run.trace_content, run.trace_attributes, run.fold_evaluations, _ = res
        _print_score(task, run)
        _write_run_files(run, output_dir)
        return run


def _print_score(task, run):
    """Print the run's mean accuracy together with the dataset name."""
    score = run.get_metric_fn(sklearn.metrics.accuracy_score)
    print('%s [SCORE] Data: %s; Accuracy: %0.2f' % (
        openmlpimp.utils.get_time(), task.get_dataset().name, score.mean()))


def _write_run_files(run, output_dir):
    """Serialize predictions (and the optimization trace, if any) as ARFF."""
    with open(output_dir + '/predictions.arff', 'w') as fp:
        fp.write(arff.dumps(run._generate_arff_dict()))
    if run.trace_content is not None:
        with open(output_dir + '/trace.arff', 'w') as fp:
            fp.write(arff.dumps(run._generate_trace_arff_dict()))
def name_mapping(classifier, name, replace_underscores=True):
    """Map an internal hyperparameter name to a human-readable label.

    `name` may be pipeline-qualified (e.g. 'classifier__max_depth'); the
    component after the first '__' is treated as the parameter itself.
    Classifier-specific aliases ('iterations', 'complexity', ...) are applied
    first; otherwise 'max'/'min' prefixes become 'max.'/'min.' and the
    underscores are replaced by spaces unless replace_underscores is False.
    """
    components = name.split('__')
    param = components[1] if len(components) > 1 else name

    if name in ('imputation__strategy', 'strategy'):
        return 'imputation'

    if classifier == 'adaboost':
        if param == 'n_estimators':
            return 'iterations'
        if len(components) == 3 and components[2] == 'max_depth':
            return 'max. depth' if replace_underscores else 'max._depth'
    elif classifier == 'libsvm_svc':
        if param == 'C':
            return 'complexity'
        if param == 'tol':
            return 'tolerance'

    pretty = ['max.' if piece == 'max' else 'min.' if piece == 'min' else piece
              for piece in param.split('_')]
    separator = ' ' if replace_underscores else '_'
    return separator.join(pretty)
|
en
| 0.176427
|
# elif splitted[1] == 'coef0': # return 'tolerance'
| 2.336396
| 2
|
databricks/koalas/ml.py
|
garawalid/spark-pandas
| 2
|
6628792
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
def corr(kdf, method='pearson'):
    """
    The correlation matrix of all the numerical columns of this dataframe.

    Only accepts scalar numerical values for now.

    :param kdf: the koalas dataframe.
    :param method: {'pearson', 'spearman'}
                   * pearson : standard correlation coefficient
                   * spearman : Spearman rank correlation
    :return: :class:`pandas.DataFrame`
    """
    assert method in ('pearson', 'spearman'), method
    assembled, column_names = to_numeric_df(kdf)
    # Spark returns the whole correlation matrix as a single Matrix cell;
    # pull it out and convert to a labelled pandas DataFrame.
    matrix_cell = Correlation.corr(assembled, "_1", method).toPandas().iloc[0, 0]
    result = pd.DataFrame(matrix_cell.toArray(), columns=column_names)
    return result.set_index(pd.Index(column_names))
def to_numeric_df(kdf):
    """
    Takes a dataframe and turns it into a dataframe containing a single numerical
    vector of doubles. This dataframe has a single field called '_1'.

    TODO: index is not preserved currently
    :param kdf: the koalas dataframe.
    :return: a pair of dataframe, list of strings (the name of the columns
             that were converted to numerical types)
    """
    # TODO, it should be more robust.
    numeric_dtypes = {np.dtype(t) for t in (np.int8, np.int16, np.int32, np.int64,
                                            np.float32, np.float64, np.bool_)}
    selected = [name for name in kdf._metadata.column_fields
                if kdf[name].dtype in numeric_dtypes]
    assembler = VectorAssembler(inputCols=selected, outputCol="_1")
    assembled = assembler.transform(kdf._sdf.select(*selected)).select("_1")
    return assembled, selected
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
def corr(kdf, method='pearson'):
    """
    The correlation matrix of all the numerical columns of this dataframe.

    Only accepts scalar numerical values for now.

    :param kdf: the koalas dataframe.
    :param method: {'pearson', 'spearman'}
                   * pearson : standard correlation coefficient
                   * spearman : Spearman rank correlation
    :return: :class:`pandas.DataFrame`
    """
    assert method in ('pearson', 'spearman'), method
    assembled, column_names = to_numeric_df(kdf)
    # Spark returns the whole correlation matrix as a single Matrix cell;
    # pull it out and convert to a labelled pandas DataFrame.
    matrix_cell = Correlation.corr(assembled, "_1", method).toPandas().iloc[0, 0]
    result = pd.DataFrame(matrix_cell.toArray(), columns=column_names)
    return result.set_index(pd.Index(column_names))
def to_numeric_df(kdf):
    """
    Takes a dataframe and turns it into a dataframe containing a single numerical
    vector of doubles. This dataframe has a single field called '_1'.

    TODO: index is not preserved currently
    :param kdf: the koalas dataframe.
    :return: a pair of dataframe, list of strings (the name of the columns
             that were converted to numerical types)
    """
    # TODO, it should be more robust.
    numeric_dtypes = {np.dtype(t) for t in (np.int8, np.int16, np.int32, np.int64,
                                            np.float32, np.float64, np.bool_)}
    selected = [name for name in kdf._metadata.column_fields
                if kdf[name].dtype in numeric_dtypes]
    assembler = VectorAssembler(inputCols=selected, outputCol="_1")
    assembled = assembler.transform(kdf._sdf.select(*selected)).select("_1")
    return assembled, selected
|
en
| 0.807508
|
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The correlation matrix of all the numerical columns of this dataframe. Only accepts scalar numerical values for now. :param kdf: the koalas dataframe. :param method: {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation :return: :class:`pandas.DataFrame` Takes a dataframe and turns it into a dataframe containing a single numerical vector of doubles. This dataframe has a single field called '_1'. TODO: index is not preserved currently :param df: :return: a pair of dataframe, list of strings (the name of the columns that were converted to numerical types) # TODO, it should be more robust.
| 2.792186
| 3
|
python/pyspark/sql/pandas/functions.py
|
melin/spark
| 2
|
6628793
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import warnings
from inspect import getfullargspec
from pyspark.rdd import PythonEvalType
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
from pyspark.sql.types import DataType
from pyspark.sql.udf import _create_udf
class PandasUDFType(object):
    """Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
    """
    # Each constant aliases the corresponding PythonEvalType enum value and is
    # what callers pass as the `functionType` argument of `pandas_udf`.
    SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
    SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
    GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
    GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a pandas user defined function (a.k.a. vectorized user defined function).
Pandas UDFs are user defined functions that are executed by Spark using Arrow to transfer
data and Pandas to work with the data, which allows vectorized operations. A Pandas UDF
is defined using the `pandas_udf` as a decorator or to wrap the function, and no
additional configuration is required. A Pandas UDF behaves as a regular PySpark function
API in general.
.. versionadded:: 2.3.0
Parameters
----------
f : function, optional
user-defined function. A python function if used as a standalone function
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
functionType : int, optional
an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR. This parameter exists for compatibility.
Using Python type hints is encouraged.
Examples
--------
In order to use this API, customarily the below are imported:
>>> import pandas as pd
>>> from pyspark.sql.functions import pandas_udf
From Spark 3.0 with Python 3.6+, `Python type hints <https://www.python.org/dev/peps/pep-0484>`_
detect the function types as below:
>>> @pandas_udf(IntegerType())
... def slen(s: pd.Series) -> pd.Series:
... return s.str.len()
Prior to Spark 3.0, the pandas UDF used `functionType` to decide the execution type as below:
>>> from pyspark.sql.functions import PandasUDFType
>>> from pyspark.sql.types import IntegerType
>>> @pandas_udf(IntegerType(), PandasUDFType.SCALAR)
... def slen(s):
... return s.str.len()
It is preferred to specify type hints for the pandas UDF instead of specifying pandas UDF
type via `functionType` which will be deprecated in the future releases.
Note that the type hint should use `pandas.Series` in all cases but there is one variant
that `pandas.DataFrame` should be used for its input or output type hint instead when the input
or output column is of :class:`pyspark.sql.types.StructType`. The following example shows
a Pandas UDF which takes long column, string column and struct column, and outputs a struct
column. It requires the function to specify the type hints of `pandas.Series` and
`pandas.DataFrame` as below:
>>> @pandas_udf("col1 string, col2 long")
>>> def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
... s3['col2'] = s1 + s2.str.len()
... return s3
...
>>> # Create a Spark DataFrame that has three columns including a struct column.
... df = spark.createDataFrame(
... [[1, "a string", ("a nested string",)]],
... "long_col long, string_col string, struct_col struct<col1:string>")
>>> df.printSchema()
root
|-- long_column: long (nullable = true)
|-- string_column: string (nullable = true)
|-- struct_column: struct (nullable = true)
| |-- col1: string (nullable = true)
>>> df.select(func("long_col", "string_col", "struct_col")).printSchema()
|-- func(long_col, string_col, struct_col): struct (nullable = true)
| |-- col1: string (nullable = true)
| |-- col2: long (nullable = true)
In the following sections, it describes the combinations of the supported type hints. For
simplicity, `pandas.DataFrame` variant is omitted.
* Series to Series
`pandas.Series`, ... -> `pandas.Series`
The function takes one or more `pandas.Series` and outputs one `pandas.Series`.
The output of the function should always be of the same length as the input.
>>> @pandas_udf("string")
... def to_upper(s: pd.Series) -> pd.Series:
... return s.str.upper()
...
>>> df = spark.createDataFrame([("<NAME>",)], ("name",))
>>> df.select(to_upper("name")).show()
+--------------+
|to_upper(name)|
+--------------+
| <NAME>|
+--------------+
>>> @pandas_udf("first string, last string")
... def split_expand(s: pd.Series) -> pd.DataFrame:
... return s.str.split(expand=True)
...
>>> df = spark.createDataFrame([("<NAME>",)], ("name",))
>>> df.select(split_expand("name")).show()
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of the input is not that of the whole input column, but is the
length of an internal batch used for each call to the function.
* Iterator of Series to Iterator of Series
`Iterator[pandas.Series]` -> `Iterator[pandas.Series]`
The function takes an iterator of `pandas.Series` and outputs an iterator of
`pandas.Series`. In this case, the created pandas UDF instance requires one input
column when this is called as a PySpark column. The length of the entire output from
the function should be the same length of the entire input; therefore, it can
prefetch the data from the input iterator as long as the lengths are the same.
It is also useful when the UDF execution
requires initializing some states although internally it works identically as
Series to Series case. The pseudocode below illustrates the example.
.. highlight:: python
.. code-block:: python
@pandas_udf("long")
def calculate(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
# Do some expensive initialization with a state
state = very_expensive_initialization()
for x in iterator:
# Use that state for whole iterator.
yield calculate_with_state(x, state)
df.select(calculate("value")).show()
>>> from typing import Iterator
>>> @pandas_udf("long")
... def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
... for s in iterator:
... yield s + 1
...
>>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
>>> df.select(plus_one(df.v)).show()
+-----------+
|plus_one(v)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
.. note:: The length of each series is the length of a batch internally used.
* Iterator of Multiple Series to Iterator of Series
`Iterator[Tuple[pandas.Series, ...]]` -> `Iterator[pandas.Series]`
The function takes an iterator of a tuple of multiple `pandas.Series` and outputs an
iterator of `pandas.Series`. In this case, the created pandas UDF instance requires
input columns as many as the series when this is called as a PySpark column.
Otherwise, it has the same characteristics and restrictions as Iterator of Series
to Iterator of Series case.
>>> from typing import Iterator, Tuple
>>> from pyspark.sql.functions import struct, col
>>> @pandas_udf("long")
... def multiply(iterator: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]:
... for s1, df in iterator:
... yield s1 * df.v
...
>>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
>>> df.withColumn('output', multiply(col("v"), struct(col("v")))).show()
+---+------+
| v|output|
+---+------+
| 1| 1|
| 2| 4|
| 3| 9|
+---+------+
.. note:: The length of each series is the length of a batch internally used.
* Series to Scalar
`pandas.Series`, ... -> `Any`
The function takes `pandas.Series` and returns a scalar value. The `returnType`
should be a primitive data type, and the returned scalar can be either a python primitive
type, e.g., int or float or a numpy data type, e.g., numpy.int64 or numpy.float64.
`Any` should ideally be a specific scalar type accordingly.
>>> @pandas_udf("double")
... def mean_udf(v: pd.Series) -> float:
... return v.mean()
...
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
>>> df.groupby("id").agg(mean_udf(df['v'])).show()
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This UDF can also be used as window functions as below:
>>> from pyspark.sql import Window
>>> @pandas_udf("double")
... def mean_udf(v: pd.Series) -> float:
... return v.mean()
...
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
>>> w = Window.partitionBy('id').orderBy('v').rowsBetween(-1, 0)
>>> df.withColumn('mean_v', mean_udf("v").over(w)).show()
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
Notes
-----
The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions and it ends up with being executed all internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
The user-defined functions do not take keyword arguments on the calling side.
The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined `returnType` (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
Currently,
:class:`pyspark.sql.types.MapType`,
:class:`pyspark.sql.types.ArrayType` of :class:`pyspark.sql.types.TimestampType` and
nested :class:`pyspark.sql.types.StructType`
are currently not supported as output types.
See Also
--------
pyspark.sql.GroupedData.agg
pyspark.sql.DataFrame.mapInPandas
pyspark.sql.GroupedData.applyInPandas
pyspark.sql.PandasCogroupedOps.applyInPandas
pyspark.sql.UDFRegistration.register
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)| A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| 86400000000000| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| 'a'| X| X| X| X| X| 'A'| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')|bytearray(b'A')| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa #
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 1.1.1 and PyArrow 1.0.1 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
require_minimum_pandas_version()
require_minimum_pyarrow_version()
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = None
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = None
if return_type is None:
raise ValueError("Invalid return type: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
None]: # None means it should infer the type from type hints.
raise ValueError("Invalid function type: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_pandas_udf, returnType=return_type, evalType=eval_type)
else:
return _create_pandas_udf(f=f, returnType=return_type, evalType=eval_type)
def _create_pandas_udf(f, returnType, evalType):
    """Resolve the pandas eval type for ``f`` and build the underlying UDF.

    When ``evalType`` was not supplied, it is inferred from the function's
    Python type hints; explicitly supplied legacy types trigger a deprecation
    warning. The function arity is then validated per UDF flavor before
    delegating to ``_create_udf``.
    """
    from inspect import signature

    spec = getfullargspec(f)

    deprecated_explicit_types = (
        PythonEvalType.SQL_SCALAR_PANDAS_UDF,
        PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
        PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
    )
    always_preset_types = (
        PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
        PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
        PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
    )

    if evalType in deprecated_explicit_types:
        warnings.warn(
            "In Python 3.6+ and Spark 3.0+, it is preferred to specify type hints for "
            "pandas UDF instead of specifying pandas UDF type which will be deprecated "
            "in the future releases. See SPARK-28264 for more details.", UserWarning)
    elif evalType in always_preset_types:
        # 'SQL_GROUPED_MAP_PANDAS_UDF' emits its deprecation warning at `apply`
        # instead; the other two always arrive with the evaluation type set.
        pass
    elif spec.annotations:
        # pandas UDF by type hints.
        evalType = infer_eval_type(signature(f))
        assert evalType is not None

    if evalType is None:
        # Default to the scalar pandas UDF.
        evalType = PythonEvalType.SQL_SCALAR_PANDAS_UDF

    is_scalar_flavor = evalType in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                                    PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
    if is_scalar_flavor and not spec.args and spec.varargs is None:
        raise ValueError(
            "Invalid function: 0-arg pandas_udfs are not supported. "
            "Instead, create a 1-arg pandas_udf and ignore the arg in your function."
        )

    if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
            and len(spec.args) not in (1, 2):
        raise ValueError(
            "Invalid function: pandas_udf with function type GROUPED_MAP or "
            "the function in groupby.applyInPandas "
            "must take either one argument (data) or two arguments (key, data).")

    if evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF \
            and len(spec.args) not in (2, 3):
        raise ValueError(
            "Invalid function: the function in cogroup.applyInPandas "
            "must take either two arguments (left, right) "
            "or three arguments (key, left, right).")

    return _create_udf(f, returnType, evalType)
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import warnings
from inspect import getfullargspec
from pyspark.rdd import PythonEvalType
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
from pyspark.sql.types import DataType
from pyspark.sql.udf import _create_udf
class PandasUDFType(object):
    """Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.

    Enum-like holder of the legacy ``functionType`` values accepted by
    :func:`pandas_udf`. Each attribute aliases the corresponding
    ``PythonEvalType`` constant understood by the Python worker protocol.
    """
    # Series(-es) in, Series out; output length must equal input length.
    SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF

    # Iterator of Series in, iterator of Series out (same total length).
    SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF

    # Used via groupby().applyInPandas; function takes (data) or (key, data).
    GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF

    # Series in, scalar out; usable in aggregations and over windows.
    GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
def pandas_udf(f=None, returnType=None, functionType=None):
    """
    Creates a pandas user defined function (a.k.a. vectorized user defined function).

    Pandas UDFs are user defined functions that are executed by Spark using Arrow to transfer
    data and Pandas to work with the data, which allows vectorized operations. A Pandas UDF
    is defined using the `pandas_udf` as a decorator or to wrap the function, and no
    additional configuration is required. A Pandas UDF behaves as a regular PySpark function
    API in general.

    .. versionadded:: 2.3.0

    Parameters
    ----------
    f : function, optional
        user-defined function. A python function if used as a standalone function
    returnType : :class:`pyspark.sql.types.DataType` or str, optional
        the return type of the user-defined function. The value can be either a
        :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
    functionType : int, optional
        an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
        Default: SCALAR. This parameter exists for compatibility.
        Using Python type hints is encouraged.

    Examples
    --------
    In order to use this API, customarily the below are imported:

    >>> import pandas as pd
    >>> from pyspark.sql.functions import pandas_udf

    From Spark 3.0 with Python 3.6+, `Python type hints <https://www.python.org/dev/peps/pep-0484>`_
    detect the function types as below:

    >>> @pandas_udf(IntegerType())
    ... def slen(s: pd.Series) -> pd.Series:
    ...     return s.str.len()

    Prior to Spark 3.0, the pandas UDF used `functionType` to decide the execution type as below:

    >>> from pyspark.sql.functions import PandasUDFType
    >>> from pyspark.sql.types import IntegerType
    >>> @pandas_udf(IntegerType(), PandasUDFType.SCALAR)
    ... def slen(s):
    ...     return s.str.len()

    It is preferred to specify type hints for the pandas UDF instead of specifying pandas UDF
    type via `functionType` which will be deprecated in the future releases.

    Note that the type hint should use `pandas.Series` in all cases but there is one variant
    that `pandas.DataFrame` should be used for its input or output type hint instead when the input
    or output column is of :class:`pyspark.sql.types.StructType`. The following example shows
    a Pandas UDF which takes long column, string column and struct column, and outputs a struct
    column. It requires the function to specify the type hints of `pandas.Series` and
    `pandas.DataFrame` as below:

    >>> @pandas_udf("col1 string, col2 long")
    ... def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
    ...     s3['col2'] = s1 + s2.str.len()
    ...     return s3
    ...
    >>> # Create a Spark DataFrame that has three columns including a struct column.
    ... df = spark.createDataFrame(
    ...     [[1, "a string", ("a nested string",)]],
    ...     "long_col long, string_col string, struct_col struct<col1:string>")
    >>> df.printSchema()
    root
     |-- long_col: long (nullable = true)
     |-- string_col: string (nullable = true)
     |-- struct_col: struct (nullable = true)
     |    |-- col1: string (nullable = true)
    >>> df.select(func("long_col", "string_col", "struct_col")).printSchema()
    root
     |-- func(long_col, string_col, struct_col): struct (nullable = true)
     |    |-- col1: string (nullable = true)
     |    |-- col2: long (nullable = true)

    In the following sections, it describes the combinations of the supported type hints. For
    simplicity, `pandas.DataFrame` variant is omitted.

    * Series to Series
        `pandas.Series`, ... -> `pandas.Series`

        The function takes one or more `pandas.Series` and outputs one `pandas.Series`.
        The output of the function should always be of the same length as the input.

        >>> @pandas_udf("string")
        ... def to_upper(s: pd.Series) -> pd.Series:
        ...     return s.str.upper()
        ...
        >>> df = spark.createDataFrame([("John Doe",)], ("name",))
        >>> df.select(to_upper("name")).show()
        +--------------+
        |to_upper(name)|
        +--------------+
        |      JOHN DOE|
        +--------------+

        >>> @pandas_udf("first string, last string")
        ... def split_expand(s: pd.Series) -> pd.DataFrame:
        ...     return s.str.split(expand=True)
        ...
        >>> df = spark.createDataFrame([("John Doe",)], ("name",))
        >>> df.select(split_expand("name")).show()
        +------------------+
        |split_expand(name)|
        +------------------+
        |       [John, Doe]|
        +------------------+

        .. note:: The length of the input is not that of the whole input column, but is the
            length of an internal batch used for each call to the function.

    * Iterator of Series to Iterator of Series
        `Iterator[pandas.Series]` -> `Iterator[pandas.Series]`

        The function takes an iterator of `pandas.Series` and outputs an iterator of
        `pandas.Series`. In this case, the created pandas UDF instance requires one input
        column when this is called as a PySpark column. The length of the entire output from
        the function should be the same length of the entire input; therefore, it can
        prefetch the data from the input iterator as long as the lengths are the same.

        It is also useful when the UDF execution
        requires initializing some states although internally it works identically as
        Series to Series case. The pseudocode below illustrates the example.

        .. highlight:: python
        .. code-block:: python

            @pandas_udf("long")
            def calculate(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
                # Do some expensive initialization with a state
                state = very_expensive_initialization()
                for x in iterator:
                    # Use that state for whole iterator.
                    yield calculate_with_state(x, state)

            df.select(calculate("value")).show()

        >>> from typing import Iterator
        >>> @pandas_udf("long")
        ... def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
        ...     for s in iterator:
        ...         yield s + 1
        ...
        >>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
        >>> df.select(plus_one(df.v)).show()
        +-----------+
        |plus_one(v)|
        +-----------+
        |          2|
        |          3|
        |          4|
        +-----------+

        .. note:: The length of each series is the length of a batch internally used.

    * Iterator of Multiple Series to Iterator of Series
        `Iterator[Tuple[pandas.Series, ...]]` -> `Iterator[pandas.Series]`

        The function takes an iterator of a tuple of multiple `pandas.Series` and outputs an
        iterator of `pandas.Series`. In this case, the created pandas UDF instance requires
        input columns as many as the series when this is called as a PySpark column.
        Otherwise, it has the same characteristics and restrictions as Iterator of Series
        to Iterator of Series case.

        >>> from typing import Iterator, Tuple
        >>> from pyspark.sql.functions import struct, col
        >>> @pandas_udf("long")
        ... def multiply(iterator: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]:
        ...     for s1, df in iterator:
        ...         yield s1 * df.v
        ...
        >>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
        >>> df.withColumn('output', multiply(col("v"), struct(col("v")))).show()
        +---+------+
        |  v|output|
        +---+------+
        |  1|     1|
        |  2|     4|
        |  3|     9|
        +---+------+

        .. note:: The length of each series is the length of a batch internally used.

    * Series to Scalar
        `pandas.Series`, ... -> `Any`

        The function takes `pandas.Series` and returns a scalar value. The `returnType`
        should be a primitive data type, and the returned scalar can be either a python primitive
        type, e.g., int or float or a numpy data type, e.g., numpy.int64 or numpy.float64.
        `Any` should ideally be a specific scalar type accordingly.

        >>> @pandas_udf("double")
        ... def mean_udf(v: pd.Series) -> float:
        ...     return v.mean()
        ...
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
        >>> df.groupby("id").agg(mean_udf(df['v'])).show()
        +---+-----------+
        | id|mean_udf(v)|
        +---+-----------+
        |  1|        1.5|
        |  2|        6.0|
        +---+-----------+

        This UDF can also be used as window functions as below:

        >>> from pyspark.sql import Window
        >>> @pandas_udf("double")
        ... def mean_udf(v: pd.Series) -> float:
        ...     return v.mean()
        ...
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
        >>> w = Window.partitionBy('id').orderBy('v').rowsBetween(-1, 0)
        >>> df.withColumn('mean_v', mean_udf("v").over(w)).show()
        +---+----+------+
        | id|   v|mean_v|
        +---+----+------+
        |  1| 1.0|   1.0|
        |  1| 2.0|   1.5|
        |  2| 3.0|   3.0|
        |  2| 5.0|   4.0|
        |  2|10.0|   7.5|
        +---+----+------+

        .. note:: For performance reasons, the input series to window functions are not copied.
            Therefore, mutating the input series is not allowed and will cause incorrect results.
            For the same reason, users should also not rely on the index of the input series.

    Notes
    -----
    The user-defined functions do not support conditional expressions or short circuiting
    in boolean expressions and it ends up with being executed all internally. If the functions
    can fail on special rows, the workaround is to incorporate the condition into the functions.

    The user-defined functions do not take keyword arguments on the calling side.

    The data type of returned `pandas.Series` from the user-defined functions should be
    matched with defined `returnType` (see :meth:`types.to_arrow_type` and
    :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
    conversion on returned data. The conversion is not guaranteed to be correct and results
    should be checked for accuracy by users.

    Currently,
    :class:`pyspark.sql.types.MapType`,
    :class:`pyspark.sql.types.ArrayType` of :class:`pyspark.sql.types.TimestampType` and
    nested :class:`pyspark.sql.types.StructType`
    are currently not supported as output types.

    See Also
    --------
    pyspark.sql.GroupedData.agg
    pyspark.sql.DataFrame.mapInPandas
    pyspark.sql.GroupedData.applyInPandas
    pyspark.sql.PandasCogroupedOps.applyInPandas
    pyspark.sql.UDFRegistration.register
    """

    # A table of most Pandas-value -> SQL-type conversions performed by Pandas
    # UDFs (some of which are buggy and might change in the near future, and
    # which are not yet visible to the user) is intentionally not reproduced
    # here; it can be regenerated with the code in SPARK-28132's PR. In that
    # table, DDL-formatted strings stand for the SQL types (usable in
    # `returnType`), values are rendered by `repr`, and 'X' means the
    # conversion throws an exception. It was generated with Python 3.7.3,
    # Pandas 1.1.1 and PyArrow 1.0.1 in the KST timezone.
    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    # decorator @pandas_udf(returnType, functionType)
    is_decorator = f is None or isinstance(f, (str, DataType))

    if is_decorator:
        # If DataType has been passed as a positional argument
        # for decorator use it as a returnType
        return_type = f or returnType

        if functionType is not None:
            # @pandas_udf(dataType, functionType=functionType)
            # @pandas_udf(returnType=dataType, functionType=functionType)
            eval_type = functionType
        elif returnType is not None and isinstance(returnType, int):
            # @pandas_udf(dataType, functionType)
            eval_type = returnType
        else:
            # @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
            eval_type = None
    else:
        return_type = returnType

        if functionType is not None:
            eval_type = functionType
        else:
            eval_type = None

    if return_type is None:
        raise ValueError("Invalid return type: returnType can not be None")

    if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                         PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
                         PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
                         PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
                         PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
                         PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
                         None]:  # None means it should infer the type from type hints.

        # Fixed: error message previously read "one the values".
        raise ValueError("Invalid function type: "
                         "functionType must be one of the values from PandasUDFType")

    if is_decorator:
        return functools.partial(_create_pandas_udf, returnType=return_type, evalType=eval_type)
    else:
        return _create_pandas_udf(f=f, returnType=return_type, evalType=eval_type)
def _create_pandas_udf(f, returnType, evalType):
    """Resolve the pandas eval type for ``f`` and build the underlying UDF.

    When ``evalType`` was not supplied, it is inferred from the function's
    Python type hints; explicitly supplied legacy types trigger a deprecation
    warning. The function arity is then validated per UDF flavor before
    delegating to ``_create_udf``.
    """
    from inspect import signature

    spec = getfullargspec(f)

    deprecated_explicit_types = (
        PythonEvalType.SQL_SCALAR_PANDAS_UDF,
        PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
        PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
    )
    always_preset_types = (
        PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
        PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
        PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
    )

    if evalType in deprecated_explicit_types:
        warnings.warn(
            "In Python 3.6+ and Spark 3.0+, it is preferred to specify type hints for "
            "pandas UDF instead of specifying pandas UDF type which will be deprecated "
            "in the future releases. See SPARK-28264 for more details.", UserWarning)
    elif evalType in always_preset_types:
        # 'SQL_GROUPED_MAP_PANDAS_UDF' emits its deprecation warning at `apply`
        # instead; the other two always arrive with the evaluation type set.
        pass
    elif spec.annotations:
        # pandas UDF by type hints.
        evalType = infer_eval_type(signature(f))
        assert evalType is not None

    if evalType is None:
        # Default to the scalar pandas UDF.
        evalType = PythonEvalType.SQL_SCALAR_PANDAS_UDF

    is_scalar_flavor = evalType in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                                    PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
    if is_scalar_flavor and not spec.args and spec.varargs is None:
        raise ValueError(
            "Invalid function: 0-arg pandas_udfs are not supported. "
            "Instead, create a 1-arg pandas_udf and ignore the arg in your function."
        )

    if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
            and len(spec.args) not in (1, 2):
        raise ValueError(
            "Invalid function: pandas_udf with function type GROUPED_MAP or "
            "the function in groupby.applyInPandas "
            "must take either one argument (data) or two arguments (key, data).")

    if evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF \
            and len(spec.args) not in (2, 3):
        raise ValueError(
            "Invalid function: the function in cogroup.applyInPandas "
            "must take either two arguments (left, right) "
            "or three arguments (key, left, right).")

    return _create_udf(f, returnType, evalType)
|
en
| 0.508939
|
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`. Creates a pandas user defined function (a.k.a. vectorized user defined function). Pandas UDFs are user defined functions that are executed by Spark using Arrow to transfer data and Pandas to work with the data, which allows vectorized operations. A Pandas UDF is defined using the `pandas_udf` as a decorator or to wrap the function, and no additional configuration is required. A Pandas UDF behaves as a regular PySpark function API in general. .. versionadded:: 2.3.0 Parameters ---------- f : function, optional user-defined function. A python function if used as a standalone function returnType : :class:`pyspark.sql.types.DataType` or str, optional the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. functionType : int, optional an enum value in :class:`pyspark.sql.functions.PandasUDFType`. Default: SCALAR. This parameter exists for compatibility. Using Python type hints is encouraged. 
Examples -------- In order to use this API, customarily the below are imported: >>> import pandas as pd >>> from pyspark.sql.functions import pandas_udf From Spark 3.0 with Python 3.6+, `Python type hints <https://www.python.org/dev/peps/pep-0484>`_ detect the function types as below: >>> @pandas_udf(IntegerType()) ... def slen(s: pd.Series) -> pd.Series: ... return s.str.len() Prior to Spark 3.0, the pandas UDF used `functionType` to decide the execution type as below: >>> from pyspark.sql.functions import PandasUDFType >>> from pyspark.sql.types import IntegerType >>> @pandas_udf(IntegerType(), PandasUDFType.SCALAR) ... def slen(s): ... return s.str.len() It is preferred to specify type hints for the pandas UDF instead of specifying pandas UDF type via `functionType` which will be deprecated in the future releases. Note that the type hint should use `pandas.Series` in all cases but there is one variant that `pandas.DataFrame` should be used for its input or output type hint instead when the input or output column is of :class:`pyspark.sql.types.StructType`. The following example shows a Pandas UDF which takes long column, string column and struct column, and outputs a struct column. It requires the function to specify the type hints of `pandas.Series` and `pandas.DataFrame` as below: >>> @pandas_udf("col1 string, col2 long") >>> def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame: ... s3['col2'] = s1 + s2.str.len() ... return s3 ... >>> # Create a Spark DataFrame that has three columns including a sturct column. ... df = spark.createDataFrame( ... [[1, "a string", ("a nested string",)]], ... 
"long_col long, string_col string, struct_col struct<col1:string>") >>> df.printSchema() root |-- long_column: long (nullable = true) |-- string_column: string (nullable = true) |-- struct_column: struct (nullable = true) | |-- col1: string (nullable = true) >>> df.select(func("long_col", "string_col", "struct_col")).printSchema() |-- func(long_col, string_col, struct_col): struct (nullable = true) | |-- col1: string (nullable = true) | |-- col2: long (nullable = true) In the following sections, it describes the cominations of the supported type hints. For simplicity, `pandas.DataFrame` variant is omitted. * Series to Series `pandas.Series`, ... -> `pandas.Series` The function takes one or more `pandas.Series` and outputs one `pandas.Series`. The output of the function should always be of the same length as the input. >>> @pandas_udf("string") ... def to_upper(s: pd.Series) -> pd.Series: ... return s.str.upper() ... >>> df = spark.createDataFrame([("<NAME>",)], ("name",)) >>> df.select(to_upper("name")).show() +--------------+ |to_upper(name)| +--------------+ | <NAME>| +--------------+ >>> @pandas_udf("first string, last string") ... def split_expand(s: pd.Series) -> pd.DataFrame: ... return s.str.split(expand=True) ... >>> df = spark.createDataFrame([("<NAME>",)], ("name",)) >>> df.select(split_expand("name")).show() +------------------+ |split_expand(name)| +------------------+ | [John, Doe]| +------------------+ .. note:: The length of the input is not that of the whole input column, but is the length of an internal batch used for each call to the function. * Iterator of Series to Iterator of Series `Iterator[pandas.Series]` -> `Iterator[pandas.Series]` The function takes an iterator of `pandas.Series` and outputs an iterator of `pandas.Series`. In this case, the created pandas UDF instance requires one input column when this is called as a PySpark column. 
The length of the entire output from the function should be the same length of the entire input; therefore, it can prefetch the data from the input iterator as long as the lengths are the same. It is also useful when the UDF execution requires initializing some states although internally it works identically as Series to Series case. The pseudocode below illustrates the example. .. highlight:: python .. code-block:: python @pandas_udf("long") def calculate(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]: # Do some expensive initialization with a state state = very_expensive_initialization() for x in iterator: # Use that state for whole iterator. yield calculate_with_state(x, state) df.select(calculate("value")).show() >>> from typing import Iterator >>> @pandas_udf("long") ... def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]: ... for s in iterator: ... yield s + 1 ... >>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"])) >>> df.select(plus_one(df.v)).show() +-----------+ |plus_one(v)| +-----------+ | 2| | 3| | 4| +-----------+ .. note:: The length of each series is the length of a batch internally used. * Iterator of Multiple Series to Iterator of Series `Iterator[Tuple[pandas.Series, ...]]` -> `Iterator[pandas.Series]` The function takes an iterator of a tuple of multiple `pandas.Series` and outputs an iterator of `pandas.Series`. In this case, the created pandas UDF instance requires input columns as many as the series when this is called as a PySpark column. Otherwise, it has the same characteristics and restrictions as Iterator of Series to Iterator of Series case. >>> from typing import Iterator, Tuple >>> from pyspark.sql.functions import struct, col >>> @pandas_udf("long") ... def multiply(iterator: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]: ... for s1, df in iterator: ... yield s1 * df.v ... 
>>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"])) >>> df.withColumn('output', multiply(col("v"), struct(col("v")))).show() +---+------+ | v|output| +---+------+ | 1| 1| | 2| 4| | 3| 9| +---+------+ .. note:: The length of each series is the length of a batch internally used. * Series to Scalar `pandas.Series`, ... -> `Any` The function takes `pandas.Series` and returns a scalar value. The `returnType` should be a primitive data type, and the returned scalar can be either a python primitive type, e.g., int or float or a numpy data type, e.g., numpy.int64 or numpy.float64. `Any` should ideally be a specific scalar type accordingly. >>> @pandas_udf("double") ... def mean_udf(v: pd.Series) -> float: ... return v.mean() ... >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v")) >>> df.groupby("id").agg(mean_udf(df['v'])).show() +---+-----------+ | id|mean_udf(v)| +---+-----------+ | 1| 1.5| | 2| 6.0| +---+-----------+ This UDF can also be used as window functions as below: >>> from pyspark.sql import Window >>> @pandas_udf("double") ... def mean_udf(v: pd.Series) -> float: ... return v.mean() ... >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v")) >>> w = Window.partitionBy('id').orderBy('v').rowsBetween(-1, 0) >>> df.withColumn('mean_v', mean_udf("v").over(w)).show() +---+----+------+ | id| v|mean_v| +---+----+------+ | 1| 1.0| 1.0| | 1| 2.0| 1.5| | 2| 3.0| 3.0| | 2| 5.0| 4.0| | 2|10.0| 7.5| +---+----+------+ .. note:: For performance reasons, the input series to window functions are not copied. Therefore, mutating the input series is not allowed and will cause incorrect results. For the same reason, users should also not rely on the index of the input series. Notes ----- The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. 
If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. The user-defined functions do not take keyword arguments on the calling side. The data type of returned `pandas.Series` from the user-defined functions should be matched with defined `returnType` (see :meth:`types.to_arrow_type` and :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do conversion on returned data. The conversion is not guaranteed to be correct and results should be checked for accuracy by users. Currently, :class:`pyspark.sql.types.MapType`, :class:`pyspark.sql.types.ArrayType` of :class:`pyspark.sql.types.TimestampType` and nested :class:`pyspark.sql.types.StructType` are currently not supported as output types. See Also -------- pyspark.sql.GroupedData.agg pyspark.sql.DataFrame.mapInPandas pyspark.sql.GroupedData.applyInPandas pyspark.sql.PandasCogroupedOps.applyInPandas pyspark.sql.UDFRegistration.register # The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-28132's PR to see the codes in order to generate the table below. 
# # +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa # |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)| A(category)|1 days 00:00:00(timedelta64[ns])| # noqa # +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa # | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa # | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa # | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa # | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa # | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 
1| 0| 18000000000000| X| 1| X| X| X| X| X| 86400000000000| # noqa # | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa # | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa # | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa # | string| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| 'a'| X| X| X| X| X| 'A'| X| # noqa # | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa # | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa # | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')|bytearray(b'A')| bytearray(b'')| # noqa # 
+-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa # # # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: Python 3.7.3, Pandas 1.1.1 and PyArrow 1.0.1 are used. # Note: Timezone is KST. # Note: 'X' means it throws an exception during the conversion. # decorator @pandas_udf(returnType, functionType) # If DataType has been passed as a positional argument # for decorator use it as a returnType # @pandas_udf(dataType, functionType=functionType) # @pandas_udf(returnType=dataType, functionType=functionType) # @pandas_udf(dataType, functionType) # @pandas_udf(dataType) or @pandas_udf(returnType=dataType) # None means it should infer the type from type hints. # pandas UDF by type hints. # In case of 'SQL_GROUPED_MAP_PANDAS_UDF', deprecation warning is being triggered # at `apply` instead. # In case of 'SQL_MAP_PANDAS_ITER_UDF' and 'SQL_COGROUPED_MAP_PANDAS_UDF', the # evaluation type will always be set. # Set default is scalar UDF.
| 1.941546
| 2
|
comment/admin.py
|
codingtruman/myblog
| 0
|
6628794
|
<gh_stars>0
from django.contrib import admin
from .models import *
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for the Comment model."""
    # Columns shown on the comment change-list page.
    list_display = ["user", "comment_time", "body"]
    # Sidebar filters available on the change-list.
    list_filter = ["user", "comment_time"]
    # Fields searched by the admin search box.
    search_fields = ["user", "body"]
    # Date drill-down navigation keyed on the comment timestamp.
    date_hierarchy = "comment_time"
    # Paginate the change-list at 20 comments per page.
    list_per_page = 20
    # Also show the actions bar below the list.
    actions_on_bottom = True
# Register the model together with its customized admin options.
admin.site.register(Comment, CommentAdmin)
|
from django.contrib import admin
from .models import *
class CommentAdmin(admin.ModelAdmin):
# choose what to show in article admin page
list_display = ["user", "comment_time", "body"]
# choose what parameters as filters
list_filter = ["user", "comment_time"]
# choose what can be searched in search box
search_fields = ["user", "body"]
date_hierarchy = "comment_time"
list_per_page = 20
actions_on_bottom = True
admin.site.register(Comment, CommentAdmin)
|
en
| 0.836837
|
# choose what to show in article admin page # choose what parameters as filters # choose what can be searched in search box
| 2.153001
| 2
|
gap/exception.py
|
Labgoo/google-analytics-for-python
| 0
|
6628795
|
<reponame>Labgoo/google-analytics-for-python<gh_stars>0
__author__ = 'minhtule'
class ValidateException(Exception):
    """Raised when validation of a value fails."""
    pass
|
__author__ = 'minhtule'
class ValidateException(Exception):
pass
|
none
| 1
| 1.124256
| 1
|
|
tests/test_il_gaming_board.py
|
MAYANK25402/city-scrapers
| 1
|
6628796
|
# Tests for IlGamingBoardSpider: parse an archived copy of the Illinois
# Gaming Board meetings page and verify each field of the first scraped item.
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import BOARD, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.il_gaming_board import IlGamingBoardSpider
# Wrap the saved HTML fixture in a fake Scrapy response object.
test_response = file_response(
    join(dirname(__file__), "files", "il_gaming_board.html"),
    url="http://www.igb.illinois.gov/MeetingsMinutes.aspx",
)
spider = IlGamingBoardSpider()
# Freeze "now" so time-relative fields (e.g. the PASSED status) are deterministic.
freezer = freeze_time("2019-06-04")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_count():
    # The fixture page lists exactly 10 meetings.
    assert len(parsed_items) == 10
def test_title():
    assert parsed_items[0]["title"] == "Riverboat/Video Gaming"
def test_description():
    # The source page provides no per-meeting description text.
    assert parsed_items[0]["description"] == ""
def test_start():
    assert parsed_items[0]["start"] == datetime(2019, 1, 30, 9)
def test_end():
    # No end time is published for these meetings.
    assert parsed_items[0]["end"] is None
def test_time_notes():
    assert parsed_items[0]["time_notes"] == "See source to confirm meeting time"
def test_id():
    assert (
        parsed_items[0]["id"] == "il_gaming_board/201901300900/x/riverboat_video_gaming"
    )
def test_status():
    # With time frozen at 2019-06-04, a January meeting has already passed.
    assert parsed_items[0]["status"] == PASSED
def test_location():
    # Every meeting uses the spider's single fixed location.
    assert parsed_items[0]["location"] == spider.location
def test_source():
    assert (
        parsed_items[0]["source"] == "http://www.igb.illinois.gov/MeetingsMinutes.aspx"
    )
def test_links():
    # Agenda, minutes and audio documents attached to the first meeting.
    assert parsed_items[0]["links"] == [
        {
            "href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatAgenda.pdf", # noqa
            "title": "Agenda: Riverboat",
        },
        {
            "href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatMinutes.pdf", # noqa
            "title": "Minutes: Riverboat",
        },
        {
            "href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatAudio.mp3", # noqa
            "title": "Audio: Riverboat",
        },
        {
            "href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130VideoAudio.mp3", # noqa
            "title": "Audio: Video Gaming",
        },
    ]
def test_classification():
    assert parsed_items[0]["classification"] == BOARD
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
    # These are timed meetings, never all-day events.
    assert item["all_day"] is False
|
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import BOARD, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.il_gaming_board import IlGamingBoardSpider
test_response = file_response(
join(dirname(__file__), "files", "il_gaming_board.html"),
url="http://www.igb.illinois.gov/MeetingsMinutes.aspx",
)
spider = IlGamingBoardSpider()
freezer = freeze_time("2019-06-04")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_count():
assert len(parsed_items) == 10
def test_title():
assert parsed_items[0]["title"] == "Riverboat/Video Gaming"
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2019, 1, 30, 9)
def test_end():
assert parsed_items[0]["end"] is None
def test_time_notes():
assert parsed_items[0]["time_notes"] == "See source to confirm meeting time"
def test_id():
assert (
parsed_items[0]["id"] == "il_gaming_board/201901300900/x/riverboat_video_gaming"
)
def test_status():
assert parsed_items[0]["status"] == PASSED
def test_location():
assert parsed_items[0]["location"] == spider.location
def test_source():
assert (
parsed_items[0]["source"] == "http://www.igb.illinois.gov/MeetingsMinutes.aspx"
)
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatAgenda.pdf", # noqa
"title": "Agenda: Riverboat",
},
{
"href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatMinutes.pdf", # noqa
"title": "Minutes: Riverboat",
},
{
"href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130RiverboatAudio.mp3", # noqa
"title": "Audio: Riverboat",
},
{
"href": "http://www.igb.illinois.gov/FilesBoardMeeting/20190130VideoAudio.mp3", # noqa
"title": "Audio: Video Gaming",
},
]
def test_classification():
assert parsed_items[0]["classification"] == BOARD
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
|
uz
| 0.447735
|
# noqa # noqa # noqa # noqa
| 2.380965
| 2
|
bpytop.py
|
jonasbfranco/bpytop
| 1
|
6628797
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, io, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.63"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help = "which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-lc", "--low-color", action="store_true", help = "disable truecolor, converts 24-bit colors to 256-color")
args.add_argument("-v", "--version", action="store_true", help = "show version info and exit")
args.add_argument("--debug", action="store_true", help = "start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_BOXES: str = stdargs.boxes
LOW_COLOR: bool = stdargs.low_color
DEBUG: bool = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false.
truecolor=$truecolor
#* Manually set which boxes to show. Available values are "cpu mem net proc", seperate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_upper="$cpu_graph_upper"
#* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_lower="$cpu_graph_lower"
#* Toggles if the lower CPU graph should be inverted.
cpu_invert_lower=$cpu_invert_lower
#* Set to True to completely disable the lower CPU graph.
cpu_single_graph=$cpu_single_graph
#* Shows the system uptime in the CPU box.
show_uptime=$show_uptime
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors has been found
show_coretemp=$show_coretemp
#* Which temperature scale to use, available values: "celsius", "fahrenheit", "kelvin" and "rankine"
temp_scale="$temp_scale"
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus is flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, oterwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Toggles if io stats should be shown in regular disk usage view
show_io_stat=$show_io_stat
#* Toggles io mode for disks, showing only big graphs for disk read/write speeds.
io_mode=$io_mode
#* Set to True to show combined read/write io graphs in io mode.
io_graph_combined=$io_graph_combined
#* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed" seperate disks with a comma ",".
#* Example: "/dev/sda:100, /dev/sdb:20"
io_graph_speeds="$io_graph_speeds"
#* Set fixed values for network graphs, default "10M" = 10 Mibibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwith usage or auto scale, bandwith usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface="$net_iface"
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetical
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
	"""Named-stopwatch registry used for debugging and profiling.

	Timers live in class-level dicts keyed by name; `stop` logs the
	total elapsed time (including any banked paused time) at DEBUG level.
	"""
	timers: Dict[str, float] = {}
	paused: Dict[str, float] = {}

	@classmethod
	def start(cls, name):
		"""Begin (or restart) the timer registered under *name*."""
		cls.timers[name] = time()

	@classmethod
	def pause(cls, name):
		"""Suspend timer *name*, banking its elapsed time so far."""
		started = cls.timers.pop(name, None)
		if started is not None:
			cls.paused[name] = time() - started

	@classmethod
	def stop(cls, name):
		"""Finish timer *name* and log the total elapsed seconds."""
		started = cls.timers.pop(name, None)
		if started is None:
			return
		total: float = time() - started
		total += cls.paused.pop(name, 0.0)
		errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
	"""Decorator that logs *func*'s wall-clock execution time at DEBUG level.

	Fix: applies functools.wraps so the wrapped function keeps its original
	metadata (__name__, __doc__, etc.); previously introspection and log
	messages elsewhere would see "timed" instead of the real function name.
	"""
	from functools import wraps  # local import: avoids touching the module import list

	@wraps(func)
	def timed(*args, **kw):
		ts = time()
		out = func(*args, **kw)
		errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
		return out
	return timed
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical",
"truecolor", "io_mode", "io_graph_combined", "io_graph_speeds", "show_io_stat", "cpu_graph_upper", "cpu_graph_lower", "cpu_invert_lower",
"cpu_single_graph", "show_uptime", "temp_scale"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
truecolor: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
cpu_graph_upper: str = "total"
cpu_graph_lower: str = "total"
cpu_invert_lower: bool = True
cpu_single_graph: bool = False
show_uptime: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
temp_scale: str = "celsius"
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
show_io_stat: bool = True
io_mode: bool = False
io_graph_combined: bool = False
io_graph_speeds: str = ""
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = True
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_percent_fields: List = ["total"]
cpu_percent_fields.extend(getattr(psutil.cpu_times_percent(), "_fields", []))
temp_scales: List[str] = ["celsius", "fahrenheit", "kelvin", "rankine"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
	def __init__(self, path: str):
		"""Load the config file at *path* and apply all recognized values.

		A missing/malformed file, version mismatch or rejected value sets
		self.recreate so save_config() rewrites the file on exit."""
		self.config_file = path
		conf: Dict[str, Union[str, int, bool]] = self.load_config()
		if not "version" in conf.keys():
			self.recreate = True
			self.info.append(f'Config file malformatted or missing, will be recreated on exit!')
		elif conf["version"] != VERSION:
			self.recreate = True
			self.info.append(f'Config file version and bpytop version missmatch, will be recreated on exit!')
		for key in self.keys:
			if key in conf.keys() and conf[key] != "_error_":
				setattr(self, key, conf[key])
			else:
				#* Value missing or flagged invalid: keep the class default and rewrite the file
				self.recreate = True
			self.conf_dict[key] = getattr(self, key)
		self._initialized = True
	def __setattr__(self, name, value):
		"""Track changes: any attribute set after init marks the config as
		changed and mirrors the value into conf_dict for saving."""
		if self._initialized:
			object.__setattr__(self, "changed", True)
		object.__setattr__(self, name, value)
		#* Bookkeeping attributes are never written to the config file
		if name not in ["_initialized", "recreate", "changed"]:
			self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
for cpu_graph in ["cpu_graph_upper", "cpu_graph_lower"]:
if cpu_graph in new_config and not new_config[cpu_graph] in self.cpu_percent_fields:
new_config[cpu_graph] = "_error_"
self.warnings.append(f'Config key "{cpu_graph}" does not contain an available cpu stat attribute!')
if "temp_scale" in new_config and not new_config["temp_scale"] in self.temp_scales:
new_config["temp_scale"] = "_error_"
self.warnings.append(f'Config key "temp_scale" does not contain a recognized temperature scale!')
return new_config
	def save_config(self):
		'''Save current config to config file if difference in values or version, creates a new file if not found'''
		if not self.changed and not self.recreate: return
		try:
			#* "x" mode creates the file when it does not exist yet
			with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
				f.write(DEFAULT_CONF.substitute(self.conf_dict))
		except Exception as e:
			errlog.exception(str(e))
#* Load the config and configure the logger; failure here is fatal
try:
	CONFIG: Config = Config(CONFIG_FILE)
	if DEBUG:
		errlog.setLevel(logging.DEBUG)
	else:
		errlog.setLevel(getattr(logging, CONFIG.log_level))
		DEBUG = CONFIG.log_level == "DEBUG"
	errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
	errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
	errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
	errlog.debug(f'CMD: {" ".join(sys.argv)}')
	#* Flush messages queued while the config was being parsed
	if CONFIG.info:
		for info in CONFIG.info:
			errlog.info(info)
		CONFIG.info = []
	if CONFIG.warnings:
		for warning in CONFIG.warnings:
			errlog.warning(warning)
		CONFIG.warnings = []
except Exception as e:
	errlog.exception(f'{e}')
	raise SystemExit(1)
#* Boxes given on the command line override the configured layout
if ARG_BOXES:
	_new_boxes: List = []
	for _box in ARG_BOXES.split():
		if _box in ["cpu", "mem", "net", "proc"]:
			_new_boxes.append(_box)
	CONFIG.shown_boxes = " ".join(_new_boxes)
	del _box, _new_boxes
#* No battery information without /sys/class/power_supply on Linux
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
	CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
	warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
	print("WARNING!", warn)
	errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
	"""Terminal info and commands"""
	width: int = 0
	height: int = 0
	resized: bool = False
	_w : int = 0 #* Latest polled terminal width, compared against width
	_h : int = 0 #* Latest polled terminal height, compared against height
	fg: str = "" #* Default foreground color
	bg: str = "" #* Default background color
	hide_cursor = "\033[?25l" #* Hide terminal cursor
	show_cursor = "\033[?25h" #* Show terminal cursor
	alt_screen = "\033[?1049h" #* Switch to alternate screen
	normal_screen = "\033[?1049l" #* Switch to normal screen
	clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
	mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
	mouse_off = "\033[?1002l" #* Disable mouse reporting
	mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
	mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
	winch = threading.Event() #* Event used to wait for / signal a terminal resize
	old_boxes: List = [] #* Last seen Box.boxes, used to detect layout changes
	min_width: int = 0 #* Smallest terminal width allowed by the current box layout
	min_height: int = 0 #* Smallest terminal height allowed by the current box layout
	@classmethod
	def refresh(cls, *args, force: bool = False):
		"""Update width, height and set resized flag if terminal has been resized"""
		if Init.running: cls.resized = False; return
		if cls.resized: cls.winch.set(); return
		cls._w, cls._h = os.get_terminal_size()
		if (cls._w, cls._h) == (cls.width, cls.height) and cls.old_boxes == Box.boxes and not force: return
		if force: Collector.collect_interrupt = True
		if cls.old_boxes != Box.boxes:
			#* Box layout changed: recalculate minimum terminal size from the
			#* percentage and minimum sizes of all currently shown boxes
			w_p = h_p = 0
			cls.min_width = cls.min_height = 0
			cls.old_boxes = Box.boxes.copy()
			for box_class in Box.__subclasses__():
				for box_name in Box.boxes:
					if box_name in str(box_class).capitalize():
						if not (box_name == "cpu" and "proc" in Box.boxes) and not (box_name == "net" and "mem" in Box.boxes) and w_p + box_class.width_p <= 100:
							w_p += box_class.width_p
							cls.min_width += getattr(box_class, "min_w", 0)
						if not (box_name in ["mem", "net"] and "proc" in Box.boxes) and h_p + box_class.height_p <= 100:
							h_p += box_class.height_p
							cls.min_height += getattr(box_class, "min_h", 0)
		#* Loop until the terminal size is stable and large enough for the layout
		while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < cls.min_width or cls._h < cls.min_height):
			if Init.running: Init.resized = True
			CpuBox.clock_block = True
			cls.resized = True
			Collector.collect_interrupt = True
			cls.width, cls.height = cls._w, cls._h
			Draw.now(Term.clear)
			box_width = min(50, cls._w - 2)
			Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
				f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
			if cls._w < 80 or cls._h < 24:
				#* Terminal too small: show a warning until it is resized large enough
				while cls._w < cls.min_width or cls._h < cls.min_height:
					Draw.now(Term.clear)
					box_width = min(50, cls._w - 2)
					Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, box_width, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
						f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < cls.min_width else Colors.green}{cls._w} ',
						f'{Colors.default}Height: {Colors.red if cls._h < cls.min_height else Colors.green}{cls._h}{Term.bg}{Term.fg}',
						f'{Mv.d(1)}{Mv.l(25)}{Colors.default}{Colors.black_bg}Current config need: {cls.min_width} x {cls.min_height}{Fx.ub}{Term.bg}{Term.fg}')
					cls.winch.wait(0.3)
					cls.winch.clear()
					cls._w, cls._h = os.get_terminal_size()
			else:
				#* Debounce: wait briefly for further resize events before settling
				cls.winch.wait(0.3)
				cls.winch.clear()
			cls._w, cls._h = os.get_terminal_size()
		Key.mouse = {}
		Box.calc_sizes()
		Collector.proc_counter = 1
		if Menu.active: Menu.resized = True
		Box.draw_bg(now=False)
		cls.resized = False
		Timer.finish()
	@staticmethod
	def echo(on: bool):
		"""Toggle input echo"""
		(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
		if on:
			lflag |= termios.ECHO # type: ignore
		else:
			lflag &= ~termios.ECHO # type: ignore
		new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
		termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
	@staticmethod
	def title(text: str = "") -> str:
		"""Return an OSC escape sequence setting the terminal window title"""
		out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
		if out and text: out += " "
		if text: out += f'{text}'
		return f'\033]0;{out}\a'
class Fx:
	"""Collection of ANSI text-effect escape codes and string helpers.

	* trans(string: str): swap every space for a cursor-right escape so the
	  background behind the whitespace is not overwritten when printed.
	* uncolor(string: str): strip all 24-bit color escape sequences."""
	start = "\033[" #* Escape sequence start
	sep = ";" #* Escape sequence separator
	end = "m" #* Escape sequence end
	reset = rs = "\033[0m" #* Reset foreground/background color and text effects
	bold = b = "\033[1m" #* Bold on
	unbold = ub = "\033[22m" #* Bold off
	dark = d = "\033[2m" #* Dark on
	undark = ud = "\033[22m" #* Dark off
	italic = i = "\033[3m" #* Italic on
	unitalic = ui = "\033[23m" #* Italic off
	underline = u = "\033[4m" #* Underline on
	ununderline = uu = "\033[24m" #* Underline off
	blink = bl = "\033[5m" #* Blink on
	unblink = ubl = "\033[25m" #* Blink off
	strike = s = "\033[9m" #* Strike / crossed-out on
	unstrike = us = "\033[29m" #* Strike / crossed-out off
	#* Precompiled regex matching any 24-bit color escape sequence
	color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
	@staticmethod
	def trans(string: str):
		#* Splitting on every single space and rejoining with "move right one
		#* column" reproduces consecutive spaces as consecutive moves
		return "\033[1C".join(string.split(" "))
	@classmethod
	def uncolor(cls, string: str) -> str:
		return cls.color_re.sub("", string)
class Raw(object):
	"""Context manager: switch *stream* into cbreak (raw-ish) input mode on
	entry and restore the saved terminal attributes on exit."""
	def __init__(self, stream):
		self.stream = stream
		self.fd = stream.fileno()
	def __enter__(self):
		#* Remember the current attributes so they can be restored afterwards
		self.original_stty = termios.tcgetattr(self.stream)
		tty.setcbreak(self.stream)
	def __exit__(self, *args):
		termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
	"""Context manager: set O_NONBLOCK on *stream*'s file descriptor for the
	duration of the with-block, restoring the original flags on exit."""
	def __init__(self, stream):
		self.stream = stream
		self.fd = stream.fileno()
	def __enter__(self):
		#* Save current flags, then OR in the non-blocking bit
		self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
		fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
	def __exit__(self, *args):
		#* Restore the flags exactly as they were on entry
		fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
	"""Cursor movement escape helpers:
	.t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
	@staticmethod
	def to(line: int, col: int) -> str:
		'''Place the cursor at absolute position line, col'''
		return "\033[{};{}f".format(line, col)
	@staticmethod
	def right(x: int) -> str:
		'''Shift the cursor x columns to the right'''
		return "\033[{}C".format(x)
	@staticmethod
	def left(x: int) -> str:
		'''Shift the cursor x columns to the left'''
		return "\033[{}D".format(x)
	@staticmethod
	def up(x: int) -> str:
		'''Shift the cursor x lines up'''
		return "\033[{}A".format(x)
	@staticmethod
	def down(x: int) -> str:
		'''Shift the cursor x lines down'''
		return "\033[{}B".format(x)
	save: str = "\033[s" #* Remember current cursor position
	restore: str = "\033[u" #* Jump back to the remembered cursor position
	#* Single letter aliases
	t = to
	r = right
	l = left
	u = up
	d = down
class Key:
	"""Handles the threaded input reader for keypresses and mouse events"""
	list: List[str] = [] #* Queue of parsed key names, oldest first, capped at 10
	mouse: Dict[str, List[List[int]]] = {} #* Clickable areas: key name -> list of [x, y] positions
	mouse_pos: Tuple[int, int] = (0, 0)
	#* Escape sequences (without leading \033) mapped to readable key names
	escape: Dict[Union[str, Tuple[str, str]], str] = {
		"\n" : "enter",
		("\x7f", "\x08") : "backspace",
		("[A", "OA") : "up",
		("[B", "OB") : "down",
		("[D", "OD") : "left",
		("[C", "OC") : "right",
		"[2~" : "insert",
		"[3~" : "delete",
		"[H" : "home",
		"[F" : "end",
		"[5~" : "page_up",
		"[6~" : "page_down",
		"\t" : "tab",
		"[Z" : "shift_tab",
		"OP" : "f1",
		"OQ" : "f2",
		"OR" : "f3",
		"OS" : "f4",
		"[15" : "f5",
		"[17" : "f6",
		"[18" : "f7",
		"[19" : "f8",
		"[20" : "f9",
		"[21" : "f10",
		"[23" : "f11",
		"[24" : "f12"
		}
	new = threading.Event() #* Set when a new key or mouse event is available
	idle = threading.Event() #* Cleared while the reader blocks on stdin; Draw waits on it
	mouse_move = threading.Event()
	mouse_report: bool = False
	idle.set()
	stopping: bool = False
	started: bool = False
	reader: threading.Thread
	@classmethod
	def start(cls):
		"""Start the background input reader thread"""
		cls.stopping = False
		cls.reader = threading.Thread(target=cls._get_key)
		cls.reader.start()
		cls.started = True
	@classmethod
	def stop(cls):
		"""Signal the reader thread to stop and wait for it to exit"""
		if cls.started and cls.reader.is_alive():
			cls.stopping = True
			try:
				cls.reader.join()
			except:
				pass
	@classmethod
	def last(cls) -> str:
		"""Return and remove the newest queued key, or "" if the queue is empty"""
		if cls.list: return cls.list.pop()
		else: return ""
	@classmethod
	def get(cls) -> str:
		"""Return and remove the oldest queued key, or "" if the queue is empty"""
		if cls.list: return cls.list.pop(0)
		else: return ""
	@classmethod
	def get_mouse(cls) -> Tuple[int, int]:
		"""Return the latest mouse position, clearing the new-event flag"""
		if cls.new.is_set():
			cls.new.clear()
		return cls.mouse_pos
	@classmethod
	def mouse_moved(cls) -> bool:
		"""Return True once per detected mouse movement"""
		if cls.mouse_move.is_set():
			cls.mouse_move.clear()
			return True
		else:
			return False
	@classmethod
	def has_key(cls) -> bool:
		"""True if there are unprocessed keys in the queue"""
		return bool(cls.list)
	@classmethod
	def clear(cls):
		"""Discard all queued keys"""
		cls.list = []
	@classmethod
	def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
		'''Returns True if key is detected else waits out timer and returns False'''
		if cls.list: return True
		if mouse: Draw.now(Term.mouse_direct_on)
		cls.new.wait(sec if sec > 0 else 0.0)
		if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
		if cls.new.is_set():
			cls.new.clear()
			return True
		else:
			return False
	@classmethod
	def break_wait(cls):
		"""Wake up any input_wait() call without delivering a real key"""
		cls.list.append("_null")
		cls.new.set()
		sleep(0.01)
		cls.new.clear()
	@classmethod
	def _get_key(cls):
		"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
		input_key: str = ""
		clean_key: str = ""
		try:
			while not cls.stopping:
				with Raw(sys.stdin):
					if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
						continue
					input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
					if input_key == "\033": #* If first character is a escape sequence keep reading
						cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting a IO Block error
						Draw.idle.wait() #* Wait for Draw function to finish if busy
						with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
							input_key += sys.stdin.read(20)
							if input_key.startswith("\033[<"):
								#* Mouse events can be long: drain remaining bytes
								_ = sys.stdin.read(1000)
						cls.idle.set() #* Report IO blocking done
					#errlog.debug(f'{repr(input_key)}')
					if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
					elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
						try:
							cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
						except:
							pass
						else:
							if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
								cls.mouse_move.set()
								cls.new.set()
							elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
								clean_key = "mouse_scroll_up"
							elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
								clean_key = "mouse_scroll_down"
							elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
								if Menu.active:
									clean_key = "mouse_click"
								else:
									for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
										if list(cls.mouse_pos) in positions:
											clean_key = key_name
											break
									else:
										clean_key = "mouse_click"
					elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
					else:
						for code in cls.escape.keys(): #* Go trough dict of escape codes to get the cleaned key name
							if input_key.lstrip("\033").startswith(code):
								clean_key = cls.escape[code]
								break
						else: #* If not found in escape dict and length of key is 1, assume regular character
							if len(input_key) == 1:
								clean_key = input_key
					if clean_key:
						cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
						if len(cls.list) > 10: del cls.list[0]
						clean_key = ""
						cls.new.set() #* Set threading event to interrupt main thread sleep
					input_key = ""
		except Exception as e:
			errlog.exception(f'Input thread failed with exception: {e}')
			cls.idle.set()
			cls.list.clear()
			clean_quit(1, thread=True)
class Draw:
	'''Holds the draw buffer and manages IO blocking queue
	* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
	* - Adding "+" prefix to name sets append to True and appends to name's current string
	* - Adding "!" suffix to name sets now to True and print name's current string
	* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
	* .now(*args) : Prints all arguments as a string
	* .clear(*names) : Clear named buffers, all if no argument
	* .last_screen() : Prints all saved buffers
	'''
	strings: Dict[str, str] = {} #* Pending buffer contents by name
	z_order: Dict[str, int] = {} #* Higher z is printed first
	saved: Dict[str, str] = {} #* Last printed contents, kept for redrawing
	save: Dict[str, bool] = {} #* Whether a buffer should be mirrored into saved
	once: Dict[str, bool] = {} #* Whether a buffer is cleared after first print
	idle = threading.Event()
	idle.set()
	@classmethod
	def now(cls, *args):
		'''Wait for input reader and self to be idle then print to screen'''
		Key.idle.wait()
		cls.idle.wait()
		cls.idle.clear()
		try:
			print(*args, sep="", end="", flush=True)
		except BlockingIOError:
			pass
			#* stdout was busy: wait for the input reader and retry once
			Key.idle.wait()
			print(*args, sep="", end="", flush=True)
		cls.idle.set()
	@classmethod
	def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
		'''Store *args under *name*; see the class docstring for name prefixes'''
		string: str = ""
		if name.startswith("+"):
			name = name.lstrip("+")
			append = True
		if name.endswith("!"):
			name = name.rstrip("!")
			now = True
		cls.save[name] = not no_save
		cls.once[name] = once
		#* Keep an existing z value unless an explicit one was passed
		if not name in cls.z_order or z != 100: cls.z_order[name] = z
		if args: string = "".join(args)
		if only_save:
			if name not in cls.saved or not append: cls.saved[name] = ""
			cls.saved[name] += string
		else:
			if name not in cls.strings or not append: cls.strings[name] = ""
			cls.strings[name] += string
			if now:
				cls.out(name)
	@classmethod
	def out(cls, *names: str, clear = False):
		'''Print the named buffers (all if none given) in z order'''
		out: str = ""
		if not cls.strings: return
		if names:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
				if name in names and name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						cls.saved[name] = cls.strings[name]
					if clear or cls.once[name]:
						cls.clear(name)
			cls.now(out)
		else:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
				if name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						cls.saved[name] = cls.strings[name]
					if cls.once[name] and not clear:
						cls.clear(name)
			if clear:
				cls.clear()
			cls.now(out)
	@classmethod
	def saved_buffer(cls) -> str:
		'''Return the concatenation of all saved buffers in z order'''
		out: str = ""
		for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
			if name in cls.saved:
				out += cls.saved[name]
		return out
	@classmethod
	def clear(cls, *names, saved: bool = False):
		'''Remove named buffers (all if none given); saved=True also drops saved copies'''
		if names:
			for name in names:
				if name in cls.strings:
					del cls.strings[name]
				if name in cls.save:
					del cls.save[name]
				if name in cls.once:
					del cls.once[name]
				if saved:
					if name in cls.saved:
						del cls.saved[name]
					if name in cls.z_order:
						del cls.z_order[name]
		else:
			cls.strings = {}
			cls.save = {}
			cls.once = {}
			if saved:
				cls.saved = {}
				cls.z_order = {}
class Color:
	'''Holds representations for a 24-bit color value
	__init__(color, depth="fg", default=False)
	-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
	-- depth accepts "fg" or "bg"
	__call__(*args) joins str arguments to a string and apply color
	__str__ returns escape sequence to set color
	__iter__ returns iteration over red, green and blue in integer values of 0-255.
	* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
	'''
	hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
	def __init__(self, color: str, depth: str = "fg", default: bool = False):
		self.depth = depth
		self.default = default
		#* Pre-set all value attributes so they always exist, even when the
		#* color is given as decimal RGB (which sets no hexa) or parsing fails.
		#* Previously the decimal path raised AttributeError on self.hexa.
		self.hexa = ""
		self.dec = (-1, -1, -1)
		self.red = self.green = self.blue = -1
		self.escape = ""
		try:
			if not color:
				#* Empty color string: a "null" color, or the terminal default background
				self.escape = "\033[49m" if depth == "bg" and default else ""
				return
			elif color.startswith("#"):
				self.hexa = color
				if len(self.hexa) == 3:
					#* 2 digit hex: same value for red, green and blue -> greyscale
					self.hexa += self.hexa[1:3] + self.hexa[1:3]
					c = int(self.hexa[1:3], base=16)
					self.dec = (c, c, c)
				elif len(self.hexa) == 7:
					self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
				else:
					raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
			else:
				#* Decimal RGB: "R G B" with each component 0-255
				c_t = tuple(map(int, color.split(" ")))
				if len(c_t) == 3:
					self.dec = c_t #type: ignore
				else:
					raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
			ct = self.dec[0] + self.dec[1] + self.dec[2]
			if ct > 255*3 or ct < 0:
				raise ValueError(f'RGB values out of range: {color}')
		except Exception as e:
			errlog.exception(str(e))
			self.escape = ""
			return
		#* Derive the hex form when the color was given as decimal RGB
		if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
		if self.dec and self.hexa:
			self.red, self.green, self.blue = self.dec
			self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
		if not CONFIG.truecolor or LOW_COLOR:
			#* Downgrade to a 256-color escape when truecolor is off or unsupported
			self.escape = f'{self.truecolor_to_256(rgb=self.dec, depth=self.depth)}'
	def __str__(self) -> str:
		return self.escape
	def __repr__(self) -> str:
		return repr(self.escape)
	def __iter__(self) -> Iterable:
		for c in self.dec: yield c
	def __call__(self, *args: str) -> str:
		'''Wrap the joined arguments in this color, resetting to the terminal default after'''
		if len(args) < 1: return ""
		return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
	@staticmethod
	def truecolor_to_256(rgb: Tuple[int, int, int], depth: str="fg") -> str:
		'''Convert a 24-bit rgb tuple to the closest 256-color escape sequence'''
		out: str = ""
		pre: str = f'\033[{"38" if depth == "fg" else "48"};5;'
		greyscale: Tuple[int, int, int] = ( rgb[0] // 11, rgb[1] // 11, rgb[2] // 11 )
		if greyscale[0] == greyscale[1] == greyscale[2]:
			#* Use the 24-step greyscale ramp (indexes 232-255)
			out = f'{pre}{232 + greyscale[0]}m'
		else:
			#* Map to the 6x6x6 color cube (indexes 16-231)
			out = f'{pre}{round(rgb[0] / 51) * 36 + round(rgb[1] / 51) * 6 + round(rgb[2] / 51) + 16}m'
		return out
	@staticmethod
	def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
		"""Returns escape sequence to set color
		* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
		* or decimal RGB: r=0-255, g=0-255, b=0-255
		* depth="fg" or "bg"
		"""
		dint: int = 38 if depth == "fg" else 48
		color: str = ""
		if hexa:
			try:
				if len(hexa) == 3:
					c = int(hexa[1:], base=16)
					if CONFIG.truecolor and not LOW_COLOR:
						color = f'\033[{dint};2;{c};{c};{c}m'
					else:
						color = f'{Color.truecolor_to_256(rgb=(c, c, c), depth=depth)}'
				elif len(hexa) == 7:
					if CONFIG.truecolor and not LOW_COLOR:
						color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
					else:
						color = f'{Color.truecolor_to_256(rgb=(int(hexa[1:3], base=16), int(hexa[3:5], base=16), int(hexa[5:7], base=16)), depth=depth)}'
			except ValueError as e:
				errlog.exception(f'{e}')
		else:
			if CONFIG.truecolor and not LOW_COLOR:
				color = f'\033[{dint};2;{r};{g};{b}m'
			else:
				color = f'{Color.truecolor_to_256(rgb=(r, g, b), depth=depth)}'
		return color
	@classmethod
	def fg(cls, *args) -> str:
		'''Foreground color from a hex string or r, g, b values'''
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
		else: return cls.escape_color(hexa=args[0], depth="fg")
	@classmethod
	def bg(cls, *args) -> str:
		'''Background color from a hex string or r, g, b values'''
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
		else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
	'''Standard colors for menus and dialogs'''
	default = Color("#cc")
	white = Color("#ff")
	red = Color("#bf3636")
	green = Color("#68bf36")
	blue = Color("#0fd7ff")
	yellow = Color("#db8b00")
	black_bg = Color("#00", depth="bg")
	null = Color("") #* Empty color: produces no escape output
class Theme:
	'''__init__ accepts a dict containing { "color_element" : "color" }'''
	themes: Dict[str, str] = {} #* Available theme names mapped to file paths
	cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME } #* Parsed theme files by name
	current: str = ""
	main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
	#* 101-step color gradients (indexed 0-100) built from the *_start/mid/end colors
	gradient: Dict[str, List[str]] = {
		"temp" : [],
		"cpu" : [],
		"free" : [],
		"cached" : [],
		"available" : [],
		"used" : [],
		"download" : [],
		"upload" : [],
		"proc" : [],
		"proc_color" : [],
		"process" : [],
	}
	def __init__(self, theme: str):
		self.refresh()
		self._load_theme(theme)
	def __call__(self, theme: str):
		'''Switch to another theme, resetting all gradients first'''
		for k in self.gradient.keys(): self.gradient[k] = []
		self._load_theme(theme)
	def _load_theme(self, theme: str):
		'''Load and apply *theme*, falling back to "Default" if not found'''
		tdict: Dict[str, str]
		if theme in self.cached:
			tdict = self.cached[theme]
		elif theme in self.themes:
			tdict = self._load_file(self.themes[theme])
			self.cached[theme] = tdict
		else:
			errlog.warning(f'No theme named "{theme}" found!')
			theme = "Default"
			CONFIG.color_theme = theme
			tdict = DEFAULT_THEME
		self.current = theme
		#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
		#* Substitute sensible fallbacks for optional keys missing from the theme
		if not "graph_text" in tdict and "inactive_fg" in tdict:
			tdict["graph_text"] = tdict["inactive_fg"]
		if not "meter_bg" in tdict and "inactive_fg" in tdict:
			tdict["meter_bg"] = tdict["inactive_fg"]
		if not "process_start" in tdict and "cpu_start" in tdict:
			tdict["process_start"] = tdict["cpu_start"]
			tdict["process_mid"] = tdict.get("cpu_mid", "")
			tdict["process_end"] = tdict.get("cpu_end", "")
		#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
		for item, value in DEFAULT_THEME.items():
			default = item in ["main_fg", "main_bg"]
			depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
			if item in tdict:
				setattr(self, item, Color(tdict[item], depth=depth, default=default))
			else:
				setattr(self, item, Color(value, depth=depth, default=default))
		#* Create color gradients from one, two or three colors, 101 values indexed 0-100
		self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
		self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
		rgb: Dict[str, Tuple[int, int, int]]
		colors: List[List[int]] = []
		for name in self.gradient:
			rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
			colors = [ list(getattr(self, f'{name}_start')) ]
			if rgb["end"][0] >= 0:
				#* Interpolate in two 50-step halves when a mid color is set, otherwise in one 100-step pass
				r = 50 if rgb["mid"][0] >= 0 else 100
				for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
					for i in range(r):
						colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
					if r == 100:
						break
				self.gradient[name] += [ Color.fg(*color) for color in colors ]
			else:
				#* Only a start color given: use it for the whole gradient
				c = Color.fg(*rgb["start"])
				self.gradient[name] += [c] * 101
		#* Set terminal colors
		Term.fg = f'{self.main_fg}'
		Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
		Draw.now(self.main_fg, self.main_bg)
	@classmethod
	def refresh(cls):
		'''Sets themes dict with names and paths to all found themes'''
		cls.themes = { "Default" : "Default" }
		try:
			for d in (THEME_DIR, USER_THEME_DIR):
				if not d: continue
				for f in os.listdir(d):
					if f.endswith(".theme"):
						#* User themes are prefixed with "+" to distinguish them from bundled ones
						cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
		except Exception as e:
			errlog.exception(str(e))
	@staticmethod
	def _load_file(path: str) -> Dict[str, str]:
		'''Load a bashtop formatted theme file and return a dict'''
		new_theme: Dict[str, str] = {}
		try:
			with open(path, "r") as f:
				for line in f:
					#* Lines look like: theme[key]="value"
					if not line.startswith("theme["): continue
					key = line[6:line.find("]")]
					s = line.find('"')
					value = line[s + 1:line.find('"', s + 1)]
					new_theme[key] = value
		except Exception as e:
			errlog.exception(str(e))
		return new_theme
class Banner:
	'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
	out: List[str] = [] #* Pre-rendered banner lines with color escapes applied
	c_color: str = "" #* Color currently in effect while rendering a line
	length: int = 0 #* Width of the widest banner line
	if not out:
		#* Render the banner once at class creation time from BANNER_SRC
		for num, (color, color2, line) in enumerate(BANNER_SRC):
			if len(line) > length: length = len(line)
			out_var = ""
			line_color = Color.fg(color)
			line_color2 = Color.fg(color2)
			line_dark = Color.fg(f'#{80 - num * 6}')
			for n, letter in enumerate(line):
				if letter == "█" and c_color != line_color:
					#* Use the secondary color for the middle section of each line
					if 5 < n < 25: c_color = line_color2
					else: c_color = line_color
					out_var += c_color
				elif letter == " ":
					#* Move right instead of printing a blank to keep the background visible
					letter = f'{Mv.r(1)}'
					c_color = ""
				elif letter != "█" and c_color != line_dark:
					c_color = line_dark
					out_var += line_dark
				out_var += letter
			out.append(out_var)
	@classmethod
	def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
		'''Draw the banner starting at *line*; prints it if now=True, otherwise returns the string'''
		out: str = ""
		if center: col = Term.width // 2 - cls.length // 2
		for n, o in enumerate(cls.out):
			out += f'{Mv.to(line + n, col)}{o}'
		out += f'{Term.fg}'
		if now: Draw.out(out)
		else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
	'''Class for creating and adding to graphs
	* __str__ : returns graph as a string
	* add(value: int) : adds a value to graph and returns it as a string
	* __call__ : same as add

	Internally two alternating string buffers are kept (graphs[True]/graphs[False]):
	each braille cell encodes two data points (left/right half), so on every new
	value the buffers are swapped and the new half-cell is appended to the active one.
	'''
	out: str                         # last rendered graph (with color escapes)
	width: int                       # width in terminal cells (2 data points per cell)
	height: int                      # height in terminal rows
	graphs: Dict[bool, List[str]]    # the two alternating row buffers
	colors: List[str]                # per-row (or per-percent) color escapes
	invert: bool                     # True = graph grows downward from the top
	max_value: int                   # ceiling for scaling raw values to percent (0 = values are already percent)
	color_max_value: int
	offset: int                      # added to raw values before scaling
	no_zero: bool                    # never draw a fully empty bottom cell
	round_up_low: bool               # round small non-zero values up to at least 1%
	current: bool                    # which of the two buffers is active
	last: int                        # previous data point (left half of the next cell)
	lowest: int = 0
	symbol: Dict[float, str]         # braille lookup table chosen from Symbol
	def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None, no_zero: bool = False, round_up_low: bool = False):
		self.graphs: Dict[bool, List[str]] = {False : [], True : []}
		self.current: bool = True
		self.width = width
		self.height = height
		self.invert = invert
		self.offset = offset
		self.round_up_low = round_up_low
		self.no_zero = no_zero or round_up_low
		if not data: data = [0]
		if max_value:
			self.lowest = 1 if self.round_up_low else 0
			self.max_value = max_value
			data = [ min_max((v + offset) * 100 // (max_value + offset), min_max(v + offset, 0, self.lowest), 100) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
		else:
			self.max_value = 0
		if color_max_value:
			self.color_max_value = color_max_value
		else:
			self.color_max_value = self.max_value
		#* When a separate color ceiling is given, stretch/compress the gradient accordingly
		if self.color_max_value and self.max_value:
			color_scale = int(100.0 * self.max_value / self.color_max_value)
		else:
			color_scale = 100
		self.colors: List[str] = []
		if isinstance(color, list) and height > 1:
			for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
			if invert: self.colors.reverse()
		elif isinstance(color, Color) and height > 1:
			self.colors = [ f'{color}' for _ in range(height) ]
		else:
			if isinstance(color, list): self.colors = color
			elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
		#* Single-row graphs use the compact lookup where an empty cell is a cursor move
		if self.height == 1:
			self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
		else:
			self.symbol = Symbol.graph_down if invert else Symbol.graph_up
		value_width: int = ceil(len(data) / 2)
		filler: str = ""
		if value_width > width: #* If the size of given data set is bigger then width of graph, shrink data set
			data = data[-(width*2):]
			value_width = ceil(len(data) / 2)
		elif value_width < width: #* If the size of given data set is smaller then width of graph, fill graph with whitespace
			filler = self.symbol[0.0] * (width - value_width)
		if len(data) % 2: data.insert(0, 0)
		for _ in range(height):
			for b in [True, False]:
				self.graphs[b].append(filler)
		self._create(data, new=True)
	def _create(self, data: List[int], new: bool = False):
		'''Append braille cells for *data* (percent values) to the row buffers and re-render self.out.
		Each cell's left half is the previous value (self.last), the right half the new one.'''
		h_high: int
		h_low: int
		value: Dict[str, int] = { "left" : 0, "right" : 0 }
		val: int
		side: str
		#* Create the graph
		for h in range(self.height):
			#* Percent range covered by this terminal row
			h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
			h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
			for v in range(len(data)):
				if new: self.current = bool(v % 2) #* Switch between True and False graphs
				if new and v == 0: self.last = 0
				for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
					if val >= h_high:
						value[side] = 4
					elif val <= h_low:
						value[side] = 0
					else:
						#* Map the value's position within this row's range to 1-4 braille dots
						if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
						else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
					if self.no_zero and not (new and v == 0 and side == "left") and h == self.height - 1 and value[side] < 1 and not (self.round_up_low and val == 0): value[side] = 1
				if new: self.last = data[v]
				self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
		if data: self.last = data[-1]
		self.out = ""
		if self.height == 1:
			self.out += f'{"" if not self.colors else (THEME.inactive_fg if self.last < 5 else self.colors[self.last])}{self.graphs[self.current][0]}'
		elif self.height > 1:
			for h in range(self.height):
				if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
				self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
		if self.colors: self.out += f'{Term.fg}'
	def __call__(self, value: Union[int, None] = None) -> str:
		'''Add one new value to the graph (scrolling it left by half a cell) and return the render.
		A non-int value returns the last render unchanged.'''
		if not isinstance(value, int): return self.out
		self.current = not self.current
		#* Drop the leftmost half-cell from the buffer that is about to receive the new value
		if self.height == 1:
			if self.graphs[self.current][0].startswith(self.symbol[0.0]):
				self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
			else:
				self.graphs[self.current][0] = self.graphs[self.current][0][1:]
		else:
			for n in range(self.height):
				self.graphs[self.current][n] = self.graphs[self.current][n][1:]
		if self.max_value: value = min_max((value + self.offset) * 100 // (self.max_value + self.offset), min_max(value + self.offset, 0, self.lowest), 100)
		self._create([value])
		return self.out
	def add(self, value: Union[int, None] = None) -> str:
		return self.__call__(value)
	def __str__(self):
		return self.out
	def __repr__(self):
		return repr(self.out)
class Graphs:
	'''Holds all graphs and lists of graphs for dynamically created graphs'''
	cpu: Dict[str, Graph] = {}                       # "up"/"down" halves of the cpu load graph
	cores: List[Graph] = [NotImplemented] * THREADS  # one mini graph per logical core
	temps: List[Graph] = [NotImplemented] * (THREADS + 1)  # index 0 = package temp, 1.. = per core
	net: Dict[str, Graph] = {}                       # "download"/"upload" graphs
	detailed_cpu: Graph = NotImplemented
	detailed_mem: Graph = NotImplemented
	pid_cpu: Dict[int, Graph] = {}                   # per-process cpu graphs keyed by pid
	disk_io: Dict[str, Dict[str, Graph]] = {}        # per-disk "rw" or "read"/"write" graphs
class Meter:
	'''Creates a percentage meter
	__init__(value, width, theme, gradient_name) to create new meter
	__call__(value) to set value and return meter as a string
	__str__ returns last set meter as a string
	'''
	out: str                    # last rendered meter string
	color_gradient: List[str]   # 101-entry color gradient indexed by percent
	color_inactive: Color       # color for the unfilled part of the meter
	gradient_name: str
	width: int                  # meter width in cells
	invert: bool                # True = gradient runs high-to-low
	saved: Dict[int, str]       # cache of rendered meters keyed by percent
	def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
		self.gradient_name = gradient_name
		self.color_gradient = THEME.gradient[gradient_name]
		self.color_inactive = THEME.meter_bg
		self.width = width
		self.saved = {}
		self.invert = invert
		self.out = self._create(value)
	def __call__(self, value: Union[int, None]) -> str:
		'''Set a new percent value and return the rendered meter.
		A non-int value returns the previous render unchanged; renders are cached per value.'''
		if not isinstance(value, int): return self.out
		#* Clamp to the valid 0-100 range (negatives previously mapped to 100, showing a full meter)
		if value > 100: value = 100
		elif value < 0: value = 0
		if value in self.saved:
			self.out = self.saved[value]
		else:
			self.out = self._create(value)
		return self.out
	def __str__(self) -> str:
		return self.out
	def __repr__(self):
		return repr(self.out)
	def _create(self, value: int) -> str:
		'''Render the meter for *value* percent: filled cells colored from the gradient,
		the remainder in the inactive color. Result is cached in self.saved.'''
		if value > 100: value = 100
		elif value < 0: value = 0
		out: str = ""
		for i in range(1, self.width + 1):
			if value >= round(i * 100 / self.width):
				out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
			else:
				#* First unfilled cell: paint the whole remainder at once and stop
				out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
				break
		else:
			#* Loop ran to completion (value == 100): just reset the foreground color
			out += f'{Term.fg}'
		if not value in self.saved:
			self.saved[value] = out
		return out
class Meters:
	'''Holds all meters (and meter/graph hybrids) used by the boxes.'''
	cpu: Meter
	battery: Meter
	mem: Dict[str, Union[Meter, Graph]] = {}    # per mem-category meter, or graph when mem_graphs is on
	swap: Dict[str, Union[Meter, Graph]] = {}
	disks_used: Dict[str, Meter] = {}           # keyed by disk name
	disks_free: Dict[str, Meter] = {}
class Box:
	'''Box class with all needed attributes for create_box() function'''
	name: str
	num: int = 0
	boxes: List = []        # names of currently shown boxes
	#* Preset layouts; the class body below matches the configured boxes against them
	view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
	view_mode: str
	for view_mode in view_modes:
		if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
			break
	else:
		#* No preset matched: register the configured selection as a custom "user" mode
		view_mode = "user"
		view_modes["user"] = CONFIG.shown_boxes.split()
	height_p: int           # height as percent of terminal height
	width_p: int            # width as percent of terminal width
	x: int
	y: int
	width: int
	height: int
	out: str
	bg: str
	_b_cpu_h: int           # actual cpu box height, used to position the boxes below it
	_b_mem_h: int
	redraw_all: bool
	buffers: List[str] = []
	clock_on: bool = False
	clock: str = ""
	clock_len: int = 0
	resized: bool = False
	#* Replacement values for the custom /host, /user and /uptime clock format tokens
	clock_custom_format: Dict[str, Any] = {
		"/host" : os.uname()[1],
		"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
		"/uptime" : "",
		}
	if clock_custom_format["/host"].endswith(".local"):
		clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
	@classmethod
	def calc_sizes(cls):
		'''Calculate sizes of boxes'''
		cls.boxes = CONFIG.shown_boxes.split()
		for sub in cls.__subclasses__():
			sub._calc_size() # type: ignore
			sub.resized = True # type: ignore
	@classmethod
	def draw_update_ms(cls, now: bool = True):
		'''Draw the "+ NNNms -" update interval control in the cpu box title bar
		and register its mouse hit areas.'''
		if not "cpu" in cls.boxes: return
		update_string: str = f'{CONFIG.update_ms}ms'
		xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
		if not "+" in Key.mouse:
			Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
			Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
		Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
			f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
			f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
		if now and not Menu.active:
			Draw.clear("update_ms")
			if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
				Draw.out("battery")
	@classmethod
	def draw_clock(cls, force: bool = False):
		'''Draw the clock in the cpu box title bar; skipped when the rendered
		time string is unchanged unless force is True.'''
		if not "cpu" in cls.boxes or not cls.clock_on: return
		out: str = ""
		if force: pass
		elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
		clock_string = cls.clock = strftime(CONFIG.draw_clock)
		for custom in cls.clock_custom_format:
			if custom in clock_string:
				if custom == "/uptime": cls.clock_custom_format["/uptime"] = CpuCollector.uptime
				clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
		clock_len = len(clock_string[:(CpuBox.width-56)])
		#* Clock text changed length: erase the old position with box line characters first
		if cls.clock_len != clock_len and not CpuBox.resized:
			out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
			cls.clock_len = clock_len
		now: bool = False if Menu.active else not force
		out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
			f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
		Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
		if now and not Menu.active:
			if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
				Draw.out("battery")
	@classmethod
	def empty_bg(cls) -> str:
		'''Return the screen shown when all boxes are hidden: banner plus key hints.'''
		return (f'{Term.clear}' +
				(f'{Banner.draw(Term.height // 2 - 10, center=True)}'
				f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
				f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}' if Term.height > 22 else "") +
				f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
				f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
				f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
				f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
				f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
				f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
				f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
	@classmethod
	def draw_bg(cls, now: bool = True):
		'''Draw all boxes outlines and titles'''
		out: str = ""
		if not cls.boxes:
			out = cls.empty_bg()
		else:
			out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
		Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
		cls.draw_update_ms(now=now)
		if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
	'''Geometry of an inner box drawn inside a parent Box (e.g. the per-core panel of CpuBox).'''
	box_x: int = 0
	box_y: int = 0
	box_width: int = 0
	box_height: int = 0
	box_columns: int = 0    # number of core columns in the inner box
	column_size: int = 0    # 0-2: how wide/detailed each core column is drawn
class CpuBox(Box, SubBox):
	'''The cpu box: load graphs, per-core meters, temperatures, battery and load average.'''
	name = "cpu"
	num = 1
	x = 1
	y = 1
	height_p = 32
	width_p = 100
	min_w: int = 60
	min_h: int = 8
	resized: bool = True
	redraw: bool = False
	buffer: str = "cpu"
	battery_percent: int = 1000     # 1000 acts as a "not yet read" sentinel
	battery_secs: int = 0
	battery_status: str = "Unknown"
	old_battery_pos = 0             # previous battery indicator position/length, for erasing on move
	old_battery_len = 0
	battery_path: Union[str, None] = ""   # "" = not probed yet, None = no sysfs battery found
	battery_clear: bool = False
	battery_symbols: Dict[str, str] = {"Charging": "▲",
										"Discharging": "▼",
										"Full": "■",
										"Not charging": "■"}
	clock_block: bool = True
	Box.buffers.append(buffer)
	@classmethod
	def _calc_size(cls):
		'''Compute the box dimensions and the layout of the per-core sub box,
		picking a column size (0-2) that fits the available width.'''
		if not "cpu" in cls.boxes:
			Box._b_cpu_h = 0
			cls.width = Term.width
			return
		cpu = CpuCollector
		height_p: int
		if cls.boxes == ["cpu"]:
			height_p = 100
		else:
			height_p = cls.height_p
		cls.width = round(Term.width * cls.width_p / 100)
		cls.height = round(Term.height * height_p / 100)
		if cls.height < 8: cls.height = 8
		Box._b_cpu_h = cls.height
		#THREADS = 64
		cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
		#* Try the widest per-core column layout first, then fall back to narrower ones
		if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
			cls.column_size = 2
			cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
		elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
			cls.column_size = 1
			cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
		elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
			cls.column_size = 0
		else:
			cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
		if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
		cls.box_height = ceil(THREADS / cls.box_columns) + 4
		if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
		cls.box_x = (cls.width - 1) - cls.box_width
		cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
	@classmethod
	def _draw_bg(cls) -> str:
		'''Return the box outline plus the "Menu" title button and the inner core box.'''
		if not "cpu" in cls.boxes: return ""
		if not "M" in Key.mouse:
			Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
		return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
		f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
		f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
	@classmethod
	def battery_activity(cls) -> bool:
		'''Poll battery state via psutil (and sysfs for the status string) and
		return True when the battery indicator needs to be redrawn.'''
		if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
			if cls.battery_percent != 1000:
				cls.battery_clear = True
			return False
		if cls.battery_path == "":
			#* First call: locate the battery directory in sysfs, if any
			cls.battery_path = None
			if os.path.isdir("/sys/class/power_supply"):
				for directory in sorted(os.listdir("/sys/class/power_supply")):
					if directory.startswith('BAT') or 'battery' in directory.lower():
						cls.battery_path = f'/sys/class/power_supply/{directory}/'
						break
		return_true: bool = False
		percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
		if percent != cls.battery_percent:
			cls.battery_percent = percent
			return_true = True
		seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
		if seconds != cls.battery_secs:
			cls.battery_secs = seconds
			return_true = True
		status: str = "not_set"
		if cls.battery_path:
			status = readfile(cls.battery_path + "status", default="not_set")
		#* Fall back to deriving the status from the power_plugged flag
		if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
			status = "Charging" if cls.battery_percent < 100 else "Full"
		elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
			status = "Discharging"
		elif status == "not_set":
			status = "Unknown"
		if status != cls.battery_status:
			cls.battery_status = status
			return_true = True
		return return_true or cls.resized or cls.redraw or Menu.active
	@classmethod
	def _draw_fg(cls):
		'''Render the box contents from CpuCollector data into the draw buffer:
		load graphs, total meter, per-core rows, temps, battery and load average.'''
		if not "cpu" in cls.boxes: return
		cpu = CpuCollector
		if cpu.redraw: cls.redraw = True
		out: str = ""
		out_misc: str = ""
		lavg: str = ""
		x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
		bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
		hh: int = ceil(h / 2)
		hh2: int = h - hh
		mid_line: bool = False
		temp: int = 0
		unit: str = ""
		#* A divider between the upper and lower graph is only drawn when they show different stats
		if not CONFIG.cpu_single_graph and CONFIG.cpu_graph_upper != CONFIG.cpu_graph_lower:
			mid_line = True
			if h % 2: hh = floor(h / 2)
			else: hh2 -= 1
		hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
		ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
		if cls.resized or cls.redraw:
			#* Rebuild all graphs and meters for the new geometry
			if not "m" in Key.mouse:
				Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
			out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
			Graphs.cpu["up"] = Graph(w - bw - 3, (h if CONFIG.cpu_single_graph else hh), THEME.gradient["cpu"], cpu.cpu_upper, round_up_low=True)
			if not CONFIG.cpu_single_graph:
				Graphs.cpu["down"] = Graph(w - bw - 3, hh2, THEME.gradient["cpu"], cpu.cpu_lower, invert=CONFIG.cpu_invert_lower, round_up_low=True)
			Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
			if cls.column_size > 0 or ct_width > 0:
				for n in range(THREADS):
					Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
			if cpu.got_sensors:
				Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
				if cls.column_size > 1:
					for n in range(1, THREADS + 1):
						if not cpu.cpu_temp[n]:
							continue
						Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
			Draw.buffer("cpu_misc", out_misc, only_save=True)
		if CONFIG.show_battery and cls.battery_activity():
			bat_out: str = ""
			if cls.battery_secs > 0:
				battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
			else:
				battery_time = ""
			if not hasattr(Meters, "battery") or cls.resized:
				Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
			battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
			battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
			battery_pos = cls.width - battery_len - 17
			#* Indicator moved or changed size: erase the previous one with box line characters
			if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
				bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
			cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
			bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
				("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
				f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
			Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
		elif cls.battery_clear:
			#* Battery disappeared: erase the indicator and reset all battery state
			out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
			cls.battery_clear = False
			cls.battery_percent = 1000
			cls.battery_secs = 0
			cls.battery_status = "Unknown"
			cls.old_battery_pos = 0
			cls.old_battery_len = 0
			cls.battery_path = ""
			Draw.clear("battery", saved=True)
		cx = cy = cc = 0
		ccw = (bw + 1) // cls.box_columns
		if cpu.cpu_freq:
			freq: str = f'{cpu.cpu_freq} Mhz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
			out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
		out += f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_upper[-1])}'
		if mid_line:
			out += (f'{Mv.to(y+hh, x-1)}{THEME.cpu_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (w - bw - 3)}{THEME.div_line(Symbol.title_left)}'
				f'{Mv.to(y+hh, x+((w-bw)//2)-((len(CONFIG.cpu_graph_upper)+len(CONFIG.cpu_graph_lower))//2)-4)}{THEME.main_fg}{CONFIG.cpu_graph_upper}{Mv.r(1)}▲▼{Mv.r(1)}{CONFIG.cpu_graph_lower}')
		if not CONFIG.cpu_single_graph and Graphs.cpu.get("down"):
			out += f'{Mv.to(y + hh + (1 * mid_line), x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_lower[-1])}'
		out += (f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
				f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
		if cpu.got_sensors:
			try:
				temp, unit = temperature(cpu.cpu_temp[0][-1], CONFIG.temp_scale)
				out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
						f'{temp:>4}{THEME.main_fg}{unit}')
			except:
				#* Reading temps failed: disable sensors for the rest of the session
				cpu.got_sensors = False
		cy += 1
		for n in range(1, THREADS + 1):
			out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
			if cls.column_size > 0 or ct_width > 0:
				out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
			else:
				out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
			out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
			if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
				try:
					temp, unit = temperature(cpu.cpu_temp[n][-1], CONFIG.temp_scale)
					if cls.column_size > 1:
						out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[n][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
					else:
						out += f'{THEME.gradient["temp"][min_max(temp, 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}'
					out += f'{temp:>4}{THEME.main_fg}{unit}'
				except:
					cpu.got_sensors = False
			elif cpu.got_sensors and not hide_cores:
				out += f'{Mv.r(max(6, 6 * cls.column_size))}'
			out += f'{THEME.div_line(Symbol.v_line)}'
			cy += 1
			#* Column full: continue in the next column of the inner box
			if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
				cc += 1; cy = 1; cx = ccw * cc
				if cc == cls.box_columns: break
		if cy < bh - 1: cy = bh - 1
		if cy < bh and cc < cls.box_columns:
			#* Pick a load average format that fits the current column width
			if cls.column_size == 2 and cpu.got_sensors:
				lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
			elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
				lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
			elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
				lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
			else:
				lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
			out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
		if CONFIG.show_uptime:
			out += f'{Mv.to(y + (0 if not CONFIG.cpu_invert_lower or CONFIG.cpu_single_graph else h - 1), x + 1)}{THEME.graph_text}{Fx.trans("up " + cpu.uptime)}'
		Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
		cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
	'''The memory box: mem/swap meters or graphs and optionally a disks panel with io stats.'''
	name = "mem"
	num = 2
	height_p = 38
	width_p = 45
	min_w: int = 36
	min_h: int = 10
	x = 1
	y = 1
	mem_meter: int = 0      # width of the mem/swap meters
	mem_size: int = 0       # 1-3: detail level of the mem section chosen from available space
	disk_meter: int = 0     # width of the disk used/free meters
	divider: int = 0        # x position of the mem/disks divider line
	mem_width: int = 0
	disks_width: int = 0
	disks_io_h: int = 0     # height of each disk's io graph
	disks_io_order: List[str] = []    # disks with io graphs first, then the rest
	graph_speeds: Dict[str, int] = {} # per-disk io graph ceiling from CONFIG.io_graph_speeds
	graph_height: int
	resized: bool = True
	redraw: bool = False
	buffer: str = "mem"
	swap_on: bool = CONFIG.show_swap
	Box.buffers.append(buffer)
	mem_names: List[str] = ["used", "available", "cached", "free"]
	swap_names: List[str] = ["used", "free"]
	@classmethod
	def _calc_size(cls):
		'''Compute box dimensions and split the width between the mem and disks sections.'''
		if not "mem" in cls.boxes:
			Box._b_mem_h = 0
			cls.width = Term.width
			return
		width_p: int; height_p: int
		if not "proc" in cls.boxes:
			width_p = 100
		else:
			width_p = cls.width_p
		if not "cpu" in cls.boxes:
			height_p = 60 if "net" in cls.boxes else 98
		elif not "net" in cls.boxes:
			height_p = 98 - CpuBox.height_p
		else:
			height_p = cls.height_p
		cls.width = round(Term.width * width_p / 100)
		cls.height = round(Term.height * height_p / 100) + 1
		if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
		Box._b_mem_h = cls.height
		cls.y = Box._b_cpu_h + 1
		if CONFIG.show_disks:
			cls.mem_width = ceil((cls.width - 3) / 2)
			cls.disks_width = cls.width - cls.mem_width - 3
			if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
			cls.divider = cls.x + cls.mem_width
		else:
			cls.mem_width = cls.width - 1
		item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
		#* Pick the mem detail level (1-3) that fits the available height/width
		if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
		elif cls.mem_width > 25: cls.mem_size = 2
		else: cls.mem_size = 1
		cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
		if cls.mem_size == 1: cls.mem_meter += 6
		if cls.mem_meter < 1: cls.mem_meter = 0
		if CONFIG.mem_graphs:
			cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
			if cls.graph_height == 0: cls.graph_height = 1
			if cls.graph_height > 1: cls.mem_meter += 6
		else:
			cls.graph_height = 0
		if CONFIG.show_disks:
			cls.disk_meter = cls.width - cls.mem_width - 23
			if cls.disks_width < 25:
				cls.disk_meter += 10
			if cls.disk_meter < 1: cls.disk_meter = 0
	@classmethod
	def _draw_bg(cls) -> str:
		'''Return the box outline with the "disks" title button and the mem/disks divider.'''
		if not "mem" in cls.boxes: return ""
		out: str = ""
		out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
		if CONFIG.show_disks:
			out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
				f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
				f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
				f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
			Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
		else:
			out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
			Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
		return out
	@classmethod
	def _draw_fg(cls):
		'''Render the box contents from MemCollector data: mem/swap meters or graphs
		and the disks panel (normal or io mode) into the draw buffer.'''
		if not "mem" in cls.boxes: return
		mem = MemCollector
		if mem.redraw: cls.redraw = True
		out: str = ""
		out_misc: str = ""
		gbg: str = ""
		gmv: str = ""
		gli: str = ""
		x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
		if cls.resized or cls.redraw:
			#* Rebuild all meters and graphs for the new geometry
			cls.redraw = True
			cls._calc_size()
			out_misc += cls._draw_bg()
			Meters.mem = {}
			Meters.swap = {}
			Meters.disks_used = {}
			Meters.disks_free = {}
			if cls.mem_meter > 0:
				for name in cls.mem_names:
					if CONFIG.mem_graphs:
						Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
					else:
						Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
				if cls.swap_on:
					for name in cls.swap_names:
						if CONFIG.swap_disk and CONFIG.show_disks:
							break
						elif CONFIG.mem_graphs and not CONFIG.swap_disk:
							Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
						else:
							Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
			if CONFIG.show_disks and mem.disks:
				if CONFIG.show_io_stat or CONFIG.io_mode:
					d_graph: List[str] = []
					d_no_graph: List[str] = []
					l_vals: List[Tuple[str, int, str, bool]] = []
					if CONFIG.io_mode:
						cls.disks_io_h = (cls.height - 2 - len(mem.disks)) // max(1, len(mem.disks_io_dict))
						if cls.disks_io_h < 2: cls.disks_io_h = 1 if CONFIG.io_graph_combined else 2
					else:
						cls.disks_io_h = 1
					if CONFIG.io_graph_speeds and not cls.graph_speeds:
						try:
							cls.graph_speeds = { spds.split(":")[0] : int(spds.split(":")[1]) for spds in list(i.strip() for i in CONFIG.io_graph_speeds.split(","))}
						except (KeyError, ValueError):
							errlog.error("Wrong formatting in io_graph_speeds variable. Using defaults.")
					for name in mem.disks.keys():
						if name in mem.disks_io_dict:
							d_graph.append(name)
						else:
							d_no_graph.append(name)
							continue
						#* Either one combined read+write graph, or stacked read (top) / inverted write (bottom)
						if CONFIG.io_graph_combined or not CONFIG.io_mode:
							l_vals = [("rw", cls.disks_io_h, "available", False)]
						else:
							l_vals = [("read", cls.disks_io_h // 2, "free", False), ("write", cls.disks_io_h // 2, "used", True)]
						Graphs.disk_io[name] = {_name : Graph(width=cls.disks_width - (6 if not CONFIG.io_mode else 0), height=_height, color=THEME.gradient[_gradient],
							data=mem.disks_io_dict[name][_name], invert=_invert, max_value=cls.graph_speeds.get(name, 10), no_zero=True)
							for _name, _height, _gradient, _invert in l_vals}
					cls.disks_io_order = d_graph + d_no_graph
				if cls.disk_meter > 0:
					for n, name in enumerate(mem.disks.keys()):
						if n * 2 > h: break
						Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
						if len(mem.disks) * 3 <= h + 1:
							Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
			if not "g" in Key.mouse:
				Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
			out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
				f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
			if CONFIG.show_disks:
				if not "s" in Key.mouse:
					Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
				out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
				f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
				if not "i" in Key.mouse:
					Key.mouse["i"] = [[x + w - 10 + i, y-1] for i in range(2)]
				out_misc += (f'{Mv.to(y-1, x + w - 11)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.io_mode else ""}'
				f'{THEME.hi_fg("i")}{THEME.title("o")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
			if Collector.collect_interrupt: return
			Draw.buffer("mem_misc", out_misc, only_save=True)
		try:
			#* Mem
			cx = 1; cy = 1
			out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
			if cls.graph_height > 0:
				gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
			if cls.graph_height >= 2:
				gbg = f'{Mv.l(1)}'
				gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
			big_mem: bool = cls.mem_width > 21
			for name in cls.mem_names:
				if cy > h - 1: break
				if Collector.collect_interrupt: return
				if cls.mem_size > 2:
					out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
						f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
					cy += 2 if not cls.graph_height else cls.graph_height + 1
				else:
					out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
					cy += 1 if not cls.graph_height else cls.graph_height
			#* Swap
			if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
				if h - cy > 5:
					if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
					cy += 1
				out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
				cy += 1
				for name in cls.swap_names:
					if cy > h - 1: break
					if Collector.collect_interrupt: return
					if cls.mem_size > 2:
						out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
							f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
						cy += 2 if not cls.graph_height else cls.graph_height + 1
					else:
						out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
			if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
			#* Disks
			if CONFIG.show_disks and mem.disks:
				cx = x + cls.mem_width - 1; cy = 0
				big_disk: bool = cls.disks_width >= 25
				gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
				if CONFIG.io_mode:
					for name in cls.disks_io_order:
						item = mem.disks[name]
						io_item = mem.disks_io_dict.get(name, {})
						if Collector.collect_interrupt: return
						if cy > h - 1: break
						out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
						if big_disk:
							out += Fx.trans(f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(str(item["used_percent"])) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["used_percent"]}%')
						cy += 1
						if io_item:
							if cy > h - 1: break
							if CONFIG.io_graph_combined:
								if cls.disks_io_h <= 1:
									out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}'
								out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
									f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io"] or "RW"}')
								cy += cls.disks_io_h
							else:
								if cls.disks_io_h <= 3:
									out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}{Mv.to(y+cy+1, x+cx-1)}{" " * 5}'
								out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["read"](None if cls.redraw else mem.disks_io_dict[name]["read"][-1])}'
									f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io_r"] or "R"}')
								cy += cls.disks_io_h // 2
								out += f'{Mv.to(y+cy, x+cx-1)}{Graphs.disk_io[name]["write"](None if cls.redraw else mem.disks_io_dict[name]["write"][-1])}'
								cy += cls.disks_io_h // 2
								out += f'{Mv.to(y+cy-1, x+cx-1)}{THEME.main_fg}{item["io_w"] or "W"}'
				else:
					for name, item in mem.disks.items():
						if Collector.collect_interrupt: return
						if not name in Meters.disks_used:
							continue
						if cy > h - 1: break
						out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
						if big_disk:
							out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{Fx.trans(item["io"])}'
						cy += 1
						if cy > h - 1: break
						if CONFIG.show_io_stat and name in Graphs.disk_io:
							out += f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{Fx.ub}{" IO: " if big_disk else " IO " + Mv.l(2)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
							if not big_disk and item["io"]:
								out += f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{THEME.main_fg}{item["io"]}'
							cy += 1
							if cy > h - 1: break
						out += Mv.to(y+cy, x+cx) + (f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U ")
						out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
						cy += 1
						if len(mem.disks) * 3 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1:
							if cy > h - 1: break
							out += Mv.to(y+cy, x+cx)
							out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
							out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
							cy += 1
							if len(mem.disks) * 4 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1: cy += 1
		except (KeyError, TypeError):
			#* Collector data can change shape mid-draw; abort this frame and try again next tick
			return
		Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
		cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
    """Box for network usage: download/upload graphs plus a stats sub-box for the selected NIC."""
    name = "net"
    num = 3
    height_p = 30  # percent of terminal height
    width_p = 45   # percent of terminal width
    min_w: int = 36
    min_h: int = 6
    x = 1
    y = 1
    resized: bool = True
    redraw: bool = True
    graph_height: Dict[str, int] = {}  # rows allotted to each of the "download"/"upload" graphs
    symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
    buffer: str = "net"

    Box.buffers.append(buffer)

    @classmethod
    def _calc_size(cls):
        """Recalculate box geometry from the current terminal size and flag a redraw."""
        if not "net" in cls.boxes:
            cls.width = Term.width
            return
        if not "proc" in cls.boxes:
            width_p = 100
        else:
            width_p = cls.width_p
        cls.width = round(Term.width * width_p / 100)
        # Net box takes whatever vertical space the cpu and mem boxes leave over
        cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
        cls.y = Term.height - cls.height + 1
        cls.box_width = 27 if cls.width > 45 else 19
        cls.box_height = 9 if cls.height > 10 else cls.height - 2
        cls.box_x = cls.width - cls.box_width - 1
        cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
        # Split the inner area between the download (top) and upload (bottom) graphs
        cls.graph_height["download"] = round((cls.height - 2) / 2)
        cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
        cls.redraw = True

    @classmethod
    def _draw_bg(cls) -> str:
        """Return the static outline: the outer net box plus the Download/Upload stats sub-box."""
        if not "net" in cls.boxes: return ""
        # NOTE: the backslash continuation is inside the f-string literal, so the
        # continued line must stay unindented or spaces would leak into the output.
        return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'

    @classmethod
    def _draw_fg(cls):
        """Draw graphs and speed/top/total stats for the selected NIC into the "net" draw buffer."""
        if not "net" in cls.boxes: return
        net = NetCollector
        if net.redraw: cls.redraw = True
        if not net.nic: return
        out: str = ""
        out_misc: str = ""
        x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
        bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
        # "zero" button is highlighted while a counter offset is active
        reset: bool = bool(net.stats[net.nic]["download"]["offset"])

        # Titles, buttons and mouse hotzones only need redrawing on resize or full redraw
        if cls.resized or cls.redraw:
            out_misc += cls._draw_bg()
            if not "b" in Key.mouse:
                Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
                Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
                Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
            out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
                f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
                f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            if w - len(net.nic[:10]) - 20 > 6:
                if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
                    f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            if w - len(net.nic[:10]) - 20 > 13:
                if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
                    f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            if net.address and w - len(net.nic[:10]) - len(net.address) - 20 > 15:
                out_misc += (f'{Mv.to(y-1, x+7)}{THEME.net_box(Symbol.title_left)}{Fx.b}{THEME.title(net.address)}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            Draw.buffer("net_misc", out_misc, only_save=True)

        cy = 0
        for direction in ["download", "upload"]:
            strings = net.strings[net.nic][direction]
            stats = net.stats[net.nic][direction]
            if cls.redraw: stats["redraw"] = True
            if stats["redraw"] or cls.resized:
                # Rebuild the graph; upload is drawn inverted (grows downward)
                Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
                    invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None, round_up_low=True)
            out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'

            # Stats sub-box: current speed (and bit/s if it fits), then Top/Total depending on height
            out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
                ("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
            cy += 1 if bh != 3 else 2
            if bh >= 6:
                out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
                cy += 1
            if bh >= 4:
                out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
                if bh > 2 and bh % 2: cy += 2
                else: cy += 1
            stats["redraw"] = False

        # Scale labels at the top-left (download) and bottom-left (upload) of the graphs
        out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
            f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')

        Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
        cls.redraw = cls.resized = False
class ProcBox(Box):
    """Box for the process list: sortable/filterable process table, optional tree view,
    per-process mini cpu graphs and a detailed view for a selected process."""
    name = "proc"
    num = 4
    height_p = 68
    width_p = 55
    min_w: int = 44
    min_h: int = 16
    x = 1
    y = 1
    current_y: int = 0
    current_h: int = 0
    select_max: int = 0       # number of selectable rows currently visible
    selected: int = 0         # 0 = nothing selected, else 1-based row in view
    selected_pid: int = 0
    last_selection: int = 0
    filtering: bool = False   # True while the filter input field is active
    moved: bool = False
    start: int = 1            # 1-based index of first process shown
    count: int = 0            # draw counter used to periodically clean dead-pid state
    s_len: int = 0
    detailed: bool = False
    detailed_x: int = 0
    detailed_y: int = 0
    detailed_width: int = 0
    detailed_height: int = 8
    resized: bool = True
    redraw: bool = True
    buffer: str = "proc"
    pid_counter: Dict[int, int] = {}  # pid -> consecutive low-cpu updates, for graph expiry
    Box.buffers.append(buffer)

    @classmethod
    def _calc_size(cls):
        """Recalculate box geometry from the current terminal size and flag a redraw."""
        if not "proc" in cls.boxes:
            cls.width = Term.width
            return
        width_p: int; height_p: int
        if not "net" in cls.boxes and not "mem" in cls.boxes:
            width_p = 100
        else:
            width_p = cls.width_p
        if not "cpu" in cls.boxes:
            height_p = 100
        else:
            height_p = cls.height_p
        cls.width = round(Term.width * width_p / 100)
        cls.height = round(Term.height * height_p / 100)
        if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
        cls.x = Term.width - cls.width + 1
        cls.y = Box._b_cpu_h + 1
        cls.current_y = cls.y
        cls.current_h = cls.height
        cls.select_max = cls.height - 3
        cls.redraw = True
        cls.resized = True

    @classmethod
    def _draw_bg(cls) -> str:
        """Return the static box outline."""
        if not "proc" in cls.boxes: return ""
        return create_box(box=cls, line_color=THEME.proc_box)

    @classmethod
    def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
        """Update scroll position and selection from a keyboard or mouse event,
        clamp to valid bounds and trigger a redraw-only collection if anything moved."""
        old: Tuple[int, int] = (cls.start, cls.selected)
        new_sel: int
        if key in ["up", "k"]:
            if cls.selected == 1 and cls.start > 1:
                cls.start -= 1
            elif cls.selected == 1:
                cls.selected = 0
            elif cls.selected > 1:
                cls.selected -= 1
        elif key in ["down", "j"]:
            if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
                cls.selected = cls.last_selection
                cls.last_selection = 0
            if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
                cls.start += 1
            elif cls.selected < cls.select_max:
                cls.selected += 1
        elif key == "mouse_scroll_up" and cls.start > 1:
            cls.start -= 5
        elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
            cls.start += 5
        elif key == "page_up" and cls.start > 1:
            cls.start -= cls.select_max
        elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
            cls.start += cls.select_max
        elif key == "home":
            if cls.start > 1: cls.start = 1
            elif cls.selected > 0: cls.selected = 0
        elif key == "end":
            if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
            elif cls.selected < cls.select_max: cls.selected = cls.select_max
        elif key == "mouse_click":
            # Click on the scrollbar column jumps the view; click on a row selects it
            if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
                if mouse_pos[1] == cls.current_y + 2:
                    cls.start = 1
                elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
                    cls.start = ProcCollector.num_procs - cls.select_max + 1
                else:
                    cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
            else:
                new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
                if new_sel > 0 and new_sel == cls.selected:
                    # Second click on the already selected row acts as <enter>
                    Key.list.insert(0, "enter")
                    return
                elif new_sel > 0 and new_sel != cls.selected:
                    if cls.last_selection: cls.last_selection = 0
                    cls.selected = new_sel
        elif key == "mouse_unselect":
            cls.selected = 0
        # Clamp start and selected to valid ranges
        if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
        elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
        if cls.start < 1: cls.start = 1
        if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
        elif cls.selected > cls.select_max: cls.selected = cls.select_max
        if cls.selected < 0: cls.selected = 0
        if old != (cls.start, cls.selected):
            cls.moved = True
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)

    @classmethod
    def _draw_fg(cls):
        """Draw the process list (and the detailed view if active) into the "proc" draw buffer."""
        if not "proc" in cls.boxes: return
        proc = ProcCollector
        if proc.proc_interrupt: return
        if proc.redraw: cls.redraw = True
        out: str = ""
        out_misc: str = ""
        n: int = 0
        x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
        prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
        dgx: int; dgw: int; dx: int; dw: int; dy: int
        l_count: int = 0
        scroll_pos: int = 0
        killed: bool = True
        indent: str = ""
        offset: int = 0
        tr_show: bool = True
        usr_show: bool = True
        vals: List[str]
        g_color: str = ""
        s_len: int = 0
        if proc.search_filter: s_len = len(proc.search_filter[:10])
        loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
        end: str = ""

        # Geometry of the detailed view: graph area (dg*) on the left, info area (d*) on the right
        if proc.detailed:
            dgx, dgw = x, w // 3
            dw = w - dgw - 1
            if dw > 120:
                dw = 120
                dgw = w - 121
            dx = x + dgw + 2
            dy = cls.y + 1

        # Column widths: drop threads/user columns progressively when the box is narrow
        if w > 67:
            arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
            prog_len = 15
        else:
            arg_len = 0
            prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
            if prog_len < 15:
                tr_show = False
                prog_len += 5
            if prog_len < 12:
                usr_show = False
                prog_len += 9
        if CONFIG.proc_tree:
            tree_len = arg_len + prog_len + 6
            arg_len = 0

        #* Buttons and titles only redrawn if needed
        if cls.resized or cls.redraw:
            s_len += len(CONFIG.proc_sorting)
            if cls.resized or s_len != cls.s_len or proc.detailed:
                cls.s_len = s_len
                for k in ["e", "r", "c", "T", "K", "I", "enter", "left", " ", "f", "delete"]:
                    if k in Key.mouse: del Key.mouse[k]
            if proc.detailed:
                killed = proc.details.get("killed", False)
                main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
                hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
                title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
                if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
                    # Shrink the list area to make room for the 8-row detailed view on top
                    cls.current_y = cls.y + 8
                    cls.current_h = cls.height - 8
                    for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
                    out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
                        f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
                    for i in range(7):
                        out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'

                out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
                    f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
                    f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
                    f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')

                if cls.selected == 0:
                    Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
                if cls.selected == 0 and not killed:
                    Key.mouse["T"] = [[dx+2 + i, dy-1] for i in range(9)]

                out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
                    f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
                if dw > 28:
                    if cls.selected == 0 and not killed and not "K" in Key.mouse: Key.mouse["K"] = [[dx + 13 + i, dy-1] for i in range(4)]
                    out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
                if dw > 39:
                    if cls.selected == 0 and not killed and not "I" in Key.mouse: Key.mouse["I"] = [[dx + 19 + i, dy-1] for i in range(9)]
                    out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'

                if Graphs.detailed_cpu is NotImplemented or cls.resized:
                    Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
                    Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)

                cls.select_max = cls.height - 11
                y = cls.y + 9
                h = cls.height - 10
            else:
                if cls.current_y != cls.y or cls.resized:
                    # Restore full-height list when the detailed view is closed
                    cls.current_y = cls.y
                    cls.current_h = cls.height
                    y, h = cls.y + 1, cls.height - 2
                    out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
                        f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
                        f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
                cls.select_max = cls.height - 3

            sort_pos = x + w - len(CONFIG.proc_sorting) - 7
            if not "left" in Key.mouse:
                Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
                Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]

            out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
                ("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
                f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
                f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')

            if w > 29 + s_len:
                if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
                    f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
            if w > 37 + s_len:
                if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
                out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
                    f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
            if w > 47 + s_len:
                if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
                out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
                    f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')

            if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
            if proc.search_filter:
                if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
            elif "delete" in Key.mouse:
                del Key.mouse["delete"]
            out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("F" if cls.filtering and proc.case_sensitive else "f")}{THEME.title}' +
                ("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
                f'{THEME.proc_box(Symbol.title_right)}')

            main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
            hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
            title = THEME.inactive_fg if cls.selected == 0 else THEME.title
            out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
                f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
                f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
                f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
            if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
            if w - len(loc_string) > 34:
                if not "T" in Key.mouse: Key.mouse["T"] = [[x + 22 + i, y+h] for i in range(9)]
                out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
            if w - len(loc_string) > 40:
                if not "K" in Key.mouse: Key.mouse["K"] = [[x + 33 + i, y+h] for i in range(4)]
                out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
            if w - len(loc_string) > 51:
                if not "I" in Key.mouse: Key.mouse["I"] = [[x + 39 + i, y+h] for i in range(9)]
                out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
            if CONFIG.proc_tree and w - len(loc_string) > 65:
                if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
                out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'

            #* Processes labels
            selected: str = CONFIG.proc_sorting
            label: str
            if selected == "memory": selected = "mem"
            if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
            if CONFIG.proc_tree:
                label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
                    (" " if proc.num_procs > cls.select_max else ""))
                if selected in ["pid", "program", "arguments"]: selected = "tree"
            else:
                label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
                    ((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
                    (" " if proc.num_procs > cls.select_max else ""))
                if selected == "program" and prog_len <= 8: selected = "prg"
            selected = selected.split(" ")[0].capitalize()
            if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
            # Underline the header of the active sort column
            label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
            out_misc += label

            Draw.buffer("proc_misc", out_misc, only_save=True)

        #* Detailed box draw
        if proc.detailed:
            if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
            elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
            else: stat_color = ""
            expand = proc.expand
            iw = (dw - 3) // (4 + expand)
            iw2 = iw - 1
            out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
                f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
            for i, l in enumerate(["C", "P", "U"]):
                out += f'{Mv.to(dy+2+i, dgx)}{l}'
            for i, l in enumerate(["C", "M", "D"]):
                out += f'{Mv.to(dy+4+i, dx+1)}{l}'
            out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
                (f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
                (f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
                (f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
                (f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
                f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
                (f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
                (f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
                (f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
                (f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
                f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
                f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
                f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
            cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
            for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
                out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
                if i == 2: break

        #* Checking for selection out of bounds
        if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
        elif cls.start > proc.num_procs: cls.start = proc.num_procs
        if cls.start < 1: cls.start = 1
        if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
        elif cls.selected > cls.select_max: cls.selected = cls.select_max
        if cls.selected < 0: cls.selected = 0

        #* Start iteration over all processes and info
        cy = 1
        for n, (pid, items) in enumerate(proc.processes.items(), start=1):
            if n < cls.start: continue
            l_count += 1
            if l_count == cls.selected:
                is_selected = True
                cls.selected_pid = pid
            else: is_selected = False

            indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]

            if CONFIG.proc_tree:
                arg_len = 0
                offset = tree_len - len(f'{indent}{pid}')
                if offset < 1: offset = 0
                indent = f'{indent:.{tree_len - len(str(pid))}}'
                if offset - len(name) > 12:
                    cmd = cmd.split(" ")[0].split("/")[-1]
                    if not cmd.startswith(name):
                        offset = len(name)
                        arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
                        cmd = f'({cmd[:(arg_len-4)]})'
            else:
                offset = prog_len - 1
            # Maintain a mini cpu graph per pid; expire it after 10 consecutive low-cpu updates
            if cpu > 1.0 or pid in Graphs.pid_cpu:
                if pid not in Graphs.pid_cpu:
                    Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
                    cls.pid_counter[pid] = 0
                elif cpu < 1.0:
                    cls.pid_counter[pid] += 1
                    if cls.pid_counter[pid] > 10:
                        del cls.pid_counter[pid], Graphs.pid_cpu[pid]
                else:
                    cls.pid_counter[pid] = 0

            end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
            if cls.selected > cy: calc = cls.selected - cy
            elif 0 < cls.selected <= cy: calc = cy - cls.selected
            else: calc = cy
            if CONFIG.proc_colors and not is_selected:
                vals = []
                for v in [int(cpu), int(mem), int(threads // 3)]:
                    if CONFIG.proc_gradient:
                        val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
                        vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
                    else:
                        vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
                c_color, m_color, t_color = vals
            else:
                c_color = m_color = t_color = Fx.b
            if CONFIG.proc_gradient and not is_selected:
                g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
            if is_selected:
                c_color = m_color = t_color = g_color = end = ""
                out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'

            #* Creates one line for a process with all gathered information
            out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
                f'{c_color}{name:<{offset}.{offset}} {end}' +
                (f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
                (t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
                (g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
                m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
                f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
                (" " if proc.num_procs > cls.select_max else ""))

            #* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
            if pid in Graphs.pid_cpu:
                out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'

            if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'

            cy += 1
            if cy == h: break
        if cy < h:
            for i in range(h-cy):
                out += f'{Mv.to(y+cy+i, x)}{" " * w}'

        #* Draw scrollbar if needed
        if proc.num_procs > cls.select_max:
            if cls.resized:
                Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
                Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
            scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
            if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
            elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
            out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
                f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
        elif "mouse_scroll_up" in Key.mouse:
            # FIX: was checking/deleting "scroll_up"/"scroll_down", which are never
            # registered ("mouse_scroll_up"/"mouse_scroll_down" are, above), so stale
            # scrollbar hotzones were never removed when the list shrank.
            del Key.mouse["mouse_scroll_up"], Key.mouse["mouse_scroll_down"]

        #* Draw current selection and number of processes
        out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
            f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')

        #* Clean up dead processes graphs and counters
        cls.count += 1
        if cls.count == 100:
            # FIX: was "cls.count == 0" (a no-op comparison), so the counter never
            # reset and this cleanup only ever ran once per program lifetime.
            cls.count = 0
            for p in list(cls.pid_counter):
                if not psutil.pid_exists(p):
                    del cls.pid_counter[p], Graphs.pid_cpu[p]

        Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
        cls.redraw = cls.resized = cls.moved = False
class Collector:
    '''Data collector master class
    * .start(): Starts collector thread
    * .stop(): Stops collector thread
    * .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
    stopping: bool = False
    started: bool = False
    draw_now: bool = False
    redraw: bool = False
    only_draw: bool = False
    thread: threading.Thread
    collect_run = threading.Event()    # set by collect() to wake the runner thread
    collect_idle = threading.Event()   # set while the runner is not collecting
    collect_idle.set()
    collect_done = threading.Event()   # set when a collection pass has finished
    collect_queue: List = []
    collect_interrupt: bool = False
    proc_interrupt: bool = False
    use_draw_list: bool = False
    proc_counter: int = 1              # counts updates to throttle ProcCollector (proc_update_mult)

    @classmethod
    def start(cls):
        """Start the background collector thread."""
        cls.stopping = False
        cls.thread = threading.Thread(target=cls._runner, args=())
        cls.thread.start()
        cls.started = True

    @classmethod
    def stop(cls):
        """Signal the collector thread to stop and wait for it to exit."""
        if cls.started and cls.thread.is_alive():
            cls.stopping = True
            cls.started = False
            cls.collect_queue = []
            # Release any waiters so the thread can observe the stop flag
            cls.collect_idle.set()
            cls.collect_done.set()
            try:
                cls.thread.join()
            except:
                # Best effort: ignore join failures during shutdown
                pass

    @classmethod
    def _runner(cls):
        '''This is meant to run in it's own thread, collecting and drawing when collect_run is set'''
        draw_buffers: List[str] = []
        debugged: bool = False
        try:
            while not cls.stopping:
                if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
                # Wait briefly for work; loop again so draw_clock and stop flag stay responsive
                cls.collect_run.wait(0.1)
                if not cls.collect_run.is_set():
                    continue
                draw_buffers = []
                cls.collect_interrupt = False
                cls.collect_run.clear()
                cls.collect_idle.clear()
                cls.collect_done.clear()
                if DEBUG and not debugged: TimeIt.start("Collect and draw")
                while cls.collect_queue:
                    collector = cls.collect_queue.pop()
                    if not cls.only_draw:
                        collector._collect()
                    collector._draw()
                    if cls.use_draw_list: draw_buffers.append(collector.buffer)
                    if cls.collect_interrupt: break
                if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
                if cls.draw_now and not Menu.active and not cls.collect_interrupt:
                    if cls.use_draw_list: Draw.out(*draw_buffers)
                    else: Draw.out()
                if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
                cls.collect_idle.set()
                cls.collect_done.set()
        except Exception as e:
            errlog.exception(f'Data collection thread failed with exception: {e}')
            # Unblock anyone waiting on these events before quitting
            cls.collect_idle.set()
            cls.collect_done.set()
            clean_quit(1, thread=True)

    @classmethod
    def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
        '''Setup collect queue for _runner'''
        cls.collect_interrupt = interrupt
        cls.proc_interrupt = proc_interrupt
        # Wait for any running collection pass to finish before queuing a new one
        cls.collect_idle.wait()
        cls.collect_interrupt = False
        cls.proc_interrupt = False
        cls.use_draw_list = False
        cls.draw_now = draw_now
        cls.redraw = redraw
        cls.only_draw = only_draw
        if collectors:
            # Explicit list of collectors: draw only their buffers
            cls.collect_queue = [*collectors]
            cls.use_draw_list = True
            if ProcCollector in cls.collect_queue:
                cls.proc_counter = 1
        else:
            # Full update: all registered collector subclasses, with the process
            # collector optionally throttled to every proc_update_mult:th update
            cls.collect_queue = list(cls.__subclasses__())
            if CONFIG.proc_update_mult > 1:
                if cls.proc_counter > 1:
                    cls.collect_queue.remove(ProcCollector)
                if cls.proc_counter == CONFIG.proc_update_mult:
                    cls.proc_counter = 0
                cls.proc_counter += 1
        cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_upper: List[int] = []
cpu_lower: List[int] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
    '''Check if we can get cpu temps and return method of getting temps'''
    cls.sensor_method = ""
    if SYSTEM == "MacOS":
        try:
            # Probe external helper binaries; output format check confirms they work
            if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
                cls.sensor_method = "coretemp"
            elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
                cls.sensor_method = "osx-cpu-temp"
        except: pass
    elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
        # User picked a specific sensor from the detected list
        cls.sensor_method = "psutil"
    elif hasattr(psutil, "sensors_temperatures"):
        try:
            temps = psutil.sensors_temperatures()
            if temps:
                for name, entries in temps.items():
                    if name.lower().startswith("cpu"):
                        cls.sensor_method = "psutil"
                        break
                    for entry in entries:
                        if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
                            cls.sensor_method = "psutil"
                            break
                        # NOTE(review): this inner break only exits the entries loop;
                        # the outer loop keeps scanning remaining sensors — presumably
                        # harmless since sensor_method is already set.
        except: pass
    if not cls.sensor_method and SYSTEM == "Linux":
        try:
            # Raspberry Pi fallback
            if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
                cls.sensor_method = "vcgencmd"
        except: pass
    cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
    """Sample cpu usage (total, per-thread, upper/lower graph sources), frequency,
    load average and uptime; optionally cpu temps. Histories are trimmed to a
    multiple of the terminal width."""
    # Index 0 holds total cpu usage; indexes 1..THREADS hold per-thread usage
    cls.cpu_usage[0].append(ceil(psutil.cpu_percent(percpu=False)))
    if len(cls.cpu_usage[0]) > Term.width * 4:
        del cls.cpu_usage[0][0]

    cpu_times_percent = psutil.cpu_times_percent()
    for x in ["upper", "lower"]:
        if getattr(CONFIG, "cpu_graph_" + x) == "total":
            # Alias the total-usage history instead of keeping a separate list
            setattr(cls, "cpu_" + x, cls.cpu_usage[0])
        else:
            # Append the configured cpu_times field (e.g. user, system, iowait)
            getattr(cls, "cpu_" + x).append(ceil(getattr(cpu_times_percent, getattr(CONFIG, "cpu_graph_" + x))))
        if len(getattr(cls, "cpu_" + x)) > Term.width * 4:
            del getattr(cls, "cpu_" + x)[0]

    for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
        cls.cpu_usage[n].append(ceil(thread))
        if len(cls.cpu_usage[n]) > Term.width * 2:
            del cls.cpu_usage[n][0]

    try:
        if hasattr(psutil.cpu_freq(), "current"):
            cls.cpu_freq = round(psutil.cpu_freq().current)
    except Exception as e:
        # Log the frequency failure only once, then silently skip
        if not cls.freq_error:
            cls.freq_error = True
            errlog.error("Exception while getting cpu frequency!")
            errlog.exception(f'{e}')
        else:
            pass

    cls.load_avg = [round(lavg, 2) for lavg in psutil.getloadavg()]
    # Uptime as "XdH:MM"-style string, seconds stripped
    cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3].replace(" days,", "d").replace(" day,", "d")

    if CONFIG.check_temp and cls.got_sensors:
        cls._collect_temps()
	@classmethod
	def _collect_temps(cls):
		'''Collect cpu temperatures via the detected sensor method (psutil, coretemp, osx-cpu-temp or vcgencmd) and append them to cls.cpu_temp histories.'''
		temp: int = 1000           # sentinel: 1000 means "no package temp found yet"
		cores: List[int] = []      # per-core temps for sensors without labeled core numbers
		core_dict: Dict[int, int] = {}  # core number -> temp, for labeled "Core N"/"Tccd N" entries
		entry_int: int = 0
		cpu_type: str = ""         # "intel", "ryzen" or "other" once identified
		c_max: int = 0
		s_name: str = "_-_"
		s_label: str = "_-_"
		if cls.sensor_method == "psutil":
			try:
				# A user-picked sensor is configured as "name:label" (or "name:index").
				if CONFIG.cpu_sensor != "Auto":
					s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
				for name, entries in psutil.sensors_temperatures().items():
					for num, entry in enumerate(entries, 1):
						if name == s_name and (entry.label == s_label or str(num) == s_label):
							# Exact match on the user-configured sensor.
							if entry.label.startswith("Package"):
								cpu_type = "intel"
							elif entry.label.startswith("Tdie"):
								cpu_type = "ryzen"
							else:
								cpu_type = "other"
							if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
							else: cls.cpu_temp_high = 80
							if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
							else: cls.cpu_temp_crit = 95
							temp = round(entry.current)
						elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current"):
							# Auto mode: prefer a package-level sensor over "other" readings.
							if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
								cls.sensor_swap = False
								if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
								else: cls.cpu_temp_high = 80
								if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
								else: cls.cpu_temp_crit = 95
							cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
							temp = round(entry.current)
						elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current"):
							if entry.label.startswith(("Core", "Tccd")):
								entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
								# Duplicate core numbers (e.g. two sensor chips) are offset past the
								# highest seen number so both packages get distinct slots.
								if entry_int in core_dict and cpu_type != "ryzen":
									if c_max == 0:
										c_max = max(core_dict) + 1
									if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
										core_dict[(entry_int + c_max)] = round(entry.current)
									continue
								elif entry_int in core_dict:
									continue
								core_dict[entry_int] = round(entry.current)
								continue
							elif cpu_type in ["intel", "ryzen"]:
								continue
							if not cpu_type:
								cpu_type = "other"
								if not cls.cpu_temp_high or cls.sensor_swap:
									cls.sensor_swap = False
									if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
									else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
									if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
									else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
								temp = round(entry.current)
							cores.append(round(entry.current))
				if core_dict:
					# No package temp found: average the labeled core temps instead.
					if not temp or temp == 1000:
						temp = sum(core_dict.values()) // len(core_dict)
					if not cls.cpu_temp_high or not cls.cpu_temp_crit:
						cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
					cls.cpu_temp[0].append(temp)
					if cpu_type == "ryzen":
						# Ryzen exposes one Tccd sensor per CCD: map each thread to its CCD temp.
						ccds: int = len(core_dict)
						cores_per_ccd: int = CORES // ccds
						z: int = 1
						for x in range(THREADS):
							if x == CORES:
								z = 1
							if CORE_MAP[x] + 1 > cores_per_ccd * z:
								z += 1
							if z in core_dict:
								cls.cpu_temp[x+1].append(core_dict[z])
					else:
						for x in range(THREADS):
							if CORE_MAP[x] in core_dict:
								cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
				elif len(cores) == THREADS / 2:
					# One reading per physical core: duplicate for hyperthread siblings.
					cls.cpu_temp[0].append(temp)
					for n, t in enumerate(cores, start=1):
						try:
							cls.cpu_temp[n].append(t)
							cls.cpu_temp[THREADS // 2 + n].append(t)
						except IndexError:
							break
				else:
					cls.cpu_temp[0].append(temp)
					if len(cores) > 1:
						for n, t in enumerate(cores, start=1):
							try:
								cls.cpu_temp[n].append(t)
							except IndexError:
								break
			except Exception as e:
				# Any sensor failure disables temperature collection entirely.
				errlog.exception(f'{e}')
				cls.got_sensors = False
				CpuBox._calc_size()
		else:
			# Fallback external tools (BSD coretemp, macOS osx-cpu-temp, Raspberry Pi vcgencmd).
			try:
				if cls.sensor_method == "coretemp":
					temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
					cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
					if len(cores) == THREADS / 2:
						cls.cpu_temp[0].append(temp)
						for n, t in enumerate(cores, start=1):
							try:
								cls.cpu_temp[n].append(t)
								cls.cpu_temp[THREADS // 2 + n].append(t)
							except IndexError:
								break
					else:
						cores.insert(0, temp)
						for n, t in enumerate(cores):
							try:
								cls.cpu_temp[n].append(t)
							except IndexError:
								break
					if not cls.cpu_temp_high:
						cls.cpu_temp_high = 85
						cls.cpu_temp_crit = 100
				elif cls.sensor_method == "osx-cpu-temp":
					# Output ends with "°C"; strip the last two characters.
					temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
					if not cls.cpu_temp_high:
						cls.cpu_temp_high = 85
						cls.cpu_temp_crit = 100
				elif cls.sensor_method == "vcgencmd":
					# Output is "temp=XX.X'C"; slice out the number.
					temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
					if not cls.cpu_temp_high:
						cls.cpu_temp_high = 60
						cls.cpu_temp_crit = 80
			except Exception as e:
				errlog.exception(f'{e}')
				cls.got_sensors = False
				CpuBox._calc_size()
			else:
				if not cores:
					cls.cpu_temp[0].append(temp)
		if not core_dict and len(cores) <= 1:
			# Only a single package reading available: hide per-core temps.
			cls.cpu_temp_only = True
		if len(cls.cpu_temp[0]) > 5:
			for n in range(len(cls.cpu_temp)):
				if cls.cpu_temp[n]:
					del cls.cpu_temp[n][0]
	@classmethod
	def _draw(cls):
		'''Delegate drawing of collected cpu stats to the cpu box widget.'''
		CpuBox._draw_fg()
class MemCollector(Collector):
	'''Collects memory and disks information'''
	values: Dict[str, int] = {}      # raw memory values in bytes
	vlist: Dict[str, List[int]] = {} # per-key percent history for graphs
	percent: Dict[str, int] = {}
	string: Dict[str, str] = {}      # humanized strings for display
	swap_values: Dict[str, int] = {}
	swap_vlist: Dict[str, List[int]] = {}
	swap_percent: Dict[str, int] = {}
	swap_string: Dict[str, str] = {}
	disks: Dict[str, Dict]
	disk_hist: Dict[str, Tuple] = {}  # device -> (read_bytes, write_bytes) from previous pass
	timestamp: float = time()
	disks_io_dict: Dict[str, Dict[str, List[int]]] = {}  # device -> io graph histories (MiB/s)
	recheck_diskutil: bool = True
	diskutil_map: Dict[str, str] = {} # macOS physical disk -> APFS container mapping
	io_error: bool = False
	old_disks: List[str] = []
	old_io_disks: List[str] = []
	fstab_filter: List[str] = []
	excludes: List[str] = ["squashfs", "nullfs"]
	if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
	buffer: str = MemBox.buffer
	@classmethod
	def _collect(cls):
		'''Collect system memory, swap and (optionally) per-disk usage and io stats.'''
		#* Collect memory
		mem = psutil.virtual_memory()
		if hasattr(mem, "cached"):
			cls.values["cached"] = mem.cached
		else:
			cls.values["cached"] = mem.active
		cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
		cls.values["used"] = cls.values["total"] - cls.values["available"]
		for key, value in cls.values.items():
			cls.string[key] = floating_humanizer(value)
			if key == "total": continue
			cls.percent[key] = round(value * 100 / cls.values["total"])
			if CONFIG.mem_graphs:
				if not key in cls.vlist: cls.vlist[key] = []
				cls.vlist[key].append(cls.percent[key])
				if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
		#* Collect swap
		if CONFIG.show_swap or CONFIG.swap_disk:
			swap = psutil.swap_memory()
			cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
			cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
			if swap.total:
				if not MemBox.swap_on:
					MemBox.redraw = True
					MemBox.swap_on = True
				for key, value in cls.swap_values.items():
					cls.swap_string[key] = floating_humanizer(value)
					if key == "total": continue
					cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
					if CONFIG.mem_graphs:
						if not key in cls.swap_vlist: cls.swap_vlist[key] = []
						cls.swap_vlist[key].append(cls.swap_percent[key])
						if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
			else:
				if MemBox.swap_on:
					MemBox.redraw = True
				MemBox.swap_on = False
		else:
			if MemBox.swap_on:
				MemBox.redraw = True
			MemBox.swap_on = False
		if not CONFIG.show_disks: return
		#* Collect disks usage
		disk_read: int = 0
		disk_write: int = 0
		dev_name: str
		disk_name: str
		filtering: Tuple = ()
		filter_exclude: bool = False
		io_string_r: str
		io_string_w: str
		u_percent: int
		cls.disks = {}
		if CONFIG.disks_filter:
			if CONFIG.disks_filter.startswith("exclude="):
				filter_exclude = True
				filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
			else:
				filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
		try:
			io_counters = psutil.disk_io_counters(perdisk=SYSTEM != "BSD", nowrap=True)
		except ValueError as e:
			# Older psutil versions can raise here; log once and continue without io stats.
			if not cls.io_error:
				cls.io_error = True
				errlog.error(f'Non fatal error during disk io collection!')
				if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
					errlog.error(f'Caused by outdated psutil version.')
				errlog.exception(f'{e}')
			io_counters = None
		if SYSTEM == "MacOS" and cls.recheck_diskutil:
			cls.recheck_diskutil = False
			try:
				#* Map physical disks to their APFS containers so io counters can be matched.
				dutil_out = subprocess.check_output(["diskutil", "list", "physical"], universal_newlines=True)
				#? Fix: initialize before the loop; previously these were read at
				#? "if xdisk and ydisk:" before assignment when the first line matched
				#? neither branch, raising NameError silently eaten by the bare except.
				xdisk = ydisk = ""
				for line in dutil_out.split("\n"):
					line = line.replace("\u2068", "").replace("\u2069", "")
					if line.startswith("/dev/"):
						xdisk = line.split()[0].replace("/dev/", "")
					elif "Container" in line:
						ydisk = line.split()[3]
					if xdisk and ydisk:
						cls.diskutil_map[xdisk] = ydisk
						xdisk = ydisk = ""
			except:
				pass
		if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
			try:
				with open('/etc/fstab','r') as fstab:
					for line in fstab:
						line = line.strip()
						if line and not line.startswith('#'):
							mount_data = (line.split())
							if mount_data[2].lower() != "swap":
								cls.fstab_filter += [mount_data[1]]
				errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
			except IOError:
				CONFIG.use_fstab = False
				errlog.warning(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
		if not CONFIG.use_fstab and cls.fstab_filter:
			cls.fstab_filter = []
			errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
		for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
			disk_io = None
			#? Fix: reset per iteration; previously disk_u was only set inside the try below,
			#? so a failing disk_usage() call left it unbound on the first disk (NameError)
			#? or silently reused the previous disk's stale values on later iterations.
			disk_u = None
			io_string_r = io_string_w = ""
			if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
				continue
			disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
			if cls.excludes and disk.fstype in cls.excludes:
				continue
			if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
				continue
			if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
				continue
			try:
				disk_u = psutil.disk_usage(disk.mountpoint)
			except:
				pass
			# getattr defaults keep the entry at 0 when disk_usage() failed above.
			u_percent = round(getattr(disk_u, "percent", 0))
			cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
			for name in ["total", "used", "free"]:
				cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
			#* Collect disk io
			if io_counters:
				try:
					if SYSTEM != "BSD":
						dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
						if not dev_name in io_counters:
							for names in io_counters:
								if names in dev_name:
									disk_io = io_counters[names]
									break
							else:
								if cls.diskutil_map:
									for names, items in cls.diskutil_map.items():
										if items in dev_name and names in io_counters:
											disk_io = io_counters[names]
						else:
							disk_io = io_counters[dev_name]
					elif disk.mountpoint == "/":
						#* BSD only has total io counters; attribute them to the root disk.
						disk_io = io_counters
					else:
						raise Exception
					disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp)) #type: ignore
					disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp)) #type: ignore
					if not disk.device in cls.disks_io_dict:
						cls.disks_io_dict[disk.device] = {"read" : [], "write" : [], "rw" : []}
					cls.disks_io_dict[disk.device]["read"].append(disk_read >> 20)
					cls.disks_io_dict[disk.device]["write"].append(disk_write >> 20)
					cls.disks_io_dict[disk.device]["rw"].append((disk_read + disk_write) >> 20)
					if len(cls.disks_io_dict[disk.device]["read"]) > MemBox.width:
						del cls.disks_io_dict[disk.device]["read"][0], cls.disks_io_dict[disk.device]["write"][0], cls.disks_io_dict[disk.device]["rw"][0]
				except:
					# First pass for a device has no disk_hist entry; speeds start at 0.
					disk_read = disk_write = 0
			else:
				disk_read = disk_write = 0
			if disk_io:
				cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
			if CONFIG.io_mode or MemBox.disks_width > 30:
				if disk_read > 0:
					io_string_r = f'▲{floating_humanizer(disk_read, short=True)}'
				if disk_write > 0:
					io_string_w = f'▼{floating_humanizer(disk_write, short=True)}'
				if CONFIG.io_mode:
					cls.disks[disk.device]["io_r"] = io_string_r
					cls.disks[disk.device]["io_w"] = io_string_w
			elif disk_read + disk_write > 0:
				io_string_r += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
			cls.disks[disk.device]["io"] = io_string_r + (" " if io_string_w and io_string_r else "") + io_string_w
		if CONFIG.swap_disk and MemBox.swap_on:
			cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
			for name in ["total", "used", "free"]:
				cls.disks["__swap"][name] = cls.swap_string[name]
			if len(cls.disks) > 2:
				# Keep swap as the second entry, right after the root disk.
				try:
					new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
					new["__swap"] = cls.disks.pop("__swap")
					new.update(cls.disks)
					cls.disks = new
				except:
					pass
		if cls.old_disks != list(cls.disks) or cls.old_io_disks != list(cls.disks_io_dict):
			MemBox.redraw = True
			cls.recheck_diskutil = True
			cls.old_disks = list(cls.disks)
			cls.old_io_disks = list(cls.disks_io_dict)
		cls.timestamp = time()
	@classmethod
	def _draw(cls):
		'''Delegate drawing of collected memory/disk stats to the mem box widget.'''
		MemBox._draw_fg()
class NetCollector(Collector):
	'''Collects network stats'''
	buffer: str = NetBox.buffer
	nics: List[str] = []          # detected interfaces, sorted by throughput
	nic_i: int = 0                # index of the active interface in nics
	nic: str = ""
	new_nic: str = ""             # pending interface set by switch()
	nic_error: bool = False
	reset: bool = False           # request to zero the total counters
	graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
	graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
	#min_top: int = 10<<10
	#* Stats structure = stats[netword device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
	stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
	#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
	strings: Dict[str, Dict[str, Dict[str, str]]] = {}
	switched: bool = False
	timestamp: float = time()
	net_min: Dict[str, int] = {"download" : -1, "upload" : -1}  # -1 = not yet read from CONFIG
	auto_min: bool = CONFIG.net_auto
	net_iface: str = CONFIG.net_iface
	sync_top: int = 0             # shared graph scale when net_sync is on
	sync_string: str = ""
	address: str = ""
	@classmethod
	def _get_nics(cls):
		'''Get a list of all network devices sorted by highest throughput'''
		cls.nic_i = 0
		cls.nics = []
		cls.nic = ""
		try:
			io_all = psutil.net_io_counters(pernic=True)
		except Exception as e:
			# Log only the first failure to avoid spamming the error log.
			if not cls.nic_error:
				cls.nic_error = True
				errlog.exception(f'{e}')
		if not io_all: return
		up_stat = psutil.net_if_stats()
		for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
			# Skip interfaces that are down or have no stats entry.
			if nic not in up_stat or not up_stat[nic].isup:
				continue
			cls.nics.append(nic)
		if not cls.nics: cls.nics = [""]
		cls.nic = cls.nics[cls.nic_i]
		# A user-configured interface takes precedence when present.
		if cls.net_iface and cls.net_iface in cls.nics:
			cls.nic = cls.net_iface
			cls.nic_i = cls.nics.index(cls.nic)
	@classmethod
	def switch(cls, key: str):
		'''Cycle to the next ("n") or previous interface and trigger a redraw collect.'''
		if cls.net_iface: cls.net_iface = ""
		if len(cls.nics) < 2 and cls.nic in cls.nics:
			return
		if cls.nic_i == -1:
			cls.nic_i = 0 if key == "n" else -1
		else:
			cls.nic_i += +1 if key == "n" else -1
		cls.nic_i %= len(cls.nics)
		cls.new_nic = cls.nics[cls.nic_i]
		cls.switched = True
		Collector.collect(NetCollector, redraw=True)
	@classmethod
	def _collect(cls):
		'''Sample io counters for the active interface, compute speeds and auto-scale the graph range.'''
		speed: int
		stat: Dict
		up_stat = psutil.net_if_stats()
		# Re-detect interfaces if the up/down set has changed since last pass.
		if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
			old_nic = cls.nic
			cls._get_nics()
			cls.nic = old_nic
			if cls.nic not in cls.nics:
				cls.nic_i = -1
			else:
				cls.nic_i = cls.nics.index(cls.nic)
		if cls.switched:
			cls.nic = cls.new_nic
			cls.switched = False
		if not cls.nic or cls.nic not in up_stat:
			cls._get_nics()
			if not cls.nic: return
		try:
			io_all = psutil.net_io_counters(pernic=True)[cls.nic]
		except KeyError:
			pass
			return
		if not cls.nic in cls.stats:
			# First sighting of this interface: seed stats and display strings.
			cls.stats[cls.nic] = {}
			cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
			for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
				cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
				for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
					cls.strings[cls.nic][direction][v] = ""
		cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
		cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
		if cls.nic in psutil.net_if_addrs():
			cls.address = getattr(psutil.net_if_addrs()[cls.nic][0], "address", "")
		for direction in ["download", "upload"]:
			stat = cls.stats[cls.nic][direction]
			strings = cls.strings[cls.nic][direction]
			#* Calculate current speed
			stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
			stat["last"] = stat["total"]
			speed = stat["speed"][-1]
			if cls.net_min[direction] == -1:
				# Lazy-load the configured fixed scale on first use.
				cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
				stat["graph_top"] = cls.net_min[direction]
				stat["graph_lower"] = 7
				if not cls.auto_min:
					stat["redraw"] = True
					strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
			# A shrinking counter (interface reset) forces a totals reset.
			if stat["offset"] and stat["offset"] > stat["total"]:
				cls.reset = True
			if cls.reset:
				if not stat["offset"]:
					stat["offset"] = stat["total"]
				else:
					stat["offset"] = 0
				if direction == "upload":
					cls.reset = False
					NetBox.redraw = True
			if len(stat["speed"]) > NetBox.width * 2:
				del stat["speed"][0]
			strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
			strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
			strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
			if speed > stat["top"] or not stat["top"]:
				stat["top"] = speed
				strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
			if cls.auto_min:
				# Hysteresis counters: rescale only after 5 consecutive over/under samples.
				if speed > stat["graph_top"]:
					stat["graph_raise"] += 1
					if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
				elif speed < stat["graph_top"] // 10:
					stat["graph_lower"] += 1
					if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
				if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
					if stat["graph_raise"] >= 5:
						stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
					elif stat["graph_lower"] >= 5:
						stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
					stat["graph_raise"] = 0
					stat["graph_lower"] = 0
					stat["redraw"] = True
					strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
		cls.timestamp = time()
		if CONFIG.net_sync:
			# Synced mode: both graphs share the larger of the two scales.
			c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
			if c_max != cls.sync_top:
				cls.sync_top = c_max
				cls.sync_string = floating_humanizer(cls.sync_top, short=True)
				NetBox.redraw = True
	@classmethod
	def _draw(cls):
		'''Delegate drawing of collected network stats to the net box widget.'''
		NetBox._draw_fg()
class ProcCollector(Collector):
	'''Collects process stats'''
	buffer: str = ProcBox.buffer
	search_filter: str = ""       # comma separated filter strings entered by the user
	case_sensitive: bool = False
	processes: Dict = {}          # pid -> display dict consumed by ProcBox
	num_procs: int = 0
	det_cpu: float = 0.0
	detailed: bool = False        # True when the detail view for one pid is open
	detailed_pid: Union[int, None] = None
	details: Dict[str, Any] = {}
	details_cpu: List[int] = []   # cpu history for the detail graphs
	details_mem: List[int] = []
	expand: int = 0               # how many extra detail columns fit on screen (0-5)
	collapsed: Dict = {}          # pid -> collapsed state in tree view
	tree_counter: int = 0
	p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
	# Pre-compiled sort keys eval'd against each psutil process "p" below.
	sort_expr: Dict = {}
	sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
	sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
	sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
	sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
	sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
	sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
	sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
	sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
	@classmethod
	def _collect(cls):
		'''List all processess with pid, name, arguments, threads, username, memory percent and cpu percent'''
		if not "proc" in Box.boxes: return
		out: Dict = {}
		cls.det_cpu = 0.0
		sorting: str = CONFIG.proc_sorting
		reverse: bool = not CONFIG.proc_reversed
		proc_per_cpu: bool = CONFIG.proc_per_core
		search: List[str] = []
		if cls.search_filter:
			if cls.case_sensitive:
				search = [i.strip() for i in cls.search_filter.split(",")]
			else:
				search = [i.strip() for i in cls.search_filter.lower().split(",")]
		# err is the ad_value passed to process_iter: failed attribute reads come back as 0.0.
		err: float = 0.0
		n: int = 0
		if CONFIG.proc_tree and sorting == "arguments":
			sorting = "program"
		sort_cmd = cls.sort_expr[sorting]
		if CONFIG.proc_tree:
			cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
		else:
			for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
				# Abort quickly if a new collection or user action interrupts this pass.
				if cls.collect_interrupt or cls.proc_interrupt:
					return
				if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
					continue
				if p.info["cmdline"] == err:
					p.info["cmdline"] = ""
				if p.info["username"] == err:
					p.info["username"] = ""
				if p.info["num_threads"] == err:
					p.info["num_threads"] = 0
				if search:
					if cls.detailed and p.info["pid"] == cls.detailed_pid:
						cls.det_cpu = p.info["cpu_percent"]
					# for/else ladder: keep the process if ANY search term matches ANY field.
					for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
						if not cls.case_sensitive:
							value = value.lower()
						for s in search:
							if s in value:
								break
						else: continue
						break
					else: continue
				cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
				mem = p.info["memory_percent"]
				if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
					mem_b = p.info["memory_info"].rss
				else:
					mem_b = 0
				cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
				out[p.info["pid"]] = {
					"name" : p.info["name"],
					"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
					"threads" : p.info["num_threads"],
					"username" : p.info["username"],
					"mem" : mem,
					"mem_b" : mem_b,
					"cpu" : cpu }
				n += 1
			cls.num_procs = n
			cls.processes = out.copy()
		if cls.detailed:
			cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
			if cls.expand > 5: cls.expand = 5
		if cls.detailed and not cls.details.get("killed", False):
			#* Collect extended info for the pid shown in the detail view.
			try:
				c_pid = cls.detailed_pid
				det = psutil.Process(c_pid)
			except (psutil.NoSuchProcess, psutil.ZombieProcess):
				cls.details["killed"] = True
				cls.details["status"] = psutil.STATUS_DEAD
				ProcBox.redraw = True
			else:
				attrs: List[str] = ["status", "memory_info", "create_time"]
				if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
				if cls.expand:
					attrs.extend(["nice", "terminal"])
					if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
				if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
				cls.details = det.as_dict(attrs=attrs, ad_value="")
				if det.parent() != None: cls.details["parent_name"] = det.parent().name()
				else: cls.details["parent_name"] = ""
				cls.details["pid"] = c_pid
				if c_pid in cls.processes:
					# Reuse the already collected values for consistency with the list.
					cls.details["name"] = cls.processes[c_pid]["name"]
					cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
					cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
					cls.details["username"] = cls.processes[c_pid]["username"]
					cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
					cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
				else:
					cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
					cls.details["threads"] = f'{cls.details["num_threads"]}'
					cls.details["cpu_percent"] = round(cls.det_cpu)
				cls.details["killed"] = False
				if SYSTEM == "MacOS":
					cls.details["cpu_num"] = -1
					cls.details["io_counters"] = ""
				if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
				else: cls.details["memory_bytes"] = "? Bytes"
				if isinstance(cls.details["create_time"], float):
					uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
					if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
					else: cls.details["uptime"] = f'{uptime}'
				else: cls.details["uptime"] = "??:??:??"
				if cls.expand:
					if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
					if SYSTEM == "BSD":
						# BSD only exposes io operation counts, not byte counters.
						if cls.expand > 2:
							if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
							else: cls.details["io_read"] = "?"
						if cls.expand > 3:
							if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
							else: cls.details["io_write"] = "?"
					else:
						if cls.expand > 2:
							if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
							else: cls.details["io_read"] = "?"
						if cls.expand > 3:
							if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
							else: cls.details["io_write"] = "?"
					if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
				cls.details_cpu.append(cls.details["cpu_percent"])
				# Non-linear boost so small memory percentages are still visible in the graph.
				mem = cls.details["memory_percent"]
				if mem > 80: mem = round(mem)
				elif mem > 60: mem = round(mem * 1.2)
				elif mem > 30: mem = round(mem * 1.5)
				elif mem > 10: mem = round(mem * 2)
				elif mem > 5: mem = round(mem * 10)
				else: mem = round(mem * 20)
				cls.details_mem.append(mem)
				if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
				if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
	@classmethod
	def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: List[str]):
		'''List all processess in a tree view with pid, name, threads, username, memory percent and cpu percent'''
		out: Dict = {}
		err: float = 0.0
		det_cpu: float = 0.0
		infolist: Dict = {}
		cls.tree_counter += 1
		tree = defaultdict(list)
		n: int = 0
		# First pass: build parent-pid -> children mapping and stash each process info dict.
		for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
			if cls.collect_interrupt: return
			try:
				tree[p.ppid()].append(p.pid)
			except (psutil.NoSuchProcess, psutil.ZombieProcess):
				pass
			else:
				infolist[p.pid] = p.info
				n += 1
		if 0 in tree and 0 in tree[0]:
			tree[0].remove(0)
		def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
			# Recursive depth-first walk; when collapse_to is set this subtree's stats are
			# folded into that ancestor's row instead of getting rows of their own.
			nonlocal infolist, proc_per_cpu, search, out, det_cpu
			name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
			cont: bool = True
			getinfo: Dict = {}
			if cls.collect_interrupt: return
			try:
				name = psutil.Process(pid).name()
				if name == "idle": return
			except psutil.Error:
				pass
				cont = False
				name = ""
			if pid in infolist:
				getinfo = infolist[pid]
			if search and not found:
				if cls.detailed and pid == cls.detailed_pid:
					det_cpu = getinfo["cpu_percent"]
				if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
				if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
				for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
					if not cls.case_sensitive:
						value = value.lower()
					for s in search:
						if s in value:
							found = True
							break
					else: continue
					break
				else: cont = False
			if cont:
				if getinfo:
					if getinfo["num_threads"] == err: threads = 0
					else: threads = getinfo["num_threads"]
					if getinfo["username"] == err: username = ""
					else: username = getinfo["username"]
					cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
					mem = getinfo["memory_percent"]
					if getinfo["cmdline"] == err: cmd = ""
					else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
					if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
						mem_b = getinfo["memory_info"].rss
					else:
						mem_b = 0
				else:
					# NOTE(review): cmd is not assigned on this path; if getinfo is empty and
					# this row is emitted below, cmd.replace() would raise NameError — confirm.
					threads = mem_b = 0
					username = ""
					mem = cpu = 0.0
				if pid in cls.collapsed:
					collapse = cls.collapsed[pid]
				else:
					collapse = depth > CONFIG.tree_depth
					cls.collapsed[pid] = collapse
				if collapse_to and not search:
					# Collapsed subtree: accumulate into the collapsed ancestor's row.
					out[collapse_to]["threads"] += threads
					out[collapse_to]["mem"] += mem
					out[collapse_to]["mem_b"] += mem_b
					out[collapse_to]["cpu"] += cpu
				else:
					if pid in tree and len(tree[pid]) > 0:
						sign: str = "+" if collapse else "-"
						inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
					out[pid] = {
						"indent" : inindent,
						"name": name,
						"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
						"threads" : threads,
						"username" : username,
						"mem" : mem,
						"mem_b" : mem_b,
						"cpu" : cpu,
						"depth" : depth,
						}
			if search: collapse = False
			elif collapse and not collapse_to:
				collapse_to = pid
			if pid not in tree:
				return
			children = tree[pid][:-1]
			for child in children:
				create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
			# The last child gets the "└─" branch and no continuation bar.
			create_tree(tree[pid][-1], tree, indent + "  ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
		create_tree(min(tree), tree)
		cls.det_cpu = det_cpu
		if cls.collect_interrupt: return
		if cls.tree_counter >= 100:
			# Periodically drop collapsed-state entries for dead pids.
			cls.tree_counter = 0
			for pid in list(cls.collapsed):
				if not psutil.pid_exists(pid):
					del cls.collapsed[pid]
		cls.num_procs = len(out)
		cls.processes = out.copy()
	@classmethod
	def sorting(cls, key: str):
		'''Step the active sorting column right ("right"/"l") or left and re-collect.'''
		index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key in ["right", "l"] else -1)
		if index >= len(CONFIG.sorting_options): index = 0
		elif index < 0: index = len(CONFIG.sorting_options) - 1
		CONFIG.proc_sorting = CONFIG.sorting_options[index]
		if "left" in Key.mouse: del Key.mouse["left"]
		Collector.collect(ProcCollector, interrupt=True, redraw=True)
	@classmethod
	def _draw(cls):
		'''Delegate drawing of collected process stats to the proc box widget.'''
		ProcBox._draw_fg()
class Menu:
	'''Holds all menus'''
	active: bool = False     # True while any menu is open
	close: bool = False      # request to leave the main menu loop
	resized: bool = True
	menus: Dict[str, Dict[str, str]] = {}      # name -> {"normal"/"selected" : pre-rendered art}
	menu_length: Dict[str, int] = {}           # name -> width of the menu art
	background: str = ""     # dimmed snapshot of the screen behind the menu
	# Pre-render every menu entry in both states: each art line is colored per MENU_COLORS,
	# with cursor moves appended so multi-line art draws as one string.
	for name, menu in MENUS.items():
		menu_length[name] = len(menu["normal"][0])
		menus[name] = {}
		for sel in ["normal", "selected"]:
			menus[name][sel] = ""
			for i in range(len(menu[sel])):
				menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
				if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
	@classmethod
	def main(cls):
		'''Draw and run the main menu event loop (options / help / quit) until closed.'''
		if Term.width < 80 or Term.height < 24:
			errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
			return
		out: str = ""
		banner: str = ""
		redraw: bool = True
		key: str = ""
		mx: int = 0
		my: int = 0
		skip: bool = False
		mouse_over: bool = False
		mouse_items: Dict[str, Dict[str, int]] = {}  # name -> clickable bounding box
		cls.active = True
		cls.resized = True
		menu_names: List[str] = list(cls.menus.keys())
		menu_index: int = 0
		menu_current: str = menu_names[0]
		# Dimmed snapshot of the current screen used as the menu backdrop.
		cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		while not cls.close:
			key = ""
			if cls.resized:
				# Recompute banner and menu item positions for the new terminal size.
				banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				if UpdateChecker.version != VERSION:
					banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
				cy = 0
				for name, menu in cls.menus.items():
					ypos = Term.height // 2 - 2 + cy
					xpos = Term.width // 2 - (cls.menu_length[name] // 2)
					mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
					cy += 3
				redraw = True
				cls.resized = False
			if redraw:
				out = ""
				for name, menu in cls.menus.items():
					out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
			if skip and redraw:
				# Only the menu items changed: skip redrawing the backdrop.
				Draw.now(out)
			elif not skip:
				Draw.now(f'{cls.background}{banner}{out}')
			skip = redraw = False
			if Key.input_wait(Timer.left(), mouse=True):
				if Key.mouse_moved():
					mx, my = Key.get_mouse()
					for name, pos in mouse_items.items():
						if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
							mouse_over = True
							if name != menu_current:
								menu_current = name
								menu_index = menu_names.index(name)
								redraw = True
							break
					else:
						mouse_over = False
				else:
					key = Key.get()
				# A click outside any item behaves like escape.
				if key == "mouse_click" and not mouse_over:
					key = "M"
				if key == "q":
					clean_quit()
				elif key in ["escape", "M"]:
					cls.close = True
					break
				elif key in ["up", "mouse_scroll_up", "shift_tab"]:
					menu_index -= 1
					if menu_index < 0: menu_index = len(menu_names) - 1
					menu_current = menu_names[menu_index]
					redraw = True
				elif key in ["down", "mouse_scroll_down", "tab"]:
					menu_index += 1
					if menu_index > len(menu_names) - 1: menu_index = 0
					menu_current = menu_names[menu_index]
					redraw = True
				elif key == "enter" or (key == "mouse_click" and mouse_over):
					if menu_current == "quit":
						clean_quit()
					elif menu_current == "options":
						cls.options()
						cls.resized = True
					elif menu_current == "help":
						cls.help()
						cls.resized = True
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				# Update interval elapsed: refresh collectors and the dimmed backdrop.
				Collector.collect()
				Collector.collect_done.wait(2)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
@classmethod
def help(cls):
	'''Show the help window listing all key bindings; blocks in its own input loop until closed.

	Uses the dimmed main ui as a backdrop (re-captured every update cycle when
	CONFIG.background_update is on) and paginates the bindings list when the
	terminal is too short to show all of them.
	'''
	if Term.width < 80 or Term.height < 24:
		errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
		return
	out: str = ""
	out_misc : str = ""
	redraw: bool = True
	key: str = ""
	skip: bool = False
	main_active: bool = cls.active
	cls.active = True
	cls.resized = True
	if not cls.background:
		cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
	#* Key binding -> description, in display order; "_N" entries are spacer/footer rows
	help_items: Dict[str, str] = {
		"(Mouse 1)" : "Clicks buttons and selects in process list.",
		"Selected (Mouse 1)" : "Show detailed information for selected process.",
		"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
		"(Esc, shift+m)" : "Toggles main menu.",
		"(m)" : "Cycle view presets, order: full->proc->stat->user.",
		"(1)" : "Toggle CPU box.",
		"(2)" : "Toggle MEM box.",
		"(3)" : "Toggle NET box.",
		"(4)" : "Toggle PROC box.",
		"(d)" : "Toggle disks view in MEM box.",
		"(F2, o)" : "Shows options.",
		"(F1, shift+h)" : "Shows this window.",
		"(ctrl+z)" : "Sleep program and put in background.",
		"(ctrl+c, q)" : "Quits program.",
		"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
		"(Up, k) (Down, j)" : "Select in process list.",
		"(Enter)" : "Show detailed information for selected process.",
		"(Spacebar)" : "Expand/collapse the selected process in tree view.",
		"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
		"(Home) (End)" : "Jump to first or last page in process list.",
		"(Left, h) (Right, l)" : "Select previous/next sorting column.",
		"(b) (n)" : "Select previous/next network device.",
		"(s)" : "Toggle showing swap as a disk.",
		"(i)" : "Toggle disks io mode with big graphs.",
		"(z)" : "Toggle totals reset for current network device",
		"(a)" : "Toggle auto scaling for the network graphs.",
		"(y)" : "Toggle synced scaling mode for network graphs.",
		"(f)" : "Input a NON case-sensitive process filter.",
		"(shift+f)" : "Input a case-sensitive process filter.",
		"(c)" : "Toggle per-core cpu usage of processes.",
		"(r)" : "Reverse sorting order in processes box.",
		"(e)" : "Toggle processes tree view.",
		"(delete)" : "Clear any entered filter.",
		"Selected (shift+t)" : "Terminate selected process with SIGTERM - 15.",
		"Selected (shift+k)" : "Kill selected process with SIGKILL - 9.",
		"Selected (shift+i)" : "Interrupt selected process with SIGINT - 2.",
		"_1" : " ",
		"_2" : "For bug reporting and project updates, visit:",
		"_3" : "https://github.com/aristocratos/bpytop",
	}
	while not cls.close:
		key = ""
		if cls.resized:
			#* Recalculate layout and redraw the static frame after a terminal resize
			y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
			out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
				f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
			x = Term.width//2-36
			h, w = Term.height-2-y, 72
			if len(help_items) > h:
				pages = ceil(len(help_items) / h)
			else:
				h = len(help_items)
				pages = 0
			page = 1
			out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
			redraw = True
			cls.resized = False
		if redraw:
			#* Render the visible page of key/description rows
			out = ""
			cy = 0
			if pages:
				out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
					f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
			out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
			for n, (keys, desc) in enumerate(help_items.items()):
				if pages and n < (page - 1) * h: continue
				out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
				cy += 1
				if cy == h: break
			if cy < h:
				#* Blank out leftover rows from a previous (longer) page
				for i in range(h-cy):
					out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
		if skip and redraw:
			Draw.now(out)
		elif not skip:
			Draw.now(f'{cls.background}{out_misc}{out}')
		skip = redraw = False
		if Key.input_wait(Timer.left()):
			key = Key.get()
			if key == "mouse_click":
				#* Translate clicks on the page arrows to key presses; clicks outside the box close the window
				mx, my = Key.get_mouse()
				if x <= mx < x + w and y <= my < y + h + 3:
					if pages and my == y and x + 56 < mx < x + 61:
						key = "up"
					elif pages and my == y and x + 63 < mx < x + 68:
						key = "down"
				else:
					key = "escape"
			if key == "q":
				clean_quit()
			elif key in ["escape", "M", "enter", "backspace", "H", "f1"]:
				cls.close = True
				break
			elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
				page -= 1
				if page < 1: page = pages
				redraw = True
			elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
				page += 1
				if page > pages: page = 1
				redraw = True
		if Timer.not_zero() and not cls.resized:
			skip = True
		else:
			#* Update interval expired: run collectors and refresh the dimmed background
			Collector.collect()
			Collector.collect_done.wait(2)
			if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
			Timer.stamp()
	if main_active:
		#* Opened from the main menu: hand control back there instead of restoring the ui
		cls.close = False
		return
	Draw.now(f'{Draw.saved_buffer()}')
	cls.background = ""
	cls.active = False
	cls.close = False
@classmethod
def options(cls):
	'''Show the options menu for editing CONFIG values; blocks in its own input loop until closed.

	Options are grouped into category tabs (system/cpu/mem/net/proc), each
	option carrying its description lines. Values are edited in place:
	booleans and enumerated options cycle with left/right, free-form values
	open an inline text input. Changes take effect immediately via CONFIG
	setters and related collector/box updates.
	'''
	if Term.width < 80 or Term.height < 24:
		errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
		return
	out: str = ""
	out_misc : str = ""
	redraw: bool = True
	selected_cat: str = ""
	selected_int: int = 0
	option_items: Dict[str, List[str]] = {}
	cat_list: List[str] = []
	cat_int: int = 0
	change_cat: bool = False
	key: str = ""
	skip: bool = False
	main_active: bool = cls.active
	cls.active = True
	cls.resized = True
	d_quote: str
	inputting: bool = False
	input_val: str = ""
	Theme.refresh()
	if not cls.background:
		cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
	#* category name -> { option (CONFIG attribute name) -> description lines }
	categories: Dict[str, Dict[str, List[str]]] = {
		"system" : {
			"color_theme" : [
				'Set color theme.',
				'',
				'Choose from all theme files in',
				'"/usr/[local/]share/bpytop/themes" and',
				'"~/.config/bpytop/themes".',
				'',
				'"Default" for builtin default theme.',
				'User themes are prefixed by a plus sign "+".',
				'',
				'For theme updates see:',
				'https://github.com/aristocratos/bpytop'],
			"theme_background" : [
				'If the theme set background should be shown.',
				'',
				'Set to False if you want terminal background',
				'transparency.'],
			"truecolor" : [
				'Sets if 24-bit truecolor should be used.',
				'(Requires restart to take effect!)',
				'',
				'Will convert 24-bit colors to 256 color',
				'(6x6x6 color cube) if False.',
				'',
				'Set to False if your terminal doesn\'t have',
				'truecolor support and can\'t convert to',
				'256-color.'],
			"shown_boxes" : [
				'Manually set which boxes to show.',
				'',
				'Available values are "cpu mem net proc".',
				'Seperate values with whitespace.',
				'',
				'Toggle between presets with mode key "m".'],
			"update_ms" : [
				'Update time in milliseconds.',
				'',
				'Recommended 2000 ms or above for better sample',
				'times for graphs.',
				'',
				'Min value: 100 ms',
				'Max value: 86400000 ms = 24 hours.'],
			"draw_clock" : [
				'Draw a clock at top of screen.',
				'(Only visible if cpu box is enabled!)',
				'',
				'Formatting according to strftime, empty',
				'string to disable.',
				'',
				'Custom formatting options:',
				'"/host" = hostname',
				'"/user" = username',
				'"/uptime" = system uptime',
				'',
				'Examples of strftime formats:',
				'"%X" = locale HH:MM:SS',
				'"%H" = 24h hour, "%I" = 12h hour',
				'"%M" = minute, "%S" = second',
				'"%d" = day, "%m" = month, "%y" = year'],
			"background_update" : [
				'Update main ui when menus are showing.',
				'',
				'True or False.',
				'',
				'Set this to false if the menus is flickering',
				'too much for a comfortable experience.'],
			"show_battery" : [
				'Show battery stats.',
				'(Only visible if cpu box is enabled!)',
				'',
				'Show battery stats in the top right corner',
				'if a battery is present.'],
			"show_init" : [
				'Show init screen at startup.',
				'',
				'The init screen is purely cosmetical and',
				'slows down start to show status messages.'],
			"update_check" : [
				'Check for updates at start.',
				'',
				'Checks for latest version from:',
				'https://github.com/aristocratos/bpytop'],
			"log_level" : [
				'Set loglevel for error.log',
				'',
				'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
				'The level set includes all lower levels,',
				'i.e. "DEBUG" will show all logging info.']
			},
		"cpu" : {
			"cpu_graph_upper" : [
				'Sets the CPU stat shown in upper half of',
				'the CPU graph.',
				'',
				'"total" = Total cpu usage.',
				'"user" = User mode cpu usage.',
				'"system" = Kernel mode cpu usage.',
				'See:',
				'https://psutil.readthedocs.io/en/latest/',
				'#psutil.cpu_times',
				'for attributes available on specific platforms.'],
			"cpu_graph_lower" : [
				'Sets the CPU stat shown in lower half of',
				'the CPU graph.',
				'',
				'"total" = Total cpu usage.',
				'"user" = User mode cpu usage.',
				'"system" = Kernel mode cpu usage.',
				'See:',
				'https://psutil.readthedocs.io/en/latest/',
				'#psutil.cpu_times',
				'for attributes available on specific platforms.'],
			"cpu_invert_lower" : [
				'Toggles orientation of the lower CPU graph.',
				'',
				'True or False.'],
			"cpu_single_graph" : [
				'Completely disable the lower CPU graph.',
				'',
				'Shows only upper CPU graph and resizes it',
				'to fit to box height.',
				'',
				'True or False.'],
			"check_temp" : [
				'Enable cpu temperature reporting.',
				'',
				'True or False.'],
			"cpu_sensor" : [
				'Cpu temperature sensor',
				'',
				'Select the sensor that corresponds to',
				'your cpu temperature.',
				'Set to "Auto" for auto detection.'],
			"show_coretemp" : [
				'Show temperatures for cpu cores.',
				'',
				'Only works if check_temp is True and',
				'the system is reporting core temps.'],
			"temp_scale" : [
				'Which temperature scale to use.',
				'',
				'Celsius, default scale.',
				'',
				'Fahrenheit, the american one.',
				'',
				'Kelvin, 0 = absolute zero, 1 degree change',
				'equals 1 degree change in Celsius.',
				'',
				'Rankine, 0 = abosulte zero, 1 degree change',
				'equals 1 degree change in Fahrenheit.'],
			"custom_cpu_name" : [
				'Custom cpu model name in cpu percentage box.',
				'',
				'Empty string to disable.'],
			"show_uptime" : [
				'Shows the system uptime in the CPU box.',
				'',
				'Can also be shown in the clock by using',
				'"/uptime" in the formatting.',
				'',
				'True or False.'],
			},
		"mem" : {
			"mem_graphs" : [
				'Show graphs for memory values.',
				'',
				'True or False.'],
			"show_disks" : [
				'Split memory box to also show disks.',
				'',
				'True or False.'],
			"show_io_stat" : [
				'Toggle small IO stat graphs.',
				'',
				'Toggles the small IO graphs for the regular',
				'disk usage view.',
				'',
				'True or False.'],
			"io_mode" : [
				'Toggles io mode for disks.',
				'',
				'Shows big graphs for disk read/write speeds',
				'instead of used/free percentage meters.',
				'',
				'True or False.'],
			"io_graph_combined" : [
				'Toggle combined read and write graphs.',
				'',
				'Only has effect if "io mode" is True.',
				'',
				'True or False.'],
			"io_graph_speeds" : [
				'Set top speeds for the io graphs.',
				'',
				'Manually set which speed in MiB/s that equals',
				'100 percent in the io graphs.',
				'(10 MiB/s by default).',
				'',
				'Format: "device:speed" seperate disks with a',
				'comma ",".',
				'',
				'Example: "/dev/sda:100, /dev/sdb:20".'],
			"show_swap" : [
				'If swap memory should be shown in memory box.',
				'',
				'True or False.'],
			"swap_disk" : [
				'Show swap as a disk.',
				'',
				'Ignores show_swap value above.',
				'Inserts itself after first disk.'],
			"only_physical" : [
				'Filter out non physical disks.',
				'',
				'Set this to False to include network disks,',
				'RAM disks and similar.',
				'',
				'True or False.'],
			"use_fstab" : [
				'Read disks list from /etc/fstab.',
				'(Has no effect on macOS X)',
				'',
				'This also disables only_physical.',
				'',
				'True or False.'],
			"disks_filter" : [
				'Optional filter for shown disks.',
				'',
				'Should be full path of a mountpoint,',
				'"root" replaces "/", separate multiple values',
				'with a comma ",".',
				'Begin line with "exclude=" to change to exclude',
				'filter.',
				'Oterwise defaults to "most include" filter.',
				'',
				'Example: disks_filter="exclude=/boot, /home/user"'],
			},
		"net" : {
			"net_download" : [
				'Fixed network graph download value.',
				'',
				'Default "10M" = 10 MibiBytes.',
				'Possible units:',
				'"K" (KiB), "M" (MiB), "G" (GiB).',
				'',
				'Append "bit" for bits instead of bytes,',
				'i.e "100Mbit"',
				'',
				'Can be toggled with auto button.'],
			"net_upload" : [
				'Fixed network graph upload value.',
				'',
				'Default "10M" = 10 MibiBytes.',
				'Possible units:',
				'"K" (KiB), "M" (MiB), "G" (GiB).',
				'',
				'Append "bit" for bits instead of bytes,',
				'i.e "100Mbit"',
				'',
				'Can be toggled with auto button.'],
			"net_auto" : [
				'Start in network graphs auto rescaling mode.',
				'',
				'Ignores any values set above at start and',
				'rescales down to 10KibiBytes at the lowest.',
				'',
				'True or False.'],
			"net_sync" : [
				'Network scale sync.',
				'',
				'Syncs the scaling for download and upload to',
				'whichever currently has the highest scale.',
				'',
				'True or False.'],
			"net_color_fixed" : [
				'Set network graphs color gradient to fixed.',
				'',
				'If True the network graphs color is based',
				'on the total bandwidth usage instead of',
				'the current autoscaling.',
				'',
				'The bandwidth usage is based on the',
				'"net_download" and "net_upload" values set',
				'above.'],
			"net_iface" : [
				'Network Interface.',
				'',
				'Manually set the starting Network Interface.',
				'Will otherwise automatically choose the NIC',
				'with the highest total download since boot.'],
			},
		"proc" : {
			"proc_update_mult" : [
				'Processes update multiplier.',
				'Sets how often the process list is updated as',
				'a multiplier of "update_ms".',
				'',
				'Set to 2 or higher to greatly decrease bpytop',
				'cpu usage. (Only integers)'],
			"proc_sorting" : [
				'Processes sorting option.',
				'',
				'Possible values: "pid", "program", "arguments",',
				'"threads", "user", "memory", "cpu lazy" and',
				'"cpu responsive".',
				'',
				'"cpu lazy" updates top process over time,',
				'"cpu responsive" updates top process directly.'],
			"proc_reversed" : [
				'Reverse processes sorting order.',
				'',
				'True or False.'],
			"proc_tree" : [
				'Processes tree view.',
				'',
				'Set true to show processes grouped by parents,',
				'with lines drawn between parent and child',
				'process.'],
			"tree_depth" : [
				'Process tree auto collapse depth.',
				'',
				'Sets the depth were the tree view will auto',
				'collapse processes at.'],
			"proc_colors" : [
				'Enable colors in process view.',
				'',
				'Uses the cpu graph gradient colors.'],
			"proc_gradient" : [
				'Enable process view gradient fade.',
				'',
				'Fades from top or current selection.',
				'Max fade value is equal to current themes',
				'"inactive_fg" color value.'],
			"proc_per_core" : [
				'Process usage per core.',
				'',
				'If process cpu usage should be of the core',
				'it\'s running on or usage of the total',
				'available cpu power.',
				'',
				'If true and process is multithreaded',
				'cpu usage can reach over 100%.'],
			"proc_mem_bytes" : [
				'Show memory as bytes in process list.',
				' ',
				'True or False.'],
			}
		}
	#* Indexes into the enumerated option value lists, seeded from current CONFIG values
	loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
	cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
	cpu_graph_i: Dict[str, int] = { "cpu_graph_upper" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_upper),
		"cpu_graph_lower" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_lower)}
	temp_scale_i: int = CONFIG.temp_scales.index(CONFIG.temp_scale)
	color_i: int
	max_opt_len: int = max([len(categories[x]) for x in categories]) * 2
	cat_list = list(categories)
	while not cls.close:
		key = ""
		if cls.resized or change_cat:
			#* Recalculate layout, redraw frame and category tabs after resize or tab switch
			cls.resized = change_cat = False
			selected_cat = list(categories)[cat_int]
			option_items = categories[cat_list[cat_int]]
			option_len: int = len(option_items) * 2
			y = 12 if Term.height < max_opt_len + 13 else Term.height // 2 - max_opt_len // 2 + 7
			out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
				f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
			x = Term.width//2-38
			x2 = x + 27
			h, w, w2 = min(Term.height-1-y, option_len), 26, 50
			h -= h % 2
			color_i = list(Theme.themes).index(THEME.current)
			out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
			out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
			redraw = True
			cat_width = floor((w+w2) / len(categories))
			out_misc += f'{Fx.b}'
			for cx, cat in enumerate(categories):
				out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
				if cat == selected_cat:
					out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
				else:
					out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
			out_misc += f'{Fx.ub}'
			if option_len > h:
				pages = ceil(option_len / h)
			else:
				h = option_len
				pages = 0
			page = pages if selected_int == -1 and pages > 0 else 1
			selected_int = 0 if selected_int >= 0 else len(option_items) - 1
		if redraw:
			#* Render the visible page of option rows plus the description box for the selected option
			out = ""
			cy = 0
			selected = list(option_items)[selected_int]
			if pages:
				out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
					f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
			#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
			for n, opt in enumerate(option_items):
				if pages and n < (page - 1) * ceil(h / 2): continue
				value = getattr(CONFIG, opt)
				t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
				v_color = "" if opt == selected else f'{THEME.title}'
				d_quote = '"' if isinstance(value, str) else ""
				#* Enumerated options get a "position/total" counter next to their name
				if opt == "color_theme":
					counter = f' {color_i + 1}/{len(Theme.themes)}'
				elif opt == "proc_sorting":
					counter = f' {CONFIG.sorting_options.index(CONFIG.proc_sorting) + 1}/{len(CONFIG.sorting_options)}'
				elif opt == "log_level":
					counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
				elif opt == "cpu_sensor":
					counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
				elif opt in ["cpu_graph_upper", "cpu_graph_lower"]:
					counter = f' {cpu_graph_i[opt] + 1}/{len(CONFIG.cpu_percent_fields)}'
				elif opt == "temp_scale":
					counter = f' {temp_scale_i + 1}/{len(CONFIG.temp_scales)}'
				else:
					counter = ""
				out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
				if opt == selected:
					if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor", "cpu_graph_upper", "cpu_graph_lower", "temp_scale"]:
						out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
					elif inputting:
						out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
					else:
						out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else "  ") +
						f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else "  "))
				else:
					out += f'{d_quote + str(value) + d_quote:^24.24}'
				out += f'{Term.bg}'
				if opt == selected:
					#* Description panel to the right of the options list
					h2 = len(option_items[opt]) + 2
					y2 = y + (selected_int * 2) - ((page-1) * h)
					if y2 + h2 > Term.height: y2 = Term.height - h2
					out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
					for n, desc in enumerate(option_items[opt]):
						out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
				cy += 2
				if cy >= h: break
			if cy < h:
				for i in range(h-cy):
					out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
		if not skip or redraw:
			Draw.now(f'{cls.background}{out_misc}{out}')
		skip = redraw = False
		if Key.input_wait(Timer.left()):
			key = Key.get()
			redraw = True
			has_sel = False
			if key == "mouse_click" and not inputting:
				#* Translate mouse clicks to equivalent key presses / selection changes
				mx, my = Key.get_mouse()
				if x < mx < x + w + w2 and y - 4 < my < y:
					# if my == y - 2:
					for cx, cat in enumerate(categories):
						ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
						if ccx - 2 < mx < ccx + 2 + len(cat):
							key = str(cx+1)
							break
				elif x < mx < x + w and y < my < y + h + 2:
					mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
					if pages and my == y+h+1 and x+11 < mx < x+16:
						key = "page_up"
					elif pages and my == y+h+1 and x+19 < mx < x+24:
						key = "page_down"
					elif my == y+h+1:
						pass
					elif mouse_sel == selected_int:
						if mx < x + 6:
							key = "left"
						elif mx > x + 19:
							key = "right"
						else:
							key = "enter"
					elif mouse_sel < len(option_items):
						selected_int = mouse_sel
						has_sel = True
				else:
					key = "escape"
			if inputting:
				#* Inline text/number input mode for free-form options
				if key in ["escape", "mouse_click"]:
					inputting = False
				elif key == "enter":
					inputting = False
					if str(getattr(CONFIG, selected)) != input_val:
						#* Commit the entered value, clamping and applying per-option side effects
						if selected == "update_ms":
							if not input_val or int(input_val) < 100:
								CONFIG.update_ms = 100
							elif int(input_val) > 86399900:
								CONFIG.update_ms = 86399900
							else:
								CONFIG.update_ms = int(input_val)
						elif selected == "proc_update_mult":
							if not input_val or int(input_val) < 1:
								CONFIG.proc_update_mult = 1
							else:
								CONFIG.proc_update_mult = int(input_val)
							Collector.proc_counter = 1
						elif selected == "tree_depth":
							if not input_val or int(input_val) < 0:
								CONFIG.tree_depth = 0
							else:
								CONFIG.tree_depth = int(input_val)
							ProcCollector.collapsed = {}
						elif selected == "shown_boxes":
							new_boxes: List = []
							for box in input_val.split():
								if box in ["cpu", "mem", "net", "proc"]:
									new_boxes.append(box)
							CONFIG.shown_boxes = " ".join(new_boxes)
							Box.view_mode = "user"
							Box.view_modes["user"] = CONFIG.shown_boxes.split()
							Draw.clear(saved=True)
						elif isinstance(getattr(CONFIG, selected), str):
							setattr(CONFIG, selected, input_val)
							if selected.startswith("net_"):
								NetCollector.net_min = {"download" : -1, "upload" : -1}
							elif selected == "draw_clock":
								Box.clock_on = len(CONFIG.draw_clock) > 0
								if not Box.clock_on: Draw.clear("clock", saved=True)
							elif selected == "io_graph_speeds":
								MemBox.graph_speeds = {}
						Term.refresh(force=True)
						cls.resized = False
				elif key == "backspace" and len(input_val):
					input_val = input_val[:-1]
				elif key == "delete":
					input_val = ""
				elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
					input_val += key
				elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
					input_val += key
			elif key == "q":
				clean_quit()
			elif key in ["escape", "o", "M", "f2"]:
				cls.close = True
				break
			elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and (page == pages or pages == 0)):
				#* Next category tab (also reached by scrolling past the last option)
				if cat_int == len(categories) - 1:
					cat_int = 0
				else:
					cat_int += 1
				change_cat = True
			elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
				#* Previous category tab (also reached by scrolling before the first option)
				if cat_int == 0:
					cat_int = len(categories) - 1
				else:
					cat_int -= 1
				change_cat = True
				selected_int = -1 if key != "shift_tab" else 0
			elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
				#* Jump directly to category by number key
				cat_int = int(key) - 1
				change_cat = True
			elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
				"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface", "io_graph_speeds"]:
				#* Open inline input for free-form options
				inputting = True
				input_val = str(getattr(CONFIG, selected))
			elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
				CONFIG.update_ms -= 100
				Box.draw_update_ms()
			elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
				CONFIG.update_ms += 100
				Box.draw_update_ms()
			elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
				CONFIG.proc_update_mult -= 1
				Collector.proc_counter = 1
			elif key == "right" and selected == "proc_update_mult":
				CONFIG.proc_update_mult += 1
				Collector.proc_counter = 1
			elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
				CONFIG.tree_depth -= 1
				ProcCollector.collapsed = {}
			elif key == "right" and selected == "tree_depth":
				CONFIG.tree_depth += 1
				ProcCollector.collapsed = {}
			elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
				#* Toggle boolean option and apply its immediate side effects
				setattr(CONFIG, selected, not getattr(CONFIG, selected))
				if selected == "check_temp":
					if CONFIG.check_temp:
						CpuCollector.get_sensors()
					else:
						CpuCollector.sensor_method = ""
						CpuCollector.got_sensors = False
				if selected in ["net_auto", "net_color_fixed", "net_sync"]:
					if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
					NetBox.redraw = True
				if selected == "theme_background":
					Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
					Draw.now(Term.bg)
				if selected == "show_battery":
					Draw.clear("battery", saved=True)
				Term.refresh(force=True)
				cls.resized = False
			elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
				if key == "left":
					color_i -= 1
					if color_i < 0: color_i = len(Theme.themes) - 1
				elif key == "right":
					color_i += 1
					if color_i > len(Theme.themes) - 1: color_i = 0
				Collector.collect_idle.wait()
				CONFIG.color_theme = list(Theme.themes)[color_i]
				THEME(CONFIG.color_theme)
				Term.refresh(force=True)
				Timer.finish()
			elif key in ["left", "right"] and selected == "proc_sorting":
				ProcCollector.sorting(key)
			elif key in ["left", "right"] and selected == "log_level":
				if key == "left":
					loglevel_i -= 1
					if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
				elif key == "right":
					loglevel_i += 1
					if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
				CONFIG.log_level = CONFIG.log_levels[loglevel_i]
				errlog.setLevel(getattr(logging, CONFIG.log_level))
				errlog.info(f'Loglevel set to {CONFIG.log_level}')
			elif key in ["left", "right"] and selected in ["cpu_graph_upper", "cpu_graph_lower"]:
				if key == "left":
					cpu_graph_i[selected] -= 1
					if cpu_graph_i[selected] < 0: cpu_graph_i[selected] = len(CONFIG.cpu_percent_fields) - 1
				if key == "right":
					cpu_graph_i[selected] += 1
					if cpu_graph_i[selected] > len(CONFIG.cpu_percent_fields) - 1: cpu_graph_i[selected] = 0
				setattr(CONFIG, selected, CONFIG.cpu_percent_fields[cpu_graph_i[selected]])
				setattr(CpuCollector, selected.replace("_graph", ""), [])
				Term.refresh(force=True)
				cls.resized = False
			elif key in ["left", "right"] and selected == "temp_scale":
				if key == "left":
					temp_scale_i -= 1
					if temp_scale_i < 0: temp_scale_i = len(CONFIG.temp_scales) - 1
				if key == "right":
					temp_scale_i += 1
					if temp_scale_i > len(CONFIG.temp_scales) - 1: temp_scale_i = 0
				CONFIG.temp_scale = CONFIG.temp_scales[temp_scale_i]
				Term.refresh(force=True)
				cls.resized = False
			elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
				if key == "left":
					cpu_sensor_i -= 1
					if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
				elif key == "right":
					cpu_sensor_i += 1
					if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
				Collector.collect_idle.wait()
				CpuCollector.sensor_swap = True
				CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
				if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
					CpuCollector.get_sensors()
				Term.refresh(force=True)
				cls.resized = False
			elif key in ["up", "mouse_scroll_up"]:
				selected_int -= 1
				if selected_int < 0: selected_int = len(option_items) - 1
				page = floor(selected_int * 2 / h) + 1
			elif key in ["down", "mouse_scroll_down"]:
				selected_int += 1
				if selected_int > len(option_items) - 1: selected_int = 0
				page = floor(selected_int * 2 / h) + 1
			elif key == "page_up":
				if not pages or page == 1:
					selected_int = 0
				else:
					page -= 1
					if page < 1: page = pages
				selected_int = (page-1) * ceil(h / 2)
			elif key == "page_down":
				if not pages or page == pages:
					selected_int = len(option_items) - 1
				else:
					page += 1
					if page > pages: page = 1
					selected_int = (page-1) * ceil(h / 2)
			elif has_sel:
				pass
			else:
				redraw = False
		if Timer.not_zero() and not cls.resized:
			skip = True
		else:
			#* Update interval expired: run collectors and refresh the dimmed background
			Collector.collect()
			Collector.collect_done.wait(2)
			if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
			Timer.stamp()
	if main_active:
		#* Opened from the main menu: hand control back there instead of restoring the ui
		cls.close = False
		return
	Draw.now(f'{Draw.saved_buffer()}')
	cls.background = ""
	cls.active = False
	cls.close = False
class Timer:
	'''Tracks the current update interval and how much of it remains.'''
	timestamp: float		# start of the current interval (epoch seconds)
	return_zero = False		# when True, the next not_zero() call reports expiry once

	@classmethod
	def stamp(cls):
		'''Record the start of a new update interval.'''
		cls.timestamp = time()

	@classmethod
	def not_zero(cls) -> bool:
		'''Return True while the current interval still has time remaining.

		A pending finish() request forces a single False and clears itself.
		'''
		if cls.return_zero:
			cls.return_zero = False
			return False
		deadline = cls.timestamp + (CONFIG.update_ms / 1000)
		return time() < deadline

	@classmethod
	def left(cls) -> float:
		'''Seconds remaining of the current interval (may be negative when overdue).'''
		deadline = cls.timestamp + (CONFIG.update_ms / 1000)
		return deadline - time()

	@classmethod
	def finish(cls):
		'''Force the interval to expire immediately and wake any blocking input wait.'''
		cls.return_zero = True
		cls.timestamp = time() - (CONFIG.update_ms / 1000)
		Key.break_wait()
class UpdateChecker:
	'''Fetches the latest bpytop version string from GitHub in a background thread
	and, if a newer release exists, notifies the user via notify-send.'''
	version: str = VERSION
	thread: threading.Thread

	@classmethod
	def run(cls):
		'''Start the version check in a daemon-less background thread so startup is not blocked.'''
		cls.thread = threading.Thread(target=cls._checker)
		cls.thread.start()

	@classmethod
	def _checker(cls):
		'''Scrape the VERSION assignment out of the upstream source file and compare.'''
		try:
			with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
				for raw_line in source:
					decoded = raw_line.decode("utf-8")
					if not decoded.startswith("VERSION: str ="):
						continue
					cls.version = decoded[(decoded.index("=")+1):].strip('" \n')
					break
		except Exception as e:
			errlog.exception(f'{e}')
		else:
			#* Only notify when a different version was found and notify-send is installed
			if cls.version == VERSION or not which("notify-send"):
				return
			try:
				subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
					f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
					"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
			except Exception as e:
				errlog.exception(f'{e}')
class Init:
	'''Draws and manages the animated startup/init screen.'''
	running: bool = True
	initbg_colors: List[str] = []	# grayscale gradient used by the background graphs
	initbg_data: List[int]			# random data feeding the background graphs
	initbg_up: Graph
	initbg_down: Graph
	resized = False

	@classmethod
	def start(cls):
		'''Set up the init screen buffers: banner, progress markers and background graphs.'''
		Draw.buffer("init", z=1)
		Draw.buffer("initbg", z=10)
		#* Build a 102-entry grayscale gradient (each level doubled) for the graphs
		for i in range(51):
			for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
		Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
			f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
		#* Seven percentage markers down the left side of the status column
		for _i in range(7):
			perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
			Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
		Draw.out("banner")
		Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
		cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
		cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
		cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)

	@classmethod
	def success(cls):
		'''Mark the current init step as succeeded and advance the status cursor.'''
		if not CONFIG.show_init or cls.resized: return
		cls.draw_bg(5)
		Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')

	@staticmethod
	def fail(err):
		'''Mark the current init step as failed, log the error and quit.'''
		if CONFIG.show_init:
			Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
			sleep(2)
		errlog.exception(f'{err}')
		clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')

	@classmethod
	def draw_bg(cls, times: int = 5):
		'''Animate the background graphs for the given number of frames.'''
		for _ in range(times):
			sleep(0.05)
			x = randint(0, 100)
			Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
			Draw.out("initbg", "banner", "init")

	@classmethod
	def done(cls):
		'''Tear down the init screen and release its buffers and graph data.'''
		cls.running = False
		if not CONFIG.show_init: return
		if cls.resized:
			Draw.now(Term.clear)
		else:
			cls.draw_bg(10)
		Draw.clear("initbg", "banner", "init", saved=True)
		if cls.resized: return
		del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
	'''Fetch a suitable CPU identifier from the CPU model name string.

	Probes the platform-specific source of the model name (/proc/cpuinfo on
	Linux, sysctl on MacOS/BSD), then strips vendor boilerplate and clock
	speeds so only a compact model name remains.

	Returns: cleaned cpu model name, or "" if the probe command failed.
	'''
	name: str = ""
	nlist: List = []
	command: str = ""
	cmd_out: str = ""
	rem_line: str = ""		# marker for the line holding the model name, "" = use whole output
	if SYSTEM == "Linux":
		command = "cat /proc/cpuinfo"
		rem_line = "model name"
	elif SYSTEM == "MacOS":
		command = "sysctl -n machdep.cpu.brand_string"
	elif SYSTEM == "BSD":
		command = "sysctl hw.model"
		rem_line = "hw.model"
	try:
		#* LANG=C keeps the output ascii/english so the markers above match
		cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
	except Exception:
		#* Best effort: a failed probe just leaves cmd_out empty (was a bare
		#* except, which also swallowed SystemExit/KeyboardInterrupt)
		pass
	if rem_line:
		for line in cmd_out.split("\n"):
			if rem_line in line:
				name = re.sub(".*" + rem_line + ".*:", "", line, 1).lstrip()
	else:
		name = cmd_out
	nlist = name.split(" ")
	#* Try to reduce known vendor formats to just the model number
	try:
		if "Xeon" in name and "CPU" in name:
			name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
		elif "Ryzen" in name:
			name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
		elif "Duo" in name and "@" in name:
			name = " ".join(nlist[:nlist.index("@")])
		elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
			name = nlist[nlist.index("CPU")-1]
	except Exception:
		#* Unknown format: keep the full name (was a bare except)
		pass
	name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
	name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
	name = " ".join(name.split())
	return name
def get_cpu_core_mapping() -> List[int]:
	'''Return a list mapping each logical cpu thread number to a physical core index.

	On Linux the mapping is parsed from /proc/cpuinfo ("processor" paired with
	"core id" entries); on parse failure or other platforms a round-robin
	mapping of THREADS over CORES is used as fallback.
	'''
	mapping: List[int] = []
	core_ids: List[int] = []
	if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
		try:
			mapping = [0] * THREADS
			num = 0
			with open("/proc/cpuinfo", "r") as f:
				for line in f:
					if line.startswith("processor"):
						num = int(line.strip()[(line.index(": ")+2):])
						if num > THREADS - 1:
							break
					elif line.startswith("core id"):
						core_id = int(line.strip()[(line.index(": ")+2):])
						if core_id not in core_ids:
							core_ids.append(core_id)
						mapping[num] = core_ids.index(core_id)
			# Fewer "processor" entries than expected threads: discard and fall back
			if num < THREADS - 1:
				raise Exception
		except:
			mapping = []
	if not mapping:
		mapping = []
		for _ in range(THREADS // CORES):
			mapping.extend([x for x in range(CORES)])
	return mapping
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
	'''Create a box from a box object or by given arguments

	Returns the terminal escape string that draws the box outline and optional
	titles, ending with the cursor placed at the box interior's top left corner.
	'''
	out: str = f'{Term.fg}{Term.bg}'
	num: int = 0
	if not line_color: line_color = THEME.div_line
	if not title_color: title_color = THEME.title
	#* Get values from box class if given
	if box:
		x = box.x
		y = box.y
		width = box.width
		height = box.height
		title = box.name
		num = box.num
	hlines: Tuple[int, int] = (y, y + height - 1)
	out += f'{line_color}'
	#* Draw all horizontal lines
	for hpos in hlines:
		out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
	#* Draw all vertical lines and fill if enabled
	for hpos in range(hlines[0]+1, hlines[1]):
		out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
	#* Draw corners
	out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
	#* Draw titles if enabled
	if title:
		# num > 0 adds a superscript box number before the title (used by the boxes)
		numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
		out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
	if title2:
		out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
	return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
def now_sleeping(signum, frame):
	"""Reset terminal settings and stop background input read before putting to sleep"""
	# Signal handler for SIGTSTP (Ctrl-Z): tear down threads and terminal
	# state, then actually suspend the process with SIGSTOP.
	Key.stop()
	Collector.stop()
	Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
	Term.echo(True)
	os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
	"""Set terminal settings and restart background input read"""
	# Signal handler for SIGCONT: mirror image of now_sleeping() — restore the
	# alternate screen, recalculate layout and restart the worker threads.
	Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
	Term.echo(False)
	Key.start()
	Term.refresh()
	Box.calc_sizes()
	Box.draw_bg()
	Collector.start()
def quit_sigint(signum, frame):
	"""SIGINT redirection to clean_quit()"""
	# Installed as the Ctrl-C handler so the terminal is always restored.
	clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
	"""Stop background input read, save current config and reset terminal settings before quitting"""
	global THREAD_ERROR
	if thread:
		# Called from a worker thread: record the error code and raise
		# KeyboardInterrupt in the main thread, which re-enters this function.
		THREAD_ERROR = errcode
		interrupt_main()
		return
	if THREAD_ERROR: errcode = THREAD_ERROR
	Key.stop()
	Collector.stop()
	# Only persist the configuration on a clean exit
	if not errcode: CONFIG.save_config()
	Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
	Term.echo(True)
	if errcode == 0:
		errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
	else:
		errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
		if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
	if errmsg: print(errmsg)
	raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
	'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
	* bit=True or defaults to bytes
	* start=int to set 1024 multiplier starting unit
	* short=True always returns 0 decimals and shortens unit to 1 character
	'''
	out: str = ""
	mult: int = 8 if bit else 1
	selector: int = start
	unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
	# Work on value * 100 as an integer so two decimal digits survive the shifts
	if isinstance(value, float): value = round(value * 100 * mult)
	elif value > 0: value *= 100 * mult
	else: value = 0
	while len(f'{value}') > 5 and value >= 102400:
		value >>= 10
		if value < 100:
			out = f'{value}'
			break
		selector += 1
	else:
		# Place the decimal point according to how many digits remain
		if len(f'{value}') == 4 and selector > 0:
			out = f'{value}'[:-2] + "." + f'{value}'[-2]
		elif len(f'{value}') == 3 and selector > 0:
			out = f'{value}'[:-2] + "." + f'{value}'[-2:]
		elif len(f'{value}') >= 2:
			out = f'{value}'[:-2]
		else:
			out = f'{value}'
	if short:
		if "." in out:
			out = f'{round(float(out))}'
		if len(out) > 3:
			# Round up to the next unit to keep the string at most 3 chars wide
			out = f'{int(out[0]) + 1}'
			selector += 1
	out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
	if per_second: out += "ps" if bit else "/s"
	return out
def units_to_bytes(value: str) -> int:
	'''Convert a human readable size string (e.g. "10M", "100mbit") to bytes.

	Accepts an optional trailing "s", a "bit"/"byte" suffix and a "K"/"M"/"G"
	1024-multiplier. Returns 0 for empty or unparsable input.
	'''
	if not value: return 0
	out: int = 0
	mult: int = 0
	bit: bool = False
	value_i: int = 0
	units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
	try:
		if value.lower().endswith("s"):
			value = value[:-1]
		if value.lower().endswith("bit"):
			bit = True
			value = value[:-3]
		elif value.lower().endswith("byte"):
			value = value[:-4]
		#* Guard against inputs that were nothing but a suffix (e.g. "s", "bits"),
		#* which previously raised an uncaught IndexError on value[-1]
		if value and value[-1].lower() in units:
			mult = units[value[-1].lower()]
			value = value[:-1]
		if "." in value and value.replace(".", "").isdigit():
			if mult > 0:
				#* Consume one multiplier step to keep the fraction exact
				value_i = round(float(value) * 1024)
				mult -= 1
			else:
				value_i = round(float(value))
		elif value.isdigit():
			value_i = int(value)
		out = int(value_i) << (10 * mult)
		if bit: out = round(out / 8)
	except (ValueError, IndexError):
		out = 0
	return out
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
	"""Clamp *value* into the inclusive range [min_value, max_value]."""
	capped = value if value < max_value else max_value
	return capped if capped > min_value else min_value
def readfile(file: str, default: str = "") -> str:
	'''Read and return the stripped contents of *file*, or *default* on any failure.'''
	out: Union[str, None] = None
	if os.path.isfile(file):
		try:
			with open(file, "r") as f:
				out = f.read().strip()
		except Exception:
			#* Was a bare "except:" which also swallowed SystemExit/KeyboardInterrupt;
			#* an unreadable or undecodable file simply falls back to the default.
			pass
	return default if out is None else out
def temperature(value: int, scale: str = "celsius") -> Tuple[int, str]:
	"""Returns a tuple with integer value and string unit converted from an integer in celsius to: celsius, fahrenheit, kelvin or rankine."""
	conversions: Dict[str, Tuple[Callable[[int], int], str]] = {
		"celsius" : (lambda c: c, "°C"),
		"fahrenheit" : (lambda c: round(c * 1.8 + 32), "°F"),
		"kelvin" : (lambda c: round(c + 273.15), "°K"),
		"rankine" : (lambda c: round(c * 1.8 + 491.67), "°R"),
	}
	if scale not in conversions:
		return (0, "")
	convert, unit = conversions[scale]
	return (convert(value), unit)
def process_keys():
	"""Drain the keyboard/mouse input queue and dispatch each key.

	Handles, in order: mouse position mapping onto the process box, the
	process-filter input mode, global keys (quit, update interval, menus,
	box toggles) and finally the per-box keys for proc, net and mem.
	"""
	mouse_pos: Tuple[int, int] = (0, 0)
	filtered: bool = False
	box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
	while Key.has_key():
		key = Key.get()
		found: bool = True
		# Mouse events only act inside the process box; clicks elsewhere
		# unselect, scrolls elsewhere are dropped
		if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
			mouse_pos = Key.get_mouse()
			if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
				pass
			elif key == "mouse_click":
				key = "mouse_unselect"
			else:
				key = "_null"
		# While the filter input is active all keys edit the search filter
		if ProcBox.filtering:
			if key in ["enter", "mouse_click", "mouse_unselect"]:
				ProcBox.filtering = False
				Collector.collect(ProcCollector, redraw=True, only_draw=True)
				continue
			elif key in ["escape", "delete"]:
				ProcCollector.search_filter = ""
				ProcBox.filtering = False
			elif len(key) == 1:
				ProcCollector.search_filter += key
			elif key == "backspace" and len(ProcCollector.search_filter) > 0:
				ProcCollector.search_filter = ProcCollector.search_filter[:-1]
			else:
				continue
			Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
			if filtered: Collector.collect_done.wait(0.1)
			filtered = True
			continue
		# Global keys
		if key == "_null":
			continue
		elif key == "q":
			clean_quit()
		elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
			CONFIG.update_ms += 100
			Box.draw_update_ms()
		elif key == "-" and CONFIG.update_ms - 100 >= 100:
			CONFIG.update_ms -= 100
			Box.draw_update_ms()
		elif key in ["M", "escape"]:
			Menu.main()
		elif key in ["o", "f2"]:
			Menu.options()
		elif key in ["H", "f1"]:
			Menu.help()
		elif key == "m":
			# Cycle through the preset view modes
			if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
				Box.view_mode = list(Box.view_modes)[0]
			else:
				Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
			CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
			Draw.clear(saved=True)
			Term.refresh(force=True)
		elif key in box_keys:
			# Toggle a single box on/off and switch to the "user" view mode
			boxes = CONFIG.shown_boxes.split()
			if box_keys[key] in boxes:
				boxes.remove(box_keys[key])
			else:
				boxes.append(box_keys[key])
			CONFIG.shown_boxes = " ".join(boxes)
			Box.view_mode = "user"
			Box.view_modes["user"] = CONFIG.shown_boxes.split()
			Draw.clear(saved=True)
			Term.refresh(force=True)
		else:
			found = False
		if found: continue
		# Process box keys
		if "proc" in Box.boxes:
			if key in ["left", "right", "h", "l"]:
				ProcCollector.sorting(key)
			elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
				if ProcBox.selected_pid in ProcCollector.collapsed:
					ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
				Collector.collect(ProcCollector, interrupt=True, redraw=True)
			elif key == "e":
				CONFIG.proc_tree = not CONFIG.proc_tree
				Collector.collect(ProcCollector, interrupt=True, redraw=True)
			elif key == "r":
				CONFIG.proc_reversed = not CONFIG.proc_reversed
				Collector.collect(ProcCollector, interrupt=True, redraw=True)
			elif key == "c":
				CONFIG.proc_per_core = not CONFIG.proc_per_core
				Collector.collect(ProcCollector, interrupt=True, redraw=True)
			elif key in ["f", "F"]:
				# Enter filter input mode ("F" makes the filter case sensitive)
				ProcBox.filtering = True
				ProcCollector.case_sensitive = key == "F"
				if not ProcCollector.search_filter: ProcBox.start = 0
				Collector.collect(ProcCollector, redraw=True, only_draw=True)
			elif key in ["T", "K", "I"] and (ProcBox.selected > 0 or ProcCollector.detailed):
				# Send SIGTERM / SIGKILL / SIGINT to the selected process
				pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
				if psutil.pid_exists(pid):
					if key == "T": sig = signal.SIGTERM
					elif key == "K": sig = signal.SIGKILL
					elif key == "I": sig = signal.SIGINT
					try:
						os.kill(pid, sig)
					except Exception as e:
						errlog.error(f'Exception when sending signal {sig} to pid {pid}')
						errlog.exception(f'{e}')
			elif key == "delete" and ProcCollector.search_filter:
				ProcCollector.search_filter = ""
				Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
			elif key == "enter":
				# Toggle the detailed view for the selected process
				if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
					ProcCollector.detailed = True
					ProcBox.last_selection = ProcBox.selected
					ProcBox.selected = 0
					ProcCollector.detailed_pid = ProcBox.selected_pid
					ProcBox.resized = True
					Collector.proc_counter = 1
				elif ProcCollector.detailed:
					ProcBox.selected = ProcBox.last_selection
					ProcBox.last_selection = 0
					ProcCollector.detailed = False
					ProcCollector.detailed_pid = None
					ProcBox.resized = True
					Collector.proc_counter = 1
				else:
					continue
				ProcCollector.details = {}
				ProcCollector.details_cpu = []
				ProcCollector.details_mem = []
				Graphs.detailed_cpu = NotImplemented
				Graphs.detailed_mem = NotImplemented
				Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
			elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect", "j", "k"]:
				ProcBox.selector(key, mouse_pos)
		# Net box keys
		if "net" in Box.boxes:
			if key in ["b", "n"]:
				NetCollector.switch(key)
			elif key == "z":
				NetCollector.reset = not NetCollector.reset
				Collector.collect(NetCollector, redraw=True)
			elif key == "y":
				CONFIG.net_sync = not CONFIG.net_sync
				Collector.collect(NetCollector, redraw=True)
			elif key == "a":
				NetCollector.auto_min = not NetCollector.auto_min
				NetCollector.net_min = {"download" : -1, "upload" : -1}
				Collector.collect(NetCollector, redraw=True)
		# Mem box keys
		if "mem" in Box.boxes:
			if key == "g":
				CONFIG.mem_graphs = not CONFIG.mem_graphs
				Collector.collect(MemCollector, interrupt=True, redraw=True)
			elif key == "s":
				Collector.collect_idle.wait()
				CONFIG.swap_disk = not CONFIG.swap_disk
				Collector.collect(MemCollector, interrupt=True, redraw=True)
			elif key == "d":
				Collector.collect_idle.wait()
				CONFIG.show_disks = not CONFIG.show_disks
				Collector.collect(MemCollector, interrupt=True, redraw=True)
			elif key == "i":
				Collector.collect_idle.wait()
				CONFIG.io_mode = not CONFIG.io_mode
				Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->
# Resolved once at import time; CPU_NAME may still be overridden by
# CONFIG.custom_cpu_name elsewhere.
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
# Declared here, assigned in main() after the config has been loaded
THEME: Theme
def main():
	"""Program entry point: set up terminal, theme and worker threads, then run the main loop."""
	global THEME
	Term.width = os.get_terminal_size().columns
	Term.height = os.get_terminal_size().lines
	#? Init -------------------------------------------------------------------------------------->
	if DEBUG: TimeIt.start("Init")
	#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
	Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
	Term.echo(False)
	#Term.refresh(force=True)
	#? Start a thread checking for updates while running init
	if CONFIG.update_check: UpdateChecker.run()
	#? Draw banner and init status
	if CONFIG.show_init and not Init.resized:
		Init.start()
	#? Load theme
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
	try:
		THEME = Theme(CONFIG.color_theme)
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Setup boxes
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
	try:
		if CONFIG.check_temp: CpuCollector.get_sensors()
		Box.calc_sizes()
		Box.draw_bg(now=False)
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
	try:
		signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
		signal.signal(signal.SIGCONT, now_awake) #* Resume
		signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
		signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Start a separate thread for reading keyboard input
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
	try:
		# Ignore undecodable input bytes so exotic keyboards can't crash the reader
		if isinstance(sys.stdin, io.TextIOWrapper) and sys.version_info >= (3, 7):
			sys.stdin.reconfigure(errors="ignore") # type: ignore
		Key.start()
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Start a separate thread for data collection and drawing
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
	try:
		Collector.start()
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Collect data and draw to buffer
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
	try:
		Collector.collect(draw_now=False)
		pass
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	#? Draw to screen
	if CONFIG.show_init:
		Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
	try:
		Collector.collect_done.wait()
	except Exception as e:
		Init.fail(e)
	else:
		Init.success()
	Init.done()
	Term.refresh()
	Draw.out(clear=True)
	if CONFIG.draw_clock:
		Box.clock_on = True
	if DEBUG: TimeIt.stop("Init")
	#? Main loop ------------------------------------------------------------------------------------->
	def run():
		# Loop forever: refresh layout, wait out the update timer while
		# servicing input, then trigger the next collection round.
		while not False:
			Term.refresh()
			Timer.stamp()
			while Timer.not_zero():
				if Key.input_wait(Timer.left()):
					process_keys()
			Collector.collect()
	#? Start main loop
	try:
		run()
	except Exception as e:
		errlog.exception(f'{e}')
		clean_quit(1)
	else:
		#? Quit cleanly even if false starts being true...
		clean_quit()
# Standard script entry guard
if __name__ == "__main__":
	main()
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, io, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
# Collect import errors instead of failing immediately so all problems
# can be reported at once, together with a platform check.
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
# Program start time, used to report total runtime on exit
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
	print("ERROR!")
	print("\n".join(errors))
	if SYSTEM == "Other":
		print("\nUnsupported platform!\n")
	else:
		print("\nInstall required modules!\n")
	raise SystemExit(1)
VERSION: str = "1.0.63"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help = "which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-lc", "--low-color", action="store_true", help = "disable truecolor, converts 24-bit colors to 256-color")
args.add_argument("-v", "--version", action="store_true", help = "show version info and exit")
args.add_argument("--debug", action="store_true", help = "start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
# -v/--version prints and exits before anything else is initialized
if stdargs.version:
	print(f'bpytop version: {VERSION}\n'
		f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
	raise SystemExit(0)
# Command line overrides, consumed later during setup
ARG_BOXES: str = stdargs.boxes
LOW_COLOR: bool = stdargs.low_color
DEBUG: bool = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false.
truecolor=$truecolor
#* Manually set which boxes to show. Available values are "cpu mem net proc", seperate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_upper="$cpu_graph_upper"
#* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_lower="$cpu_graph_lower"
#* Toggles if the lower CPU graph should be inverted.
cpu_invert_lower=$cpu_invert_lower
#* Set to True to completely disable the lower CPU graph.
cpu_single_graph=$cpu_single_graph
#* Shows the system uptime in the CPU box.
show_uptime=$show_uptime
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors has been found
show_coretemp=$show_coretemp
#* Which temperature scale to use, available values: "celsius", "fahrenheit", "kelvin" and "rankine"
temp_scale="$temp_scale"
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus is flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, oterwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Toggles if io stats should be shown in regular disk usage view
show_io_stat=$show_io_stat
#* Toggles io mode for disks, showing only big graphs for disk read/write speeds.
io_mode=$io_mode
#* Set to True to show combined read/write io graphs in io mode.
io_graph_combined=$io_graph_combined
#* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed" seperate disks with a comma ",".
#* Example: "/dev/sda:100, /dev/sdb:20"
io_graph_speeds="$io_graph_speeds"
#* Set fixed values for network graphs, default "10M" = 10 Mibibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwith usage or auto scale, bandwith usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface="$net_iface"
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetical
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
# Per-user configuration directory; created (with a themes subfolder) on first run
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
	try:
		os.makedirs(CONFIG_DIR)
		os.mkdir(f'{CONFIG_DIR}/themes')
	except PermissionError:
		print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
		raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
# System-wide theme directory: prefer a bundled folder next to this file,
# otherwise probe the usual install prefixes
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
	THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
	for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
		if os.path.isdir(f'{td}share/bpytop/themes'):
			THEME_DIR = f'{td}share/bpytop/themes'
			break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
# Physical cores and logical threads; fall back to 1 if psutil can't tell
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
# Set by clean_quit(thread=True) to hand an exit code to the main thread
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
# Rotating log in the config dir (1 MiB x 4 backups); all module logging goes here
try:
	errlog = logging.getLogger("ErrorLogger")
	errlog.setLevel(logging.DEBUG)
	eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
	eh.setLevel(logging.DEBUG)
	eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
	errlog.addHandler(eh)
except PermissionError:
	print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
	raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
	"""Collection of named stopwatches used for debugging and profiling."""
	timers: Dict[str, float] = {}
	paused: Dict[str, float] = {}
	@classmethod
	def start(cls, name):
		"""Start (or restart) the timer *name*."""
		cls.timers[name] = time()
	@classmethod
	def pause(cls, name):
		"""Pause *name*, banking the elapsed time; no-op if not running."""
		if not name in cls.timers: return
		cls.paused[name] = time() - cls.timers[name]
		del cls.timers[name]
	@classmethod
	def stop(cls, name):
		"""Stop *name* and log its total (including paused) elapsed time."""
		if not name in cls.timers: return
		total: float = time() - cls.timers[name] + cls.paused.pop(name, 0.0)
		del cls.timers[name]
		errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
	'''Decorator that logs the wrapped function's execution time to the error log.

	Uses functools.wraps so the wrapper keeps the wrapped function's
	__name__/__doc__ (the original decorator discarded them).
	'''
	from functools import wraps
	@wraps(func)
	def timed(*args, **kw):
		ts = time()
		out = func(*args, **kw)
		errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
		return out
	return timed
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical",
"truecolor", "io_mode", "io_graph_combined", "io_graph_speeds", "show_io_stat", "cpu_graph_upper", "cpu_graph_lower", "cpu_invert_lower",
"cpu_single_graph", "show_uptime", "temp_scale"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
truecolor: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
cpu_graph_upper: str = "total"
cpu_graph_lower: str = "total"
cpu_invert_lower: bool = True
cpu_single_graph: bool = False
show_uptime: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
temp_scale: str = "celsius"
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
show_io_stat: bool = True
io_mode: bool = False
io_graph_combined: bool = False
io_graph_speeds: str = ""
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = True
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_percent_fields: List = ["total"]
cpu_percent_fields.extend(getattr(psutil.cpu_times_percent(), "_fields", []))
temp_scales: List[str] = ["celsius", "fahrenheit", "kelvin", "rankine"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
	def __init__(self, path: str):
		'''Load and validate the config file at *path*; schedules a rewrite on exit if the file is missing, malformed or from another bpytop version.'''
		self.config_file = path
		conf: Dict[str, Union[str, int, bool]] = self.load_config()
		if not "version" in conf.keys():
			self.recreate = True
			self.info.append(f'Config file malformatted or missing, will be recreated on exit!')
		elif conf["version"] != VERSION:
			self.recreate = True
			self.info.append(f'Config file version and bpytop version missmatch, will be recreated on exit!')
		for key in self.keys:
			if key in conf.keys() and conf[key] != "_error_":
				#* Valid value: setattr routes through __setattr__ which also mirrors it into conf_dict
				setattr(self, key, conf[key])
			else:
				#* Key missing or failed validation: keep the class default and flag the file for recreation
				self.recreate = True
				self.conf_dict[key] = getattr(self, key)
		#* From here on __setattr__ marks the config as changed on every assignment
		self._initialized = True
	def __setattr__(self, name, value):
		'''Set an attribute; once initialized, also mark the config as changed and mirror the value into conf_dict for saving.'''
		if self._initialized:
			#* object.__setattr__ is used directly to avoid re-entering this method recursively
			object.__setattr__(self, "changed", True)
		object.__setattr__(self, name, value)
		#* Internal bookkeeping flags are not part of the saved configuration
		if name not in ["_initialized", "recreate", "changed"]:
			self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
for cpu_graph in ["cpu_graph_upper", "cpu_graph_lower"]:
if cpu_graph in new_config and not new_config[cpu_graph] in self.cpu_percent_fields:
new_config[cpu_graph] = "_error_"
self.warnings.append(f'Config key "{cpu_graph}" does not contain an available cpu stat attribute!')
if "temp_scale" in new_config and not new_config["temp_scale"] in self.temp_scales:
new_config["temp_scale"] = "_error_"
self.warnings.append(f'Config key "temp_scale" does not contain a recognized temperature scale!')
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
#* Instantiate the global CONFIG and apply command line overrides; abort on any failure
try:
	CONFIG: Config = Config(CONFIG_FILE)
	if DEBUG:
		errlog.setLevel(logging.DEBUG)
	else:
		#* No --debug flag: take the log level from the config file instead
		errlog.setLevel(getattr(logging, CONFIG.log_level))
		DEBUG = CONFIG.log_level == "DEBUG"
	errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
	errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
	errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
	errlog.debug(f'CMD: {" ".join(sys.argv)}')
	#* Flush any messages collected while loading the config into the log
	if CONFIG.info:
		for info in CONFIG.info:
			errlog.info(info)
		CONFIG.info = []
	if CONFIG.warnings:
		for warning in CONFIG.warnings:
			errlog.warning(warning)
		CONFIG.warnings = []
except Exception as e:
	errlog.exception(f'{e}')
	raise SystemExit(1)
#* The -b/--boxes command line argument overrides shown_boxes from the config
if ARG_BOXES:
	_new_boxes: List = []
	for _box in ARG_BOXES.split():
		if _box in ["cpu", "mem", "net", "proc"]:
			_new_boxes.append(_box)
	CONFIG.shown_boxes = " ".join(_new_boxes)
	del _box, _new_boxes
#* Disable the battery meter when sysfs exposes no power supplies
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
	CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
	warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
	print("WARNING!", warn)
	errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
	"""Terminal info and commands"""
	width: int = 0 #* Current terminal width in columns
	height: int = 0 #* Current terminal height in lines
	resized: bool = False #* True while a resize is being processed
	_w : int = 0 #* Scratch width read from os.get_terminal_size()
	_h : int = 0 #* Scratch height read from os.get_terminal_size()
	fg: str = "" #* Default foreground color
	bg: str = "" #* Default background color
	hide_cursor = "\033[?25l" #* Hide terminal cursor
	show_cursor = "\033[?25h" #* Show terminal cursor
	alt_screen = "\033[?1049h" #* Switch to alternate screen
	normal_screen = "\033[?1049l" #* Switch to normal screen
	clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
	mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
	mouse_off = "\033[?1002l" #* Disable mouse reporting
	mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
	mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
	winch = threading.Event() #* Set from the SIGWINCH handler to wake the resize wait loop
	old_boxes: List = [] #* Box.boxes as of the previous refresh, used to detect layout changes
	min_width: int = 0 #* Minimum terminal width required by the active box layout
	min_height: int = 0 #* Minimum terminal height required by the active box layout
	@classmethod
	def refresh(cls, *args, force: bool = False):
		"""Update width, height and set resized flag if terminal has been resized"""
		if Init.running: cls.resized = False; return
		if cls.resized: cls.winch.set(); return
		cls._w, cls._h = os.get_terminal_size()
		if (cls._w, cls._h) == (cls.width, cls.height) and cls.old_boxes == Box.boxes and not force: return
		if force: Collector.collect_interrupt = True
		if cls.old_boxes != Box.boxes:
			#* Shown boxes changed: recalculate the minimum terminal size for the new layout
			w_p = h_p = 0
			cls.min_width = cls.min_height = 0
			cls.old_boxes = Box.boxes.copy()
			for box_class in Box.__subclasses__():
				for box_name in Box.boxes:
					if box_name in str(box_class).capitalize():
						#* Only count a box's width/height once per shared screen row/column
						if not (box_name == "cpu" and "proc" in Box.boxes) and not (box_name == "net" and "mem" in Box.boxes) and w_p + box_class.width_p <= 100:
							w_p += box_class.width_p
							cls.min_width += getattr(box_class, "min_w", 0)
						if not (box_name in ["mem", "net"] and "proc" in Box.boxes) and h_p + box_class.height_p <= 100:
							h_p += box_class.height_p
							cls.min_height += getattr(box_class, "min_h", 0)
		#* Loop until the size is stable and at least the layout minimum, showing a status box meanwhile
		while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < cls.min_width or cls._h < cls.min_height):
			if Init.running: Init.resized = True
			CpuBox.clock_block = True
			cls.resized = True
			Collector.collect_interrupt = True
			cls.width, cls.height = cls._w, cls._h
			Draw.now(Term.clear)
			box_width = min(50, cls._w - 2)
			Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
				f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
			if cls._w < 80 or cls._h < 24:
				#* Terminal is too small: keep warning until the user enlarges it
				while cls._w < cls.min_width or cls._h < cls.min_height:
					Draw.now(Term.clear)
					box_width = min(50, cls._w - 2)
					Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, box_width, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
						f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < cls.min_width else Colors.green}{cls._w} ',
						f'{Colors.default}Height: {Colors.red if cls._h < cls.min_height else Colors.green}{cls._h}{Term.bg}{Term.fg}',
						f'{Mv.d(1)}{Mv.l(25)}{Colors.default}{Colors.black_bg}Current config need: {cls.min_width} x {cls.min_height}{Fx.ub}{Term.bg}{Term.fg}')
					cls.winch.wait(0.3)
					cls.winch.clear()
					cls._w, cls._h = os.get_terminal_size()
			else:
				#* Debounce: wait briefly for further resize events before settling
				cls.winch.wait(0.3)
				cls.winch.clear()
				cls._w, cls._h = os.get_terminal_size()
		#* Size is stable: rebuild layout and redraw backgrounds
		Key.mouse = {}
		Box.calc_sizes()
		Collector.proc_counter = 1
		if Menu.active: Menu.resized = True
		Box.draw_bg(now=False)
		cls.resized = False
		Timer.finish()
	@staticmethod
	def echo(on: bool):
		"""Toggle input echo"""
		(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
		if on:
			lflag |= termios.ECHO # type: ignore
		else:
			lflag &= ~termios.ECHO # type: ignore
		new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
		termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
	@staticmethod
	def title(text: str = "") -> str:
		'''Return the escape sequence that sets the terminal window title, prefixed with $TERMINAL_TITLE if set'''
		out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
		if out and text: out += " "
		if text: out += f'{text}'
		return f'\033]0;{out}\a'
class Fx:
	"""ANSI text effect escape sequences and helpers.
	* trans(string: str): swap every space for a cursor-right escape so the background behind whitespace is not overwritten.
	* uncolor(string: str): strip all 24-bit color escape sequences from a string."""
	start = "\033[" #* Start of an escape sequence
	sep = ";" #* Separator between escape sequence arguments
	end = "m" #* Terminator of an escape sequence
	reset = rs = "\033[0m" #* Reset foreground/background color and all text effects
	bold = b = "\033[1m" #* Bold on
	unbold = ub = "\033[22m" #* Bold off
	dark = d = "\033[2m" #* Dark on
	undark = ud = "\033[22m" #* Dark off
	italic = i = "\033[3m" #* Italic on
	unitalic = ui = "\033[23m" #* Italic off
	underline = u = "\033[4m" #* Underline on
	ununderline = uu = "\033[24m" #* Underline off
	blink = bl = "\033[5m" #* Blink on
	unblink = ubl = "\033[25m" #* Blink off
	strike = s = "\033[9m" #* Strike-through on
	unstrike = us = "\033[29m" #* Strike-through off
	#* Matches any 24-bit color escape sequence, compiled once for reuse
	color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
	@staticmethod
	def trans(string: str) -> str:
		return string.translate({ord(" "): "\033[1C"})
	@classmethod
	def uncolor(cls, string: str) -> str:
		return cls.color_re.sub("", string)
class Raw(object):
	"""Context manager that puts a terminal stream in cbreak (raw-ish) input mode and restores the previous mode on exit."""
	def __init__(self, stream):
		self.stream = stream
		self.fd = self.stream.fileno()
	def __enter__(self):
		#* Save current terminal attributes so __exit__ can restore them
		self.original_stty = termios.tcgetattr(self.stream)
		tty.setcbreak(self.stream)
	def __exit__(self, type, value, traceback):
		termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
	"""Context manager that switches a stream's file descriptor to non-blocking mode and restores the original flags on exit."""
	def __init__(self, stream):
		self.stream = stream
		self.fd = self.stream.fileno()
	def __enter__(self):
		current_flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
		#* Keep the original flags around so they can be restored when leaving the context
		self.orig_fl = current_flags
		fcntl.fcntl(self.fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
	def __exit__(self, *args):
		fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
	"""Cursor movement escape sequence builders.
	.t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save | .restore"""
	@staticmethod
	def to(line: int, col: int) -> str:
		'''Move cursor to an absolute line and column'''
		return "\033[{};{}f".format(line, col)
	@staticmethod
	def right(x: int) -> str:
		'''Move cursor right x columns'''
		return "\033[{}C".format(x)
	@staticmethod
	def left(x: int) -> str:
		'''Move cursor left x columns'''
		return "\033[{}D".format(x)
	@staticmethod
	def up(x: int) -> str:
		'''Move cursor up x lines'''
		return "\033[{}A".format(x)
	@staticmethod
	def down(x: int) -> str:
		'''Move cursor down x lines'''
		return "\033[{}B".format(x)
	save: str = "\033[s" #* Save cursor position
	restore: str = "\033[u" #* Restore saved cursor position
	#* Single-letter aliases used throughout the drawing code
	t = to
	r = right
	l = left
	u = up
	d = down
class Key:
	"""Handles the threaded input reader for keypresses and mouse events"""
	list: List[str] = [] #* Queue of decoded key names, oldest first, capped at 10 entries
	mouse: Dict[str, List[List[int]]] = {} #* Clickable [col, line] positions registered by the UI, keyed by action name
	mouse_pos: Tuple[int, int] = (0, 0) #* Last reported mouse position as (column, line)
	#* Maps raw escape sequences (or tuples of alternatives) to readable key names
	escape: Dict[Union[str, Tuple[str, str]], str] = {
		"\n" : "enter",
		("\x7f", "\x08") : "backspace",
		("[A", "OA") : "up",
		("[B", "OB") : "down",
		("[D", "OD") : "left",
		("[C", "OC") : "right",
		"[2~" : "insert",
		"[3~" : "delete",
		"[H" : "home",
		"[F" : "end",
		"[5~" : "page_up",
		"[6~" : "page_down",
		"\t" : "tab",
		"[Z" : "shift_tab",
		"OP" : "f1",
		"OQ" : "f2",
		"OR" : "f3",
		"OS" : "f4",
		"[15" : "f5",
		"[17" : "f6",
		"[18" : "f7",
		"[19" : "f8",
		"[20" : "f9",
		"[21" : "f10",
		"[23" : "f11",
		"[24" : "f12"
		}
	new = threading.Event() #* Set whenever a new key or mouse event is available
	idle = threading.Event() #* Cleared while the reader is mid-sequence, to pause Draw output
	mouse_move = threading.Event() #* Set when the mouse moved in direct reporting mode
	mouse_report: bool = False
	idle.set()
	stopping: bool = False #* Signals the reader thread to exit
	started: bool = False
	reader: threading.Thread
	@classmethod
	def start(cls):
		'''Start the background input reader thread'''
		cls.stopping = False
		cls.reader = threading.Thread(target=cls._get_key)
		cls.reader.start()
		cls.started = True
	@classmethod
	def stop(cls):
		'''Signal the reader thread to stop and wait for it to finish'''
		if cls.started and cls.reader.is_alive():
			cls.stopping = True
			try:
				cls.reader.join()
			except:
				pass
	@classmethod
	def last(cls) -> str:
		'''Pop and return the newest queued key, or "" if the queue is empty'''
		if cls.list: return cls.list.pop()
		else: return ""
	@classmethod
	def get(cls) -> str:
		'''Pop and return the oldest queued key, or "" if the queue is empty'''
		if cls.list: return cls.list.pop(0)
		else: return ""
	@classmethod
	def get_mouse(cls) -> Tuple[int, int]:
		'''Return the last reported mouse position, clearing the new-event flag if set'''
		if cls.new.is_set():
			cls.new.clear()
		return cls.mouse_pos
	@classmethod
	def mouse_moved(cls) -> bool:
		'''Return True once per mouse movement event'''
		if cls.mouse_move.is_set():
			cls.mouse_move.clear()
			return True
		else:
			return False
	@classmethod
	def has_key(cls) -> bool:
		'''Return True if any key is queued'''
		return bool(cls.list)
	@classmethod
	def clear(cls):
		'''Discard all queued keys'''
		cls.list = []
	@classmethod
	def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
		'''Returns True if key is detected else waits out timer and returns False'''
		if cls.list: return True
		if mouse: Draw.now(Term.mouse_direct_on)
		cls.new.wait(sec if sec > 0 else 0.0)
		if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
		if cls.new.is_set():
			cls.new.clear()
			return True
		else:
			return False
	@classmethod
	def break_wait(cls):
		'''Wake any input_wait() caller by queueing a dummy key and pulsing the new-event flag'''
		cls.list.append("_null")
		cls.new.set()
		sleep(0.01)
		cls.new.clear()
	@classmethod
	def _get_key(cls):
		"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
		input_key: str = ""
		clean_key: str = ""
		try:
			while not cls.stopping:
				with Raw(sys.stdin):
					if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
						continue
					input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
					if input_key == "\033": #* If first character is a escape sequence keep reading
						cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting a IO Block error
						Draw.idle.wait() #* Wait for Draw function to finish if busy
						with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
							input_key += sys.stdin.read(20)
							if input_key.startswith("\033[<"):
								#* Drain any remaining buffered mouse report data
								_ = sys.stdin.read(1000)
						cls.idle.set() #* Report IO blocking done
					#errlog.debug(f'{repr(input_key)}')
					if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
					elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
						try:
							cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
						except:
							pass
						else:
							if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
								cls.mouse_move.set()
								cls.new.set()
							elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
								clean_key = "mouse_scroll_up"
							elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
								clean_key = "mouse_scroll_down"
							elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
								if Menu.active:
									clean_key = "mouse_click"
								else:
									for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
										if list(cls.mouse_pos) in positions:
											clean_key = key_name
											break
									else: #* Propagate a generic click if the position matched no registered area
										clean_key = "mouse_click"
					elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
					else:
						for code in cls.escape.keys(): #* Go trough dict of escape codes to get the cleaned key name
							if input_key.lstrip("\033").startswith(code):
								clean_key = cls.escape[code]
								break
						else: #* If not found in escape dict and length of key is 1, assume regular character
							if len(input_key) == 1:
								clean_key = input_key
					if clean_key:
						cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
						if len(cls.list) > 10: del cls.list[0]
						clean_key = ""
						cls.new.set() #* Set threading event to interrupt main thread sleep
					input_key = ""
		except Exception as e:
			errlog.exception(f'Input thread failed with exception: {e}')
			cls.idle.set()
			cls.list.clear()
			clean_quit(1, thread=True)
class Draw:
	'''Holds the draw buffer and manages IO blocking queue
	* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
	* - Adding "+" prefix to name sets append to True and appends to name's current string
	* - Adding "!" suffix to name sets now to True and print name's current string
	* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
	* .now(*args) : Prints all arguments as a string
	* .clear(*names) : Clear named buffers, all if no argument
	* .last_screen() : Prints all saved buffers
	'''
	strings: Dict[str, str] = {} #* Pending buffer contents by name
	z_order: Dict[str, int] = {} #* Z value per buffer; higher z is printed first (drawn underneath)
	saved: Dict[str, str] = {} #* Last printed contents per buffer, used to repaint the screen
	save: Dict[str, bool] = {} #* Whether a buffer's contents should be kept in saved on output
	once: Dict[str, bool] = {} #* Whether a buffer is cleared automatically after being printed
	idle = threading.Event() #* Cleared while printing, so the input reader can wait for output to finish
	idle.set()
	@classmethod
	def now(cls, *args):
		'''Wait for input reader and self to be idle then print to screen'''
		Key.idle.wait()
		cls.idle.wait()
		cls.idle.clear()
		try:
			print(*args, sep="", end="", flush=True)
		except BlockingIOError:
			#* Terminal was busy: wait for the input reader to release stdin and retry once
			pass
			Key.idle.wait()
			print(*args, sep="", end="", flush=True)
		cls.idle.set()
	@classmethod
	def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
		'''Add a string to the named buffer; "+" prefix appends, "!" suffix prints immediately'''
		string: str = ""
		if name.startswith("+"):
			name = name.lstrip("+")
			append = True
		if name.endswith("!"):
			name = name.rstrip("!")
			now = True
		cls.save[name] = not no_save
		cls.once[name] = once
		if not name in cls.z_order or z != 100: cls.z_order[name] = z
		if args: string = "".join(args)
		if only_save:
			#* Store directly into the saved screen state without queueing for output
			if name not in cls.saved or not append: cls.saved[name] = ""
			cls.saved[name] += string
		else:
			if name not in cls.strings or not append: cls.strings[name] = ""
			cls.strings[name] += string
		if now:
			cls.out(name)
	@classmethod
	def out(cls, *names: str, clear = False):
		'''Print the named buffers (or all buffers) in z order, updating saved state'''
		out: str = ""
		if not cls.strings: return
		if names:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
				if name in names and name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						cls.saved[name] = cls.strings[name]
					if clear or cls.once[name]:
						cls.clear(name)
			cls.now(out)
		else:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
				if name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						cls.saved[name] = cls.strings[name]
					if cls.once[name] and not clear:
						cls.clear(name)
			if clear:
				cls.clear()
			cls.now(out)
	@classmethod
	def saved_buffer(cls) -> str:
		'''Return the concatenation of all saved buffers in z order, for repainting the screen'''
		out: str = ""
		for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
			if name in cls.saved:
				out += cls.saved[name]
		return out
	@classmethod
	def clear(cls, *names, saved: bool = False):
		'''Clear the named buffers (or all), optionally also dropping saved state and z order'''
		if names:
			for name in names:
				if name in cls.strings:
					del cls.strings[name]
				if name in cls.save:
					del cls.save[name]
				if name in cls.once:
					del cls.once[name]
				if saved:
					if name in cls.saved:
						del cls.saved[name]
					if name in cls.z_order:
						del cls.z_order[name]
		else:
			cls.strings = {}
			cls.save = {}
			cls.once = {}
			if saved:
				cls.saved = {}
				cls.z_order = {}
class Color:
	'''Holds representations for a 24-bit color value
	__init__(color, depth="fg", default=False)
	-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
	-- depth accepts "fg" or "bg"
	__call__(*args) joins str arguments to a string and apply color
	__str__ returns escape sequence to set color
	__iter__ returns iteration over red, green and blue in integer values of 0-255.
	* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
	'''
	hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
	def __init__(self, color: str, depth: str = "fg", default: bool = False):
		self.depth = depth
		self.default = default
		#* Initialize every representation up front: the class-level names above are
		#* annotations only and create no attributes, so without these defaults the
		#* decimal-RGB branch (which never sets hexa) and the error path (which
		#* returns early) would leave attributes missing and raise AttributeError
		#* on later access.
		self.hexa = ""
		self.dec = (-1, -1, -1)
		self.red = self.green = self.blue = -1
		self.escape = ""
		try:
			if not color:
				#* Empty color: keep the sentinel values; "\033[49m" resets the default background
				self.escape = "\033[49m" if depth == "bg" and default else ""
				return
			elif color.startswith("#"):
				self.hexa = color
				if len(self.hexa) == 3:
					#* Two-digit form "#cc": expand to six digits of the same greyscale value
					self.hexa += self.hexa[1:3] + self.hexa[1:3]
					c = int(self.hexa[1:3], base=16)
					self.dec = (c, c, c)
				elif len(self.hexa) == 7:
					self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
				else:
					raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
			else:
				c_t = tuple(map(int, color.split(" ")))
				if len(c_t) == 3:
					self.dec = c_t #type: ignore
				else:
					raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
			#* Validate each channel individually (the old sum-based check let values
			#* like "300 0 0" through and produced an invalid escape sequence)
			if not all(0 <= c <= 255 for c in self.dec):
				raise ValueError(f'RGB values out of range: {color}')
		except Exception as e:
			errlog.exception(str(e))
			self.escape = ""
			return
		#* Decimal input: derive the hexadecimal representation from the channel values
		if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
		if self.dec and self.hexa:
			self.red, self.green, self.blue = self.dec
			self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
		#* Downgrade to a 256-color escape when truecolor is disabled or unsupported
		if not CONFIG.truecolor or LOW_COLOR:
			self.escape = f'{self.truecolor_to_256(rgb=self.dec, depth=self.depth)}'
	def __str__(self) -> str:
		return self.escape
	def __repr__(self) -> str:
		return repr(self.escape)
	def __iter__(self) -> Iterable:
		for c in self.dec: yield c
	def __call__(self, *args: str) -> str:
		'''Join the arguments, wrap them in this color and reset to the terminal default afterwards'''
		if len(args) < 1: return ""
		return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
	@staticmethod
	def truecolor_to_256(rgb: Tuple[int, int, int], depth: str="fg") -> str:
		'''Map a 24-bit rgb tuple to the closest entry of the 256-color palette and return its escape sequence'''
		out: str = ""
		pre: str = f'\033[{"38" if depth == "fg" else "48"};5;'
		greyscale: Tuple[int, int, int] = ( rgb[0] // 11, rgb[1] // 11, rgb[2] // 11 )
		if greyscale[0] == greyscale[1] == greyscale[2]:
			#* Uniform channels: use the 24-step greyscale ramp (indexes 232-255)
			out = f'{pre}{232 + greyscale[0]}m'
		else:
			#* Otherwise use the 6x6x6 color cube (indexes 16-231)
			out = f'{pre}{round(rgb[0] / 51) * 36 + round(rgb[1] / 51) * 6 + round(rgb[2] / 51) + 16}m'
		return out
	@staticmethod
	def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
		"""Returns escape sequence to set color
		* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
		* or decimal RGB: r=0-255, g=0-255, b=0-255
		* depth="fg" or "bg"
		"""
		dint: int = 38 if depth == "fg" else 48
		color: str = ""
		if hexa:
			try:
				if len(hexa) == 3:
					c = int(hexa[1:], base=16)
					if CONFIG.truecolor and not LOW_COLOR:
						color = f'\033[{dint};2;{c};{c};{c}m'
					else:
						color = f'{Color.truecolor_to_256(rgb=(c, c, c), depth=depth)}'
				elif len(hexa) == 7:
					if CONFIG.truecolor and not LOW_COLOR:
						color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
					else:
						color = f'{Color.truecolor_to_256(rgb=(int(hexa[1:3], base=16), int(hexa[3:5], base=16), int(hexa[5:7], base=16)), depth=depth)}'
			except ValueError as e:
				errlog.exception(f'{e}')
		else:
			if CONFIG.truecolor and not LOW_COLOR:
				color = f'\033[{dint};2;{r};{g};{b}m'
			else:
				color = f'{Color.truecolor_to_256(rgb=(r, g, b), depth=depth)}'
		return color
	@classmethod
	def fg(cls, *args) -> str:
		'''Foreground color escape from either (r, g, b) ints or a single hex string'''
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
		else: return cls.escape_color(hexa=args[0], depth="fg")
	@classmethod
	def bg(cls, *args) -> str:
		'''Background color escape from either (r, g, b) ints or a single hex string'''
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
		else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
	'''Standard colors for menus and dialogs'''
	default = Color("#cc")
	white = Color("#ff")
	red = Color("#bf3636")
	green = Color("#68bf36")
	blue = Color("#0fd7ff")
	yellow = Color("#db8b00")
	black_bg = Color("#00", depth="bg")
	null = Color("") #* Empty color: renders as no escape sequence at all
class Theme:
	'''__init__ accepts a dict containing { "color_element" : "color" }'''
	themes: Dict[str, str] = {} #* Available theme names mapped to their file paths ("Default" is built in)
	cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME } #* Parsed theme files, cached by name
	current: str = "" #* Name of the currently loaded theme
	main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
	#* 101-entry color gradients (indexed 0-100) built by _load_theme for each named element
	gradient: Dict[str, List[str]] = {
		"temp" : [],
		"cpu" : [],
		"free" : [],
		"cached" : [],
		"available" : [],
		"used" : [],
		"download" : [],
		"upload" : [],
		"proc" : [],
		"proc_color" : [],
		"process" : [],
	}
	def __init__(self, theme: str):
		self.refresh()
		self._load_theme(theme)
	def __call__(self, theme: str):
		'''Switch to another theme, discarding the previously built gradients'''
		for k in self.gradient.keys(): self.gradient[k] = []
		self._load_theme(theme)
	def _load_theme(self, theme: str):
		'''Resolve, cache and apply the named theme, then rebuild all gradients and terminal colors'''
		tdict: Dict[str, str]
		if theme in self.cached:
			tdict = self.cached[theme]
		elif theme in self.themes:
			tdict = self._load_file(self.themes[theme])
			self.cached[theme] = tdict
		else:
			#* Unknown theme name: fall back to the built-in default
			errlog.warning(f'No theme named "{theme}" found!')
			theme = "Default"
			CONFIG.color_theme = theme
			tdict = DEFAULT_THEME
		self.current = theme
		#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
		#* Backfill newer color elements from older ones so legacy theme files still work
		if not "graph_text" in tdict and "inactive_fg" in tdict:
			tdict["graph_text"] = tdict["inactive_fg"]
		if not "meter_bg" in tdict and "inactive_fg" in tdict:
			tdict["meter_bg"] = tdict["inactive_fg"]
		if not "process_start" in tdict and "cpu_start" in tdict:
			tdict["process_start"] = tdict["cpu_start"]
			tdict["process_mid"] = tdict.get("cpu_mid", "")
			tdict["process_end"] = tdict.get("cpu_end", "")
		#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
		for item, value in DEFAULT_THEME.items():
			default = item in ["main_fg", "main_bg"]
			depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
			if item in tdict:
				setattr(self, item, Color(tdict[item], depth=depth, default=default))
			else:
				setattr(self, item, Color(value, depth=depth, default=default))
		#* Create color gradients from one, two or three colors, 101 values indexed 0-100
		self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
		self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
		rgb: Dict[str, Tuple[int, int, int]]
		colors: List[List[int]] = []
		for name in self.gradient:
			rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
			colors = [ list(getattr(self, f'{name}_start')) ]
			if rgb["end"][0] >= 0:
				#* Interpolate linearly start->mid->end (two 50-step halves) or start->end (one 100-step run)
				r = 50 if rgb["mid"][0] >= 0 else 100
				for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
					for i in range(r):
						colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
					if r == 100:
						break
				self.gradient[name] += [ Color.fg(*color) for color in colors ]
			else:
				#* Single color defined: repeat it for all 101 gradient slots
				c = Color.fg(*rgb["start"])
				self.gradient[name] += [c] * 101
		#* Set terminal colors
		Term.fg = f'{self.main_fg}'
		Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
		Draw.now(self.main_fg, self.main_bg)
	@classmethod
	def refresh(cls):
		'''Sets themes dict with names and paths to all found themes'''
		cls.themes = { "Default" : "Default" }
		try:
			for d in (THEME_DIR, USER_THEME_DIR):
				if not d: continue
				for f in os.listdir(d):
					if f.endswith(".theme"):
						#* User themes are prefixed with "+" to distinguish them from system themes
						cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
		except Exception as e:
			errlog.exception(str(e))
	@staticmethod
	def _load_file(path: str) -> Dict[str, str]:
		'''Load a bashtop formatted theme file and return a dict'''
		new_theme: Dict[str, str] = {}
		try:
			with open(path, "r") as f:
				for line in f:
					#* Lines look like: theme[main_bg]="#00"
					if not line.startswith("theme["): continue
					key = line[6:line.find("]")]
					s = line.find('"')
					value = line[s + 1:line.find('"', s + 1)]
					new_theme[key] = value
		except Exception as e:
			errlog.exception(str(e))
		return new_theme
class Banner:
	'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
	out: List[str] = [] #* Pre-rendered banner lines with color escapes, built once at class creation
	c_color: str = "" #* Color currently in effect while rendering, to avoid repeating escapes
	length: int = 0 #* Width of the widest banner line, used for centering
	if not out:
		#* Render each BANNER_SRC line once: block characters get the line's colors, other glyphs a darkening shade
		for num, (color, color2, line) in enumerate(BANNER_SRC):
			if len(line) > length: length = len(line)
			out_var = ""
			line_color = Color.fg(color)
			line_color2 = Color.fg(color2)
			line_dark = Color.fg(f'#{80 - num * 6}')
			for n, letter in enumerate(line):
				if letter == "█" and c_color != line_color:
					#* Middle section of the logo uses the secondary color
					if 5 < n < 25: c_color = line_color2
					else: c_color = line_color
					out_var += c_color
				elif letter == " ":
					#* Replace spaces with cursor-right so the background shows through
					letter = f'{Mv.r(1)}'
					c_color = ""
				elif letter != "█" and c_color != line_dark:
					c_color = line_dark
					out_var += line_dark
				out_var += letter
			out.append(out_var)
	@classmethod
	def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
		'''Return (or print, if now=True) the banner positioned at the given line and column'''
		out: str = ""
		if center: col = Term.width // 2 - cls.length // 2
		for n, o in enumerate(cls.out):
			out += f'{Mv.to(line + n, col)}{o}'
		out += f'{Term.fg}'
		if now: Draw.out(out)
		else: return out
class Symbol:
	'''Box-drawing, arrow and braille graph characters used throughout the UI'''
	h_line: str = "─"
	v_line: str = "│"
	left_up: str = "┌"
	right_up: str = "┐"
	left_down: str = "└"
	right_down: str = "┘"
	title_left: str = "┤"
	title_right: str = "├"
	div_up: str = "┬"
	div_down: str = "┴"
	#* Braille characters for graphs growing from the bottom; keys appear to encode
	#* the dot heights (0-4) of the left and right pixel columns as <left>.<right>
	graph_up: Dict[float, str] = {
	0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
	1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
	2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
	3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
	4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
	}
	graph_up_small = graph_up.copy()
	graph_up_small[0.0] = "\033[1C" #* Cursor-right instead of space so the background is preserved
	#* Braille characters for inverted graphs growing from the top
	graph_down: Dict[float, str] = {
	0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
	1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
	2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
	3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
	4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
	}
	graph_down_small = graph_down.copy()
	graph_down_small[0.0] = "\033[1C" #* Cursor-right instead of space so the background is preserved
	meter: str = "■"
	up: str = "↑"
	down: str = "↓"
	left: str = "←"
	right: str = "→"
	enter: str = "↲"
	ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
	fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
    '''Class for creating and adding to graphs
    * __str__ : returns graph as a string
    * add(value: int) : adds a value to graph and returns it as a string
    * __call__ : same as add
    '''
    out: str                    # last fully rendered graph string
    width: int                  # graph width in terminal columns
    height: int                 # graph height in terminal rows
    graphs: Dict[bool, List[str]]  # two alternating buffers (keyed True/False), one row-string list each
    colors: List[str]           # per-row (or per-percent) color escape strings
    invert: bool                # True draws the graph hanging from the top edge
    max_value: int              # if set, input values are scaled as percent of this ceiling
    color_max_value: int        # separate ceiling used only for color gradient scaling
    offset: int                 # added to values before scaling
    no_zero: bool               # never render a completely empty column for nonzero data
    round_up_low: bool          # round very low values up to at least level 1
    current: bool               # which of the two buffers is active
    last: int                   # last value added (left half of next braille cell)
    lowest: int = 0
    symbol: Dict[float, str]    # braille lookup table chosen from Symbol
    def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None, no_zero: bool = False, round_up_low: bool = False):
        self.graphs: Dict[bool, List[str]] = {False : [], True : []}
        self.current: bool = True
        self.width = width
        self.height = height
        self.invert = invert
        self.offset = offset
        self.round_up_low = round_up_low
        self.no_zero = no_zero or round_up_low
        if not data: data = [0]
        if max_value:
            self.lowest = 1 if self.round_up_low else 0
            self.max_value = max_value
            data = [ min_max((v + offset) * 100 // (max_value + offset), min_max(v + offset, 0, self.lowest), 100) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
        else:
            self.max_value = 0
        if color_max_value:
            self.color_max_value = color_max_value
        else:
            self.color_max_value = self.max_value
        # Scale the gradient so colors follow color_max_value instead of max_value
        if self.color_max_value and self.max_value:
            color_scale = int(100.0 * self.max_value / self.color_max_value)
        else:
            color_scale = 100
        self.colors: List[str] = []
        if isinstance(color, list) and height > 1:
            for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
            if invert: self.colors.reverse()
        elif isinstance(color, Color) and height > 1:
            self.colors = [ f'{color}' for _ in range(height) ]
        else:
            if isinstance(color, list): self.colors = color
            elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
        # 1-row graphs use the "small" symbol tables (cursor-skip for empty cells)
        if self.height == 1:
            self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
        else:
            self.symbol = Symbol.graph_down if invert else Symbol.graph_up
        value_width: int = ceil(len(data) / 2)  # each braille cell holds two data points
        filler: str = ""
        if value_width > width: #* If the size of given data set is bigger then width of graph, shrink data set
            data = data[-(width*2):]
            value_width = ceil(len(data) / 2)
        elif value_width < width: #* If the size of given data set is smaller then width of graph, fill graph with whitespace
            filler = self.symbol[0.0] * (width - value_width)
        if len(data) % 2: data.insert(0, 0)  # pad to an even count so data pairs up into cells
        for _ in range(height):
            for b in [True, False]:
                self.graphs[b].append(filler)
        self._create(data, new=True)
    def _create(self, data: List[int], new: bool = False):
        '''Render data points into the row buffers; new=True builds both buffers from scratch.'''
        h_high: int
        h_low: int
        value: Dict[str, int] = { "left" : 0, "right" : 0 }
        val: int
        side: str
        #* Create the graph
        for h in range(self.height):
            # Percentage band covered by this terminal row
            h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
            h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
            for v in range(len(data)):
                if new: self.current = bool(v % 2) #* Switch between True and False graphs
                if new and v == 0: self.last = 0
                # Left half of the cell is the previous value, right half the current one
                for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
                    if val >= h_high:
                        value[side] = 4
                    elif val <= h_low:
                        value[side] = 0
                    else:
                        # Partial fill: map value within this row's band to levels 1-4
                        if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
                        else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
                    # no_zero: force at least one dot on the bottom row for nonzero data
                    if self.no_zero and not (new and v == 0 and side == "left") and h == self.height - 1 and value[side] < 1 and not (self.round_up_low and val == 0): value[side] = 1
                if new: self.last = data[v]
                self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
        if data: self.last = data[-1]
        # Assemble the output string with per-row colors and cursor moves
        self.out = ""
        if self.height == 1:
            self.out += f'{"" if not self.colors else (THEME.inactive_fg if self.last < 5 else self.colors[self.last])}{self.graphs[self.current][0]}'
        elif self.height > 1:
            for h in range(self.height):
                if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
                self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
        if self.colors: self.out += f'{Term.fg}'
    def __call__(self, value: Union[int, None] = None) -> str:
        '''Add one value to the graph (scrolling it left one half-cell) and return the rendered string.'''
        if not isinstance(value, int): return self.out
        self.current = not self.current  # alternate buffers so the graph scrolls by half a cell
        if self.height == 1:
            # Drop the leading filler (cursor-skip escape) or the leading cell
            if self.graphs[self.current][0].startswith(self.symbol[0.0]):
                self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
            else:
                self.graphs[self.current][0] = self.graphs[self.current][0][1:]
        else:
            for n in range(self.height):
                self.graphs[self.current][n] = self.graphs[self.current][n][1:]
        if self.max_value: value = min_max((value + self.offset) * 100 // (self.max_value + self.offset), min_max(value + self.offset, 0, self.lowest), 100)
        self._create([value])
        return self.out
    def add(self, value: Union[int, None] = None) -> str:
        '''Alias for __call__.'''
        return self.__call__(value)
    def __str__(self):
        return self.out
    def __repr__(self):
        return repr(self.out)
class Graphs:
    '''Holds all graphs and lists of graphs for dynamically created graphs'''
    cpu: Dict[str, Graph] = {}                           # "up"/"down" cpu box graphs
    cores: List[Graph] = [NotImplemented] * THREADS      # one 1-row graph per logical core
    temps: List[Graph] = [NotImplemented] * (THREADS + 1)  # temperature graphs; index 0 fed from cpu_temp[0]
    net: Dict[str, Graph] = {}
    detailed_cpu: Graph = NotImplemented
    detailed_mem: Graph = NotImplemented
    pid_cpu: Dict[int, Graph] = {}                       # per-process cpu graphs keyed by pid
    disk_io: Dict[str, Dict[str, Graph]] = {}            # per-disk io graphs keyed by disk name then "rw"/"read"/"write"
class Meter:
    '''Creates a percentage meter
    __init__(value, width, theme, gradient_name) to create new meter
    __call__(value) to set value and return meter as a string
    __str__ returns last set meter as a string
    '''
    out: str                    # last rendered meter string
    color_gradient: List[str]   # 101-entry gradient from THEME, indexed by percent
    color_inactive: Color       # color for the unfilled part of the meter
    gradient_name: str
    width: int                  # meter width in characters
    invert: bool                # reverse the gradient direction
    saved: Dict[int, str]       # render cache keyed by (clamped) value

    def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
        self.gradient_name = gradient_name
        self.color_gradient = THEME.gradient[gradient_name]
        self.color_inactive = THEME.meter_bg
        self.width = width
        self.saved = {}
        self.invert = invert
        self.out = self._create(value)

    def __call__(self, value: Union[int, None]) -> str:
        '''Set a new value (clamped to 0-100) and return the rendered meter string.

        Non-int values (e.g. None) return the last rendered meter unchanged.
        '''
        if not isinstance(value, int): return self.out
        value = self._clamp(value)
        if value in self.saved:
            self.out = self.saved[value]
        else:
            self.out = self._create(value)
        return self.out

    def __str__(self) -> str:
        return self.out

    def __repr__(self):
        return repr(self.out)

    @staticmethod
    def _clamp(value: int) -> int:
        '''Clamp value into the valid 0-100 percent range.

        Fix: the original code mapped negative values to 100, which rendered a
        completely FULL meter for a negative percentage; negatives now clamp to 0.
        '''
        if value > 100: return 100
        if value < 0: return 0
        return value

    def _create(self, value: int) -> str:
        '''Render the meter string for value and memoize it in self.saved.'''
        value = self._clamp(value)
        out: str = ""
        for i in range(1, self.width + 1):
            if value >= round(i * 100 / self.width):
                # Filled segment, colored by its position along the gradient
                out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
            else:
                # Fill the remainder with the inactive color in one go and stop
                out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
                break
        else:
            # Loop completed without break (meter fully filled): reset foreground
            out += f'{Term.fg}'
        if not value in self.saved:
            self.saved[value] = out
        return out
class Meters:
    '''Holds all meter instances (and mem graphs when graph mode is on) for the boxes.'''
    cpu: Meter
    battery: Meter
    mem: Dict[str, Union[Meter, Graph]] = {}    # keyed by mem field name ("used", "free", ...)
    swap: Dict[str, Union[Meter, Graph]] = {}   # keyed by swap field name
    disks_used: Dict[str, Meter] = {}           # keyed by disk name
    disks_free: Dict[str, Meter] = {}           # keyed by disk name
class Box:
    '''Box class with all needed attributes for create_box() function'''
    name: str
    num: int = 0
    boxes: List = []        # names of the currently shown boxes
    view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
    view_mode: str
    # Match the configured boxes against a known preset, else register a "user" preset
    for view_mode in view_modes:
        if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
            break
    else:
        view_mode = "user"
        view_modes["user"] = CONFIG.shown_boxes.split()
    height_p: int           # height as percent of terminal height
    width_p: int            # width as percent of terminal width
    x: int
    y: int
    width: int
    height: int
    out: str
    bg: str
    _b_cpu_h: int           # computed cpu box height, used by the other boxes' layout
    _b_mem_h: int           # computed mem box height
    redraw_all: bool
    buffers: List[str] = []
    clock_on: bool = False
    clock: str = ""
    clock_len: int = 0
    resized: bool = False
    # Substitution values for custom tokens in the clock format string
    clock_custom_format: Dict[str, Any] = {
        "/host" : os.uname()[1],
        "/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
        "/uptime" : "",
    }
    # Strip the ".local" suffix macOS appends to hostnames
    if clock_custom_format["/host"].endswith(".local"):
        clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
    @classmethod
    def calc_sizes(cls):
        '''Calculate sizes of boxes'''
        cls.boxes = CONFIG.shown_boxes.split()
        for sub in cls.__subclasses__():
            sub._calc_size() # type: ignore
            sub.resized = True # type: ignore
    @classmethod
    def draw_update_ms(cls, now: bool = True):
        '''Draw the "+ NNNms -" update interval widget on the cpu box top border.'''
        if not "cpu" in cls.boxes: return
        update_string: str = f'{CONFIG.update_ms}ms'
        xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
        # Register clickable areas for the +/- buttons once
        if not "+" in Key.mouse:
            Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
            Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
        Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
            f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
            f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
        if now and not Menu.active:
            Draw.clear("update_ms")
            # Redraw the battery widget so it is not overwritten
            if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
                Draw.out("battery")
    @classmethod
    def draw_clock(cls, force: bool = False):
        '''Draw the clock on the cpu box top border; skips if unchanged unless force=True.'''
        if not "cpu" in cls.boxes or not cls.clock_on: return
        out: str = ""
        if force: pass
        elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
        clock_string = cls.clock = strftime(CONFIG.draw_clock)
        # Expand custom tokens like /host, /user and /uptime
        for custom in cls.clock_custom_format:
            if custom in clock_string:
                if custom == "/uptime": cls.clock_custom_format["/uptime"] = CpuCollector.uptime
                clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
        clock_len = len(clock_string[:(CpuBox.width-56)])
        # If the clock shrank, overwrite the old span with box border characters first
        if cls.clock_len != clock_len and not CpuBox.resized:
            out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
        cls.clock_len = clock_len
        now: bool = False if Menu.active else not force
        out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
            f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
        Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
        if now and not Menu.active:
            if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
                Draw.out("battery")
    @classmethod
    def empty_bg(cls) -> str:
        '''Return the screen shown when all boxes are hidden: banner plus toggle hints.'''
        return (f'{Term.clear}' +
            (f'{Banner.draw(Term.height // 2 - 10, center=True)}'
            f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
            f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}' if Term.height > 22 else "") +
            f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
            f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
            f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
            f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
            f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
            f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
            f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
    @classmethod
    def draw_bg(cls, now: bool = True):
        '''Draw all boxes outlines and titles'''
        out: str = ""
        if not cls.boxes:
            out = cls.empty_bg()
        else:
            out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
        Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
        cls.draw_update_ms(now=now)
        if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
    '''Mixin holding the geometry of an inner box drawn inside a main Box.'''
    box_x: int = 0
    box_y: int = 0
    box_width: int = 0
    box_height: int = 0
    box_columns: int = 0    # number of columns of per-core entries in the inner box
    column_size: int = 0    # column width class: 0=compact, 1=medium, 2=wide
class CpuBox(Box, SubBox):
    '''The cpu box: usage graphs, per-core meters, temperatures, load average and battery.'''
    name = "cpu"
    num = 1
    x = 1
    y = 1
    height_p = 32
    width_p = 100
    min_w: int = 60
    min_h: int = 8
    resized: bool = True
    redraw: bool = False
    buffer: str = "cpu"
    battery_percent: int = 1000     # 1000 acts as "unset" sentinel
    battery_secs: int = 0
    battery_status: str = "Unknown"
    old_battery_pos = 0
    old_battery_len = 0
    battery_path: Union[str, None] = ""   # "" = not probed yet, None = probed and not found
    battery_clear: bool = False
    battery_symbols: Dict[str, str] = {"Charging": "▲",
                                       "Discharging": "▼",
                                       "Full": "■",
                                       "Not charging": "■"}
    clock_block: bool = True
    Box.buffers.append(buffer)
    @classmethod
    def _calc_size(cls):
        '''Calculate box and inner (per-core) box dimensions from terminal size and thread count.'''
        if not "cpu" in cls.boxes:
            Box._b_cpu_h = 0
            cls.width = Term.width
            return
        cpu = CpuCollector
        height_p: int
        if cls.boxes == ["cpu"]:
            height_p = 100
        else:
            height_p = cls.height_p
        cls.width = round(Term.width * cls.width_p / 100)
        cls.height = round(Term.height * height_p / 100)
        if cls.height < 8: cls.height = 8
        Box._b_cpu_h = cls.height
        #THREADS = 64
        # Pick the widest per-core column layout that fits in 2/3 of the box width
        cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
        if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
            cls.column_size = 2
            cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
        elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
            cls.column_size = 1
            cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
        elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
            cls.column_size = 0
        else:
            cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
        if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
        cls.box_height = ceil(THREADS / cls.box_columns) + 4
        if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
        # Right-align the inner box and vertically center it
        cls.box_x = (cls.width - 1) - cls.box_width
        cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
    @classmethod
    def _draw_bg(cls) -> str:
        '''Return the static outline: box border, "Menu" button and inner box with cpu name.'''
        if not "cpu" in cls.boxes: return ""
        if not "M" in Key.mouse:
            Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
        return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
        f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
        f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
    @classmethod
    def battery_activity(cls) -> bool:
        '''Poll battery state via psutil/sysfs; return True if anything changed (or a redraw is pending).'''
        if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
            if cls.battery_percent != 1000:
                cls.battery_clear = True
            return False
        # Locate the sysfs battery directory once (Linux only)
        if cls.battery_path == "":
            cls.battery_path = None
            if os.path.isdir("/sys/class/power_supply"):
                for directory in sorted(os.listdir("/sys/class/power_supply")):
                    if directory.startswith('BAT') or 'battery' in directory.lower():
                        cls.battery_path = f'/sys/class/power_supply/{directory}/'
                        break
        return_true: bool = False
        percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
        if percent != cls.battery_percent:
            cls.battery_percent = percent
            return_true = True
        seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
        if seconds != cls.battery_secs:
            cls.battery_secs = seconds
            return_true = True
        status: str = "not_set"
        if cls.battery_path:
            status = readfile(cls.battery_path + "status", default="not_set")
        # Fall back to psutil's power_plugged flag when sysfs gives no status
        if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
            status = "Charging" if cls.battery_percent < 100 else "Full"
        elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
            status = "Discharging"
        elif status == "not_set":
            status = "Unknown"
        if status != cls.battery_status:
            cls.battery_status = status
            return_true = True
        return return_true or cls.resized or cls.redraw or Menu.active
    @classmethod
    def _draw_fg(cls):
        '''Render the dynamic cpu box contents: graphs, meters, temps, battery and load average.'''
        if not "cpu" in cls.boxes: return
        cpu = CpuCollector
        if cpu.redraw: cls.redraw = True
        out: str = ""
        out_misc: str = ""
        lavg: str = ""
        x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
        bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
        hh: int = ceil(h / 2)
        hh2: int = h - hh
        mid_line: bool = False
        temp: int = 0
        unit: str = ""
        # Split the graph area when upper and lower graphs show different stats
        if not CONFIG.cpu_single_graph and CONFIG.cpu_graph_upper != CONFIG.cpu_graph_lower:
            mid_line = True
            if h % 2: hh = floor(h / 2)
            else: hh2 -= 1
        hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
        ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
        if cls.resized or cls.redraw:
            # (Re)build graphs and meters for the new geometry
            if not "m" in Key.mouse:
                Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
            out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
            Graphs.cpu["up"] = Graph(w - bw - 3, (h if CONFIG.cpu_single_graph else hh), THEME.gradient["cpu"], cpu.cpu_upper, round_up_low=True)
            if not CONFIG.cpu_single_graph:
                Graphs.cpu["down"] = Graph(w - bw - 3, hh2, THEME.gradient["cpu"], cpu.cpu_lower, invert=CONFIG.cpu_invert_lower, round_up_low=True)
            Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
            if cls.column_size > 0 or ct_width > 0:
                for n in range(THREADS):
                    Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
            if cpu.got_sensors:
                Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
                if cls.column_size > 1:
                    for n in range(1, THREADS + 1):
                        if not cpu.cpu_temp[n]:
                            continue
                        Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
            Draw.buffer("cpu_misc", out_misc, only_save=True)
        if CONFIG.show_battery and cls.battery_activity():
            # Render the battery widget on the top border
            bat_out: str = ""
            if cls.battery_secs > 0:
                battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
            else:
                battery_time = ""
            if not hasattr(Meters, "battery") or cls.resized:
                Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
            battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
            battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
            battery_pos = cls.width - battery_len - 17
            # Clear the previous widget span if it moved or changed length
            if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
                bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
            cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
            bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
                ("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
                f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
            Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
        elif cls.battery_clear:
            # Battery disappeared: erase the widget and reset cached state
            out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
            cls.battery_clear = False
            cls.battery_percent = 1000
            cls.battery_secs = 0
            cls.battery_status = "Unknown"
            cls.old_battery_pos = 0
            cls.old_battery_len = 0
            cls.battery_path = ""
            Draw.clear("battery", saved=True)
        cx = cy = cc = 0
        ccw = (bw + 1) // cls.box_columns
        if cpu.cpu_freq:
            freq: str = f'{cpu.cpu_freq} Mhz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
            out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
        out += f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_upper[-1])}'
        if mid_line:
            # Divider labeled with the upper/lower graph stat names
            out += (f'{Mv.to(y+hh, x-1)}{THEME.cpu_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (w - bw - 3)}{THEME.div_line(Symbol.title_left)}'
                f'{Mv.to(y+hh, x+((w-bw)//2)-((len(CONFIG.cpu_graph_upper)+len(CONFIG.cpu_graph_lower))//2)-4)}{THEME.main_fg}{CONFIG.cpu_graph_upper}{Mv.r(1)}▲▼{Mv.r(1)}{CONFIG.cpu_graph_lower}')
        if not CONFIG.cpu_single_graph and Graphs.cpu.get("down"):
            out += f'{Mv.to(y + hh + (1 * mid_line), x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_lower[-1])}'
        out += (f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
            f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
        if cpu.got_sensors:
            try:
                temp, unit = temperature(cpu.cpu_temp[0][-1], CONFIG.temp_scale)
                out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
                    f'{temp:>4}{THEME.main_fg}{unit}')
            except:
                # Sensor read failed; disable temps for the rest of the session
                cpu.got_sensors = False
        cy += 1
        # One line per logical core, wrapping into columns
        for n in range(1, THREADS + 1):
            out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
            if cls.column_size > 0 or ct_width > 0:
                out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
            else:
                out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
            out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
            if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
                try:
                    temp, unit = temperature(cpu.cpu_temp[n][-1], CONFIG.temp_scale)
                    if cls.column_size > 1:
                        out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[n][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
                    else:
                        out += f'{THEME.gradient["temp"][min_max(temp, 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}'
                    out += f'{temp:>4}{THEME.main_fg}{unit}'
                except:
                    cpu.got_sensors = False
            elif cpu.got_sensors and not hide_cores:
                out += f'{Mv.r(max(6, 6 * cls.column_size))}'
            out += f'{THEME.div_line(Symbol.v_line)}'
            cy += 1
            # Move to the next column when this one is full
            if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
                cc += 1; cy = 1; cx = ccw * cc
                if cc == cls.box_columns: break
        if cy < bh - 1: cy = bh - 1
        if cy < bh and cc < cls.box_columns:
            # Load average line, formatted to fit the current column width
            if cls.column_size == 2 and cpu.got_sensors:
                lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
            elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
                lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
            elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
                lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
            else:
                lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
            out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
        if CONFIG.show_uptime:
            out += f'{Mv.to(y + (0 if not CONFIG.cpu_invert_lower or CONFIG.cpu_single_graph else h - 1), x + 1)}{THEME.graph_text}{Fx.trans("up " + cpu.uptime)}'
        Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
        cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
    '''The memory box: mem/swap meters or graphs plus the optional disks panel with io stats.'''
    name = "mem"
    num = 2
    height_p = 38
    width_p = 45
    min_w: int = 36
    min_h: int = 10
    x = 1
    y = 1
    mem_meter: int = 0      # width of the mem/swap meters
    mem_size: int = 0       # layout density: 1=minimal, 2=medium, 3=full with values
    disk_meter: int = 0     # width of the disk usage meters
    divider: int = 0        # x position of the mem/disks divider line
    mem_width: int = 0
    disks_width: int = 0
    disks_io_h: int = 0     # rows available per disk for io graphs in io mode
    disks_io_order: List[str] = []  # disks with io graphs first, then the rest
    graph_speeds: Dict[str, int] = {}  # per-disk graph speed ceilings from CONFIG.io_graph_speeds
    graph_height: int
    resized: bool = True
    redraw: bool = False
    buffer: str = "mem"
    swap_on: bool = CONFIG.show_swap
    Box.buffers.append(buffer)
    mem_names: List[str] = ["used", "available", "cached", "free"]
    swap_names: List[str] = ["used", "free"]
    @classmethod
    def _calc_size(cls):
        '''Calculate box geometry, meter widths and layout density from terminal size and config.'''
        if not "mem" in cls.boxes:
            Box._b_mem_h = 0
            cls.width = Term.width
            return
        width_p: int; height_p: int
        # Expand to fill space left by hidden proc/cpu/net boxes
        if not "proc" in cls.boxes:
            width_p = 100
        else:
            width_p = cls.width_p
        if not "cpu" in cls.boxes:
            height_p = 60 if "net" in cls.boxes else 98
        elif not "net" in cls.boxes:
            height_p = 98 - CpuBox.height_p
        else:
            height_p = cls.height_p
        cls.width = round(Term.width * width_p / 100)
        cls.height = round(Term.height * height_p / 100) + 1
        if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
        Box._b_mem_h = cls.height
        cls.y = Box._b_cpu_h + 1
        if CONFIG.show_disks:
            # Split the box roughly in half between mem and disks
            cls.mem_width = ceil((cls.width - 3) / 2)
            cls.disks_width = cls.width - cls.mem_width - 3
            if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
            cls.divider = cls.x + cls.mem_width
        else:
            cls.mem_width = cls.width - 1
        item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
        # Pick layout density based on available height/width
        if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
        elif cls.mem_width > 25: cls.mem_size = 2
        else: cls.mem_size = 1
        cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
        if cls.mem_size == 1: cls.mem_meter += 6
        if cls.mem_meter < 1: cls.mem_meter = 0
        if CONFIG.mem_graphs:
            cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
            if cls.graph_height == 0: cls.graph_height = 1
            if cls.graph_height > 1: cls.mem_meter += 6
        else:
            cls.graph_height = 0
        if CONFIG.show_disks:
            cls.disk_meter = cls.width - cls.mem_width - 23
            if cls.disks_width < 25:
                cls.disk_meter += 10
            if cls.disk_meter < 1: cls.disk_meter = 0
    @classmethod
    def _draw_bg(cls) -> str:
        '''Return the static outline: box border, divider and the clickable "disks" toggle.'''
        if not "mem" in cls.boxes: return ""
        out: str = ""
        out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
        if CONFIG.show_disks:
            out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
                f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
                f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
                f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
            Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
        else:
            out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
            Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
        return out
    @classmethod
    def _draw_fg(cls):
        '''Render the dynamic contents: mem/swap meters or graphs and the disks panel.'''
        if not "mem" in cls.boxes: return
        mem = MemCollector
        if mem.redraw: cls.redraw = True
        out: str = ""
        out_misc: str = ""
        gbg: str = ""
        gmv: str = ""
        gli: str = ""
        x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
        if cls.resized or cls.redraw:
            # Rebuild all meters/graphs for the new geometry
            cls.redraw = True
            cls._calc_size()
            out_misc += cls._draw_bg()
            Meters.mem = {}
            Meters.swap = {}
            Meters.disks_used = {}
            Meters.disks_free = {}
            if cls.mem_meter > 0:
                for name in cls.mem_names:
                    if CONFIG.mem_graphs:
                        Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
                    else:
                        Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
                if cls.swap_on:
                    for name in cls.swap_names:
                        if CONFIG.swap_disk and CONFIG.show_disks:
                            break
                        elif CONFIG.mem_graphs and not CONFIG.swap_disk:
                            Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
                        else:
                            Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
            if CONFIG.show_disks and mem.disks:
                if CONFIG.show_io_stat or CONFIG.io_mode:
                    d_graph: List[str] = []
                    d_no_graph: List[str] = []
                    l_vals: List[Tuple[str, int, str, bool]] = []
                    if CONFIG.io_mode:
                        cls.disks_io_h = (cls.height - 2 - len(mem.disks)) // max(1, len(mem.disks_io_dict))
                        if cls.disks_io_h < 2: cls.disks_io_h = 1 if CONFIG.io_graph_combined else 2
                    else:
                        cls.disks_io_h = 1
                    # Parse "disk:speed,disk:speed" config into per-disk graph ceilings
                    if CONFIG.io_graph_speeds and not cls.graph_speeds:
                        try:
                            cls.graph_speeds = { spds.split(":")[0] : int(spds.split(":")[1]) for spds in list(i.strip() for i in CONFIG.io_graph_speeds.split(","))}
                        except (KeyError, ValueError):
                            errlog.error("Wrong formatting in io_graph_speeds variable. Using defaults.")
                    for name in mem.disks.keys():
                        if name in mem.disks_io_dict:
                            d_graph.append(name)
                        else:
                            d_no_graph.append(name)
                            continue
                        if CONFIG.io_graph_combined or not CONFIG.io_mode:
                            l_vals = [("rw", cls.disks_io_h, "available", False)]
                        else:
                            l_vals = [("read", cls.disks_io_h // 2, "free", False), ("write", cls.disks_io_h // 2, "used", True)]
                        Graphs.disk_io[name] = {_name : Graph(width=cls.disks_width - (6 if not CONFIG.io_mode else 0), height=_height, color=THEME.gradient[_gradient],
                            data=mem.disks_io_dict[name][_name], invert=_invert, max_value=cls.graph_speeds.get(name, 10), no_zero=True)
                            for _name, _height, _gradient, _invert in l_vals}
                    cls.disks_io_order = d_graph + d_no_graph
                if cls.disk_meter > 0:
                    for n, name in enumerate(mem.disks.keys()):
                        if n * 2 > h: break
                        Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
                        if len(mem.disks) * 3 <= h + 1:
                            Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
            # Clickable toggles on the top border: graph mode, swap-as-disk, io mode
            if not "g" in Key.mouse:
                Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
            out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
                f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
            if CONFIG.show_disks:
                if not "s" in Key.mouse:
                    Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
                    f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
                if not "i" in Key.mouse:
                    Key.mouse["i"] = [[x + w - 10 + i, y-1] for i in range(2)]
                out_misc += (f'{Mv.to(y-1, x + w - 11)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.io_mode else ""}'
                    f'{THEME.hi_fg("i")}{THEME.title("o")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
            if Collector.collect_interrupt: return
            Draw.buffer("mem_misc", out_misc, only_save=True)
        try:
            #* Mem
            cx = 1; cy = 1
            out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
            if cls.graph_height > 0:
                gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
            if cls.graph_height >= 2:
                gbg = f'{Mv.l(1)}'
                gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
            big_mem: bool = cls.mem_width > 21
            for name in cls.mem_names:
                if cy > h - 1: break
                if Collector.collect_interrupt: return
                if cls.mem_size > 2:
                    out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
                        f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
                    cy += 2 if not cls.graph_height else cls.graph_height + 1
                else:
                    out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
                    cy += 1 if not cls.graph_height else cls.graph_height
            #* Swap
            if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
                if h - cy > 5:
                    if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
                    cy += 1
                out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
                cy += 1
                for name in cls.swap_names:
                    if cy > h - 1: break
                    if Collector.collect_interrupt: return
                    if cls.mem_size > 2:
                        out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
                            f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
                        cy += 2 if not cls.graph_height else cls.graph_height + 1
                    else:
                        out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
            if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
            #* Disks
            if CONFIG.show_disks and mem.disks:
                cx = x + cls.mem_width - 1; cy = 0
                big_disk: bool = cls.disks_width >= 25
                gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
                if CONFIG.io_mode:
                    # io mode: big read/write (or combined) graphs per disk
                    for name in cls.disks_io_order:
                        item = mem.disks[name]
                        io_item = mem.disks_io_dict.get(name, {})
                        if Collector.collect_interrupt: return
                        if cy > h - 1: break
                        out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
                        if big_disk:
                            out += Fx.trans(f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(str(item["used_percent"])) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["used_percent"]}%')
                        cy += 1
                        if io_item:
                            if cy > h - 1: break
                            if CONFIG.io_graph_combined:
                                if cls.disks_io_h <= 1:
                                    out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}'
                                out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
                                    f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io"] or "RW"}')
                                cy += cls.disks_io_h
                            else:
                                if cls.disks_io_h <= 3:
                                    out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}{Mv.to(y+cy+1, x+cx-1)}{" " * 5}'
                                out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["read"](None if cls.redraw else mem.disks_io_dict[name]["read"][-1])}'
                                    f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io_r"] or "R"}')
                                cy += cls.disks_io_h // 2
                                out += f'{Mv.to(y+cy, x+cx-1)}{Graphs.disk_io[name]["write"](None if cls.redraw else mem.disks_io_dict[name]["write"][-1])}'
                                cy += cls.disks_io_h // 2
                                out += f'{Mv.to(y+cy-1, x+cx-1)}{THEME.main_fg}{item["io_w"] or "W"}'
                else:
                    # Normal mode: used/free meters per disk with optional small io graph
                    for name, item in mem.disks.items():
                        if Collector.collect_interrupt: return
                        if not name in Meters.disks_used:
                            continue
                        if cy > h - 1: break
                        out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
                        if big_disk:
                            out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{Fx.trans(item["io"])}'
                        cy += 1
                        if cy > h - 1: break
                        if CONFIG.show_io_stat and name in Graphs.disk_io:
                            out += f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{Fx.ub}{" IO: " if big_disk else " IO " + Mv.l(2)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
                            if not big_disk and item["io"]:
                                out += f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{THEME.main_fg}{item["io"]}'
                            cy += 1
                            if cy > h - 1: break
                        out += Mv.to(y+cy, x+cx) + (f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U ")
                        out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
                        cy += 1
                        if len(mem.disks) * 3 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1:
                            if cy > h - 1: break
                            out += Mv.to(y+cy, x+cx)
                            out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
                            out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
                            cy += 1
                            if len(mem.disks) * 4 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1: cy += 1
        except (KeyError, TypeError):
            # Collector data can change shape mid-draw; skip this frame
            return
        Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
        cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
    """Box drawing network download/upload graphs and per-interface stats."""
    name = "net"
    num = 3
    height_p = 30   # percent of terminal height
    width_p = 45    # percent of terminal width
    min_w: int = 36
    min_h: int = 6
    x = 1
    y = 1
    resized: bool = True
    redraw: bool = True
    graph_height: Dict[str, int] = {}  # rows allotted to each direction graph
    symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
    buffer: str = "net"
    Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "net" in cls.boxes:
cls.width = Term.width
return
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
    @classmethod
    def _draw_bg(cls) -> str:
        # Draws the outer net box frame plus the inner Download/Upload stats box.
        # NOTE(review): the backslash continuation below embeds the next line's leading
        # whitespace into the returned string; presumably harmless because all drawing
        # is cursor-positioned escape sequences — confirm before reformatting.
        if not "net" in cls.boxes: return ""
        return f'{create_box(box=cls, line_color=THEME.net_box)}\
    {create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
    @classmethod
    def _draw_fg(cls):
        """Draw graphs, speed strings and toggle buttons for the selected NIC."""
        if not "net" in cls.boxes: return
        net = NetCollector
        if net.redraw: cls.redraw = True
        if not net.nic: return
        out: str = ""
        out_misc: str = ""
        # Outer drawable area and inner stats box area (1-cell inset from the frames).
        x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
        bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
        # True when counters have been zeroed by the user ("z" toggle is then bold).
        reset: bool = bool(net.stats[net.nic]["download"]["offset"])
        # Buttons and titles only need redrawing on resize or full redraw.
        if cls.resized or cls.redraw:
            out_misc += cls._draw_bg()
            if not "b" in Key.mouse:
                Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
                Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
                Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
            out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
                f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
                f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            # Optional buttons appear only when there is room left of the NIC selector.
            if w - len(net.nic[:10]) - 20 > 6:
                if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
                    f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            if w - len(net.nic[:10]) - 20 > 13:
                if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
                out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
                    f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            if net.address and w - len(net.nic[:10]) - len(net.address) - 20 > 15:
                out_misc += (f'{Mv.to(y-1, x+7)}{THEME.net_box(Symbol.title_left)}{Fx.b}{THEME.title(net.address)}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
            Draw.buffer("net_misc", out_misc, only_save=True)
        # Per-direction graph plus the textual speed/top/total rows in the inner box.
        cy = 0
        for direction in ["download", "upload"]:
            strings = net.strings[net.nic][direction]
            stats = net.stats[net.nic][direction]
            if cls.redraw: stats["redraw"] = True
            if stats["redraw"] or cls.resized:
                # Upload graph is inverted so it grows downwards from the divider.
                Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
                    invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None, round_up_low=True)
            out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
            out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
                ("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
            cy += 1 if bh != 3 else 2
            if bh >= 6:
                out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
                cy += 1
            if bh >= 4:
                out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
                if bh > 2 and bh % 2: cy += 2
                else: cy += 1
            stats["redraw"] = False
        # Graph scale labels at the top-left and bottom-left of the box.
        out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
            f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
        Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
        cls.redraw = cls.resized = False
class ProcBox(Box):
    """Box showing the sortable, filterable process list with optional detail view."""
    name = "proc"
    num = 4
    height_p = 68
    width_p = 55
    min_w: int = 44
    min_h: int = 16
    x = 1
    y = 1
    current_y: int = 0      # top row currently in use (shifts down while detail view is open)
    current_h: int = 0      # height currently in use
    select_max: int = 0     # number of selectable process rows
    selected: int = 0       # cursor position within the visible rows (0 = nothing selected)
    selected_pid: int = 0
    last_selection: int = 0 # remembered row to restore when leaving the detail view
    filtering: bool = False # True while the user is typing in the search filter
    moved: bool = False
    start: int = 1          # 1-based index of the first visible process
    count: int = 0          # draw counter used to periodically prune dead pid graphs
    s_len: int = 0
    detailed: bool = False
    detailed_x: int = 0
    detailed_y: int = 0
    detailed_width: int = 0
    detailed_height: int = 8
    resized: bool = True
    redraw: bool = True
    buffer: str = "proc"
    pid_counter: Dict[int, int] = {}  # per-pid count of consecutive low-cpu updates
    Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "proc" in cls.boxes:
cls.width = Term.width
return
width_p: int; height_p: int
if not "net" in cls.boxes and not "mem" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if not "proc" in cls.boxes: return ""
return create_box(box=cls, line_color=THEME.proc_box)
    @classmethod
    def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
        """Translate a key press or mouse event into selection/scroll changes.

        key: key name ("up", "down", "page_up", "mouse_click", ...).
        mouse_pos: terminal (column, row) of the click, used for mouse events only.
        Triggers a redraw-only collection pass when the selection actually moved.
        """
        old: Tuple[int, int] = (cls.start, cls.selected)
        new_sel: int
        if key in ["up", "k"]:
            # At the top visible row: scroll the list instead of moving the cursor.
            if cls.selected == 1 and cls.start > 1:
                cls.start -= 1
            elif cls.selected == 1:
                cls.selected = 0
            elif cls.selected > 1:
                cls.selected -= 1
        elif key in ["down", "j"]:
            # Restore the remembered row when moving down from the detail view.
            if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
                cls.selected = cls.last_selection
                cls.last_selection = 0
            if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
                cls.start += 1
            elif cls.selected < cls.select_max:
                cls.selected += 1
        elif key == "mouse_scroll_up" and cls.start > 1:
            cls.start -= 5
        elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
            cls.start += 5
        elif key == "page_up" and cls.start > 1:
            cls.start -= cls.select_max
        elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
            cls.start += cls.select_max
        elif key == "home":
            if cls.start > 1: cls.start = 1
            elif cls.selected > 0: cls.selected = 0
        elif key == "end":
            if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
            elif cls.selected < cls.select_max: cls.selected = cls.select_max
        elif key == "mouse_click":
            # Click inside the scrollbar column jumps the list; elsewhere selects a row.
            if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
                if mouse_pos[1] == cls.current_y + 2:
                    cls.start = 1
                elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
                    cls.start = ProcCollector.num_procs - cls.select_max + 1
                else:
                    cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
            else:
                new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
                # Clicking the already-selected row acts as pressing enter.
                if new_sel > 0 and new_sel == cls.selected:
                    Key.list.insert(0, "enter")
                    return
                elif new_sel > 0 and new_sel != cls.selected:
                    if cls.last_selection: cls.last_selection = 0
                    cls.selected = new_sel
        elif key == "mouse_unselect":
            cls.selected = 0
        # Clamp scroll offset and cursor to the valid range after any movement.
        if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
        elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
        if cls.start < 1: cls.start = 1
        if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
        elif cls.selected > cls.select_max: cls.selected = cls.select_max
        if cls.selected < 0: cls.selected = 0
        if old != (cls.start, cls.selected):
            cls.moved = True
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if not "proc" in cls.boxes: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "T", "K", "I", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details.get("killed", False)
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["T"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "K" in Key.mouse: Key.mouse["K"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "I" in Key.mouse: Key.mouse["I"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("F" if cls.filtering and proc.case_sensitive else "f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "T" in Key.mouse: Key.mouse["T"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "K" in Key.mouse: Key.mouse["K"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "I" in Key.mouse: Key.mouse["I"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count == 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
    '''Data collector master class
    * .start(): Starts collector thread
    * .stop(): Stops collector thread
    * .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
    stopping: bool = False
    started: bool = False
    draw_now: bool = False
    redraw: bool = False
    only_draw: bool = False
    thread: threading.Thread
    # collect_run: signals the worker to start a pass; collect_idle: set while the
    # worker is waiting; collect_done: set when a pass has finished.
    collect_run = threading.Event()
    collect_idle = threading.Event()
    collect_idle.set()
    collect_done = threading.Event()
    collect_queue: List = []
    collect_interrupt: bool = False
    proc_interrupt: bool = False
    use_draw_list: bool = False
    proc_counter: int = 1  # counts passes to throttle ProcCollector via proc_update_mult
    @classmethod
    def start(cls):
        # Spawn the background collection/draw thread.
        cls.stopping = False
        cls.thread = threading.Thread(target=cls._runner, args=())
        cls.thread.start()
        cls.started = True
    @classmethod
    def stop(cls):
        # Signal the worker to exit and release anything waiting on the events.
        if cls.started and cls.thread.is_alive():
            cls.stopping = True
            cls.started = False
            cls.collect_queue = []
            cls.collect_idle.set()
            cls.collect_done.set()
            try:
                cls.thread.join()
            except:
                # NOTE(review): bare except — presumably only guards join() errors
                # during shutdown; consider narrowing to Exception.
                pass
    @classmethod
    def _runner(cls):
        '''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
        draw_buffers: List[str] = []
        debugged: bool = False
        try:
            while not cls.stopping:
                if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
                # Poll with a timeout so the clock keeps updating and stop() is noticed.
                cls.collect_run.wait(0.1)
                if not cls.collect_run.is_set():
                    continue
                draw_buffers = []
                cls.collect_interrupt = False
                cls.collect_run.clear()
                cls.collect_idle.clear()
                cls.collect_done.clear()
                if DEBUG and not debugged: TimeIt.start("Collect and draw")
                while cls.collect_queue:
                    collector = cls.collect_queue.pop()
                    if not cls.only_draw:
                        collector._collect()
                    collector._draw()
                    if cls.use_draw_list: draw_buffers.append(collector.buffer)
                    if cls.collect_interrupt: break
                if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
                if cls.draw_now and not Menu.active and not cls.collect_interrupt:
                    if cls.use_draw_list: Draw.out(*draw_buffers)
                    else: Draw.out()
                if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
                cls.collect_idle.set()
                cls.collect_done.set()
        except Exception as e:
            errlog.exception(f'Data collection thread failed with exception: {e}')
            # Release waiters before shutting the program down.
            cls.collect_idle.set()
            cls.collect_done.set()
            clean_quit(1, thread=True)
    @classmethod
    def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
        '''Setup collect queue for _runner'''
        # Optionally interrupt an in-flight pass, then wait for the worker to go idle.
        cls.collect_interrupt = interrupt
        cls.proc_interrupt = proc_interrupt
        cls.collect_idle.wait()
        cls.collect_interrupt = False
        cls.proc_interrupt = False
        cls.use_draw_list = False
        cls.draw_now = draw_now
        cls.redraw = redraw
        cls.only_draw = only_draw
        if collectors:
            cls.collect_queue = [*collectors]
            cls.use_draw_list = True
            if ProcCollector in cls.collect_queue:
                cls.proc_counter = 1
        else:
            # Full pass over all collectors; ProcCollector is throttled so it only
            # runs every proc_update_mult'th pass.
            cls.collect_queue = list(cls.__subclasses__())
            if CONFIG.proc_update_mult > 1:
                if cls.proc_counter > 1:
                    cls.collect_queue.remove(ProcCollector)
                if cls.proc_counter == CONFIG.proc_update_mult:
                    cls.proc_counter = 0
                cls.proc_counter += 1
        cls.collect_run.set()
class CpuCollector(Collector):
    '''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
    cpu_usage: List[List[int]] = []   # index 0 = total usage history, 1..THREADS = per-core history
    cpu_upper: List[int] = []         # history for the upper cpu graph (source is configurable)
    cpu_lower: List[int] = []         # history for the lower cpu graph
    cpu_temp: List[List[int]] = []    # index 0 = package temp history, 1..THREADS = per-core
    cpu_temp_high: int = 0
    cpu_temp_crit: int = 0
    # Pre-create one history list per thread plus one for the total.
    for _ in range(THREADS + 1):
        cpu_usage.append([])
        cpu_temp.append([])
    freq_error: bool = False          # set after first failed cpu_freq() call to avoid log spam
    cpu_freq: int = 0
    load_avg: List[float] = []
    uptime: str = ""
    buffer: str = CpuBox.buffer
    sensor_method: str = ""           # "psutil", "coretemp", "osx-cpu-temp" or "vcgencmd"
    got_sensors: bool = False
    sensor_swap: bool = False
    cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
    @classmethod
    def _collect(cls):
        # Total cpu usage history; history length is capped relative to terminal width.
        cls.cpu_usage[0].append(ceil(psutil.cpu_percent(percpu=False)))
        if len(cls.cpu_usage[0]) > Term.width * 4:
            del cls.cpu_usage[0][0]
        cpu_times_percent = psutil.cpu_times_percent()
        # Upper/lower graph sources are configurable; "total" aliases the main history.
        for x in ["upper", "lower"]:
            if getattr(CONFIG, "cpu_graph_" + x) == "total":
                setattr(cls, "cpu_" + x, cls.cpu_usage[0])
            else:
                getattr(cls, "cpu_" + x).append(ceil(getattr(cpu_times_percent, getattr(CONFIG, "cpu_graph_" + x))))
                if len(getattr(cls, "cpu_" + x)) > Term.width * 4:
                    del getattr(cls, "cpu_" + x)[0]
        # Per-core usage histories (shorter cap than the total).
        for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
            cls.cpu_usage[n].append(ceil(thread))
            if len(cls.cpu_usage[n]) > Term.width * 2:
                del cls.cpu_usage[n][0]
        try:
            if hasattr(psutil.cpu_freq(), "current"):
                cls.cpu_freq = round(psutil.cpu_freq().current)
        except Exception as e:
            # Log the frequency failure only once to avoid spamming the error log.
            if not cls.freq_error:
                cls.freq_error = True
                errlog.error("Exception while getting cpu frequency!")
                errlog.exception(f'{e}')
            else:
                pass
        cls.load_avg = [round(lavg, 2) for lavg in psutil.getloadavg()]
        cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3].replace(" days,", "d").replace(" day,", "d")
        if CONFIG.check_temp and cls.got_sensors:
            cls._collect_temps()
    @classmethod
    def _collect_temps(cls):
        '''Collect cpu temperatures using the sensor method detected earlier.

        Fills cls.cpu_temp (index 0 = package/average, 1..THREADS = per thread),
        and sets cls.cpu_temp_high / cls.cpu_temp_crit thresholds. Falls back to
        disabling sensors (cls.got_sensors = False) on any collection error.
        '''
        # temp stays 1000 as a sentinel for "no package temperature found".
        temp: int = 1000
        cores: List[int] = []           # per-core readings in discovery order
        core_dict: Dict[int, int] = {}  # core/ccd number -> temperature
        entry_int: int = 0
        cpu_type: str = ""              # "intel", "ryzen" or "other" once identified
        c_max: int = 0
        # Sentinels for a user-selected sensor "name:label" pair from the config.
        s_name: str = "_-_"
        s_label: str = "_-_"
        if cls.sensor_method == "psutil":
            try:
                if CONFIG.cpu_sensor != "Auto":
                    s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
                for name, entries in psutil.sensors_temperatures().items():
                    for num, entry in enumerate(entries, 1):
                        # Case 1: the user explicitly selected this sensor entry.
                        if name == s_name and (entry.label == s_label or str(num) == s_label):
                            if entry.label.startswith("Package"):
                                cpu_type = "intel"
                            elif entry.label.startswith("Tdie"):
                                cpu_type = "ryzen"
                            else:
                                cpu_type = "other"
                            # Use sensor-reported high/critical thresholds when sane (>1).
                            if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
                            else: cls.cpu_temp_high = 80
                            if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
                            else: cls.cpu_temp_crit = 95
                            temp = round(entry.current)
                        # Case 2: auto-detect a package sensor (Intel "Package", AMD "Tdie").
                        elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current"):
                            if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
                                cls.sensor_swap = False
                                if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
                                else: cls.cpu_temp_high = 80
                                if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
                                else: cls.cpu_temp_crit = 95
                            cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
                            temp = round(entry.current)
                        # Case 3: per-core/ccd entries ("Core N", "Tccd N", "CPU", or unlabeled "cpu*" sensors).
                        elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current"):
                            if entry.label.startswith(("Core", "Tccd")):
                                entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
                                # Duplicate core numbers (e.g. multiple sensor chips) are
                                # re-mapped past the highest seen number on non-ryzen cpus.
                                if entry_int in core_dict and cpu_type != "ryzen":
                                    if c_max == 0:
                                        c_max = max(core_dict) + 1
                                    if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
                                        core_dict[(entry_int + c_max)] = round(entry.current)
                                    continue
                                elif entry_int in core_dict:
                                    continue
                                core_dict[entry_int] = round(entry.current)
                                continue
                            elif cpu_type in ["intel", "ryzen"]:
                                continue
                            # Unidentified cpu: adopt this entry's reading and thresholds.
                            if not cpu_type:
                                cpu_type = "other"
                                if not cls.cpu_temp_high or cls.sensor_swap:
                                    cls.sensor_swap = False
                                    if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
                                    # "cpu_thermal" defaults chosen lower — presumably Raspberry Pi class hardware.
                                    else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
                                    if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
                                    else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
                                temp = round(entry.current)
                            cores.append(round(entry.current))
                if core_dict:
                    # No package temp found: average the per-core readings instead.
                    if not temp or temp == 1000:
                        temp = sum(core_dict.values()) // len(core_dict)
                    if not cls.cpu_temp_high or not cls.cpu_temp_crit:
                        cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
                    cls.cpu_temp[0].append(temp)
                    if cpu_type == "ryzen":
                        # Ryzen reports one temp per CCD; map each thread to its CCD.
                        ccds: int = len(core_dict)
                        cores_per_ccd: int = CORES // ccds
                        z: int = 1
                        for x in range(THREADS):
                            if x == CORES:
                                z = 1
                            if CORE_MAP[x] + 1 > cores_per_ccd * z:
                                z += 1
                            if z in core_dict:
                                cls.cpu_temp[x+1].append(core_dict[z])
                    else:
                        # Map each thread to its physical core's temperature.
                        for x in range(THREADS):
                            if CORE_MAP[x] in core_dict:
                                cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
                elif len(cores) == THREADS / 2:
                    # One reading per physical core with SMT: mirror onto sibling threads.
                    cls.cpu_temp[0].append(temp)
                    for n, t in enumerate(cores, start=1):
                        try:
                            cls.cpu_temp[n].append(t)
                            cls.cpu_temp[THREADS // 2 + n].append(t)
                        except IndexError:
                            break
                else:
                    cls.cpu_temp[0].append(temp)
                    if len(cores) > 1:
                        for n, t in enumerate(cores, start=1):
                            try:
                                cls.cpu_temp[n].append(t)
                            except IndexError:
                                break
            except Exception as e:
                # Any sensor failure disables temperature collection entirely.
                errlog.exception(f'{e}')
                cls.got_sensors = False
                CpuBox._calc_size()
        else:
            # External helper programs: coretemp (BSD), osx-cpu-temp (MacOS), vcgencmd (RPi).
            try:
                if cls.sensor_method == "coretemp":
                    temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
                    cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
                    if len(cores) == THREADS / 2:
                        cls.cpu_temp[0].append(temp)
                        for n, t in enumerate(cores, start=1):
                            try:
                                cls.cpu_temp[n].append(t)
                                cls.cpu_temp[THREADS // 2 + n].append(t)
                            except IndexError:
                                break
                    else:
                        cores.insert(0, temp)
                        for n, t in enumerate(cores):
                            try:
                                cls.cpu_temp[n].append(t)
                            except IndexError:
                                break
                    if not cls.cpu_temp_high:
                        cls.cpu_temp_high = 85
                        cls.cpu_temp_crit = 100
                elif cls.sensor_method == "osx-cpu-temp":
                    # Output ends in "°C"; strip the unit before parsing.
                    temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
                    if not cls.cpu_temp_high:
                        cls.cpu_temp_high = 85
                        cls.cpu_temp_crit = 100
                elif cls.sensor_method == "vcgencmd":
                    # Output looks like "temp=48.3'C"; slice out the number.
                    temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
                    if not cls.cpu_temp_high:
                        cls.cpu_temp_high = 60
                        cls.cpu_temp_crit = 80
            except Exception as e:
                errlog.exception(f'{e}')
                cls.got_sensors = False
                CpuBox._calc_size()
            else:
                if not cores:
                    cls.cpu_temp[0].append(temp)
        # Only a single reading available: switch the box to package-temp-only mode.
        if not core_dict and len(cores) <= 1:
            cls.cpu_temp_only = True
        # Keep the temperature histories bounded.
        if len(cls.cpu_temp[0]) > 5:
            for n in range(len(cls.cpu_temp)):
                if cls.cpu_temp[n]:
                    del cls.cpu_temp[n][0]
    @classmethod
    def _draw(cls):
        '''Render the collected cpu stats by delegating to the cpu box foreground drawer.'''
        CpuBox._draw_fg()
class MemCollector(Collector):
    '''Collects memory, swap and disk usage/io information for the mem box.'''
    values: Dict[str, int] = {}           # raw memory values in bytes
    vlist: Dict[str, List[int]] = {}      # per-key percent histories for graphs
    percent: Dict[str, int] = {}          # memory values as percent of total
    string: Dict[str, str] = {}           # humanized memory strings
    swap_values: Dict[str, int] = {}
    swap_vlist: Dict[str, List[int]] = {}
    swap_percent: Dict[str, int] = {}
    swap_string: Dict[str, str] = {}
    disks: Dict[str, Dict]                # device -> display stats, rebuilt each cycle
    disk_hist: Dict[str, Tuple] = {}      # device -> (read_bytes, write_bytes) from last cycle
    timestamp: float = time()             # time of last collection, for io rate calculation
    disks_io_dict: Dict[str, Dict[str, List[int]]] = {}  # device -> read/write/rw MiB histories
    recheck_diskutil: bool = True
    diskutil_map: Dict[str, str] = {}     # MacOS: physical disk -> APFS container mapping
    io_error: bool = False                # io collection error already logged once
    old_disks: List[str] = []
    old_io_disks: List[str] = []
    fstab_filter: List[str] = []          # mountpoints parsed from /etc/fstab when use_fstab is on
    excludes: List[str] = ["squashfs", "nullfs"]  # filesystem types never shown
    if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
    buffer: str = MemBox.buffer

    @classmethod
    def _collect(cls):
        '''Collect memory/swap stats and, when enabled, per-disk usage and io rates.'''
        #* Collect memory
        mem = psutil.virtual_memory()
        # Not all platforms expose "cached"; fall back to "active" as an approximation.
        if hasattr(mem, "cached"):
            cls.values["cached"] = mem.cached
        else:
            cls.values["cached"] = mem.active
        cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
        cls.values["used"] = cls.values["total"] - cls.values["available"]
        for key, value in cls.values.items():
            cls.string[key] = floating_humanizer(value)
            if key == "total": continue
            cls.percent[key] = round(value * 100 / cls.values["total"])
            if CONFIG.mem_graphs:
                if not key in cls.vlist: cls.vlist[key] = []
                cls.vlist[key].append(cls.percent[key])
                if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
        #* Collect swap
        if CONFIG.show_swap or CONFIG.swap_disk:
            swap = psutil.swap_memory()
            cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
            cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
            if swap.total:
                if not MemBox.swap_on:
                    MemBox.redraw = True
                    MemBox.swap_on = True
                for key, value in cls.swap_values.items():
                    cls.swap_string[key] = floating_humanizer(value)
                    if key == "total": continue
                    cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
                    if CONFIG.mem_graphs:
                        if not key in cls.swap_vlist: cls.swap_vlist[key] = []
                        cls.swap_vlist[key].append(cls.swap_percent[key])
                        if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
            else:
                if MemBox.swap_on:
                    MemBox.redraw = True
                MemBox.swap_on = False
        else:
            if MemBox.swap_on:
                MemBox.redraw = True
            MemBox.swap_on = False
        if not CONFIG.show_disks: return
        #* Collect disks usage
        disk_read: int = 0
        disk_write: int = 0
        dev_name: str
        disk_name: str
        filtering: Tuple = ()
        filter_exclude: bool = False
        io_string_r: str
        io_string_w: str
        u_percent: int
        cls.disks = {}
        # Parse the user's disk filter, either an include list or "exclude=" list.
        if CONFIG.disks_filter:
            if CONFIG.disks_filter.startswith("exclude="):
                filter_exclude = True
                filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
            else:
                filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
        try:
            io_counters = psutil.disk_io_counters(perdisk=SYSTEM != "BSD", nowrap=True)
        except ValueError as e:
            # Known psutil issue on older versions; warn once and continue without io stats.
            if not cls.io_error:
                cls.io_error = True
                errlog.error(f'Non fatal error during disk io collection!')
                if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
                    errlog.error(f'Caused by outdated psutil version.')
                errlog.exception(f'{e}')
            io_counters = None
        # MacOS: map physical disks to their APFS containers so io counters can be matched.
        if SYSTEM == "MacOS" and cls.recheck_diskutil:
            cls.recheck_diskutil = False
            try:
                dutil_out = subprocess.check_output(["diskutil", "list", "physical"], universal_newlines=True)
                # FIX: initialize before the loop; a "Container" line appearing before
                # any "/dev/" line would otherwise reference these names unbound.
                xdisk = ydisk = ""
                for line in dutil_out.split("\n"):
                    line = line.replace("\u2068", "").replace("\u2069", "")
                    if line.startswith("/dev/"):
                        xdisk = line.split()[0].replace("/dev/", "")
                    elif "Container" in line:
                        ydisk = line.split()[3]
                    if xdisk and ydisk:
                        cls.diskutil_map[xdisk] = ydisk
                        xdisk = ydisk = ""
            except:
                pass
        # Optionally limit shown disks to non-swap /etc/fstab entries (parsed once).
        if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
            try:
                with open('/etc/fstab','r') as fstab:
                    for line in fstab:
                        line = line.strip()
                        if line and not line.startswith('#'):
                            mount_data = (line.split())
                            if mount_data[2].lower() != "swap":
                                cls.fstab_filter += [mount_data[1]]
                errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
            except IOError:
                CONFIG.use_fstab = False
                errlog.warning(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
        if not CONFIG.use_fstab and cls.fstab_filter:
            cls.fstab_filter = []
            errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
        for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
            disk_io = None
            io_string_r = io_string_w = ""
            if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
                continue
            disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
            if cls.excludes and disk.fstype in cls.excludes:
                continue
            if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
                continue
            if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
                continue
            try:
                disk_u = psutil.disk_usage(disk.mountpoint)
            except:
                # FIX: was a bare `pass`, leaving disk_u unbound (NameError) if the
                # very first partition fails; None makes getattr() fall back to 0.
                disk_u = None
            u_percent = round(getattr(disk_u, "percent", 0))
            cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
            for name in ["total", "used", "free"]:
                cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
            #* Collect disk io
            if io_counters:
                try:
                    if SYSTEM != "BSD":
                        # Resolve symlinks (e.g. /dev/mapper) to the real device name.
                        dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
                        if not dev_name in io_counters:
                            for names in io_counters:
                                if names in dev_name:
                                    disk_io = io_counters[names]
                                    break
                            else:
                                if cls.diskutil_map:
                                    for names, items in cls.diskutil_map.items():
                                        if items in dev_name and names in io_counters:
                                            disk_io = io_counters[names]
                        else:
                            disk_io = io_counters[dev_name]
                    elif disk.mountpoint == "/":
                        # BSD io counters are system-wide; only attribute them to root.
                        disk_io = io_counters
                    else:
                        raise Exception
                    # Bytes per second since the previous collection cycle.
                    disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp)) #type: ignore
                    disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp)) #type: ignore
                    if not disk.device in cls.disks_io_dict:
                        cls.disks_io_dict[disk.device] = {"read" : [], "write" : [], "rw" : []}
                    # Histories are stored in MiB/s (>> 20) and bounded to box width.
                    cls.disks_io_dict[disk.device]["read"].append(disk_read >> 20)
                    cls.disks_io_dict[disk.device]["write"].append(disk_write >> 20)
                    cls.disks_io_dict[disk.device]["rw"].append((disk_read + disk_write) >> 20)
                    if len(cls.disks_io_dict[disk.device]["read"]) > MemBox.width:
                        del cls.disks_io_dict[disk.device]["read"][0], cls.disks_io_dict[disk.device]["write"][0], cls.disks_io_dict[disk.device]["rw"][0]
                except:
                    # First cycle for a device (no history yet) or unmatched counters.
                    disk_read = disk_write = 0
            else:
                disk_read = disk_write = 0
            if disk_io:
                cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
                if CONFIG.io_mode or MemBox.disks_width > 30:
                    if disk_read > 0:
                        io_string_r = f'▲{floating_humanizer(disk_read, short=True)}'
                    if disk_write > 0:
                        io_string_w = f'▼{floating_humanizer(disk_write, short=True)}'
                    if CONFIG.io_mode:
                        cls.disks[disk.device]["io_r"] = io_string_r
                        cls.disks[disk.device]["io_w"] = io_string_w
                    elif disk_read + disk_write > 0:
                        io_string_r += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
            cls.disks[disk.device]["io"] = io_string_r + (" " if io_string_w and io_string_r else "") + io_string_w
        # Optionally show swap as a pseudo-disk, placed right after the first disk.
        if CONFIG.swap_disk and MemBox.swap_on:
            cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
            for name in ["total", "used", "free"]:
                cls.disks["__swap"][name] = cls.swap_string[name]
            if len(cls.disks) > 2:
                try:
                    new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
                    new["__swap"] = cls.disks.pop("__swap")
                    new.update(cls.disks)
                    cls.disks = new
                except:
                    pass
        # Disk set changed since last cycle: force a redraw and re-probe diskutil.
        if cls.old_disks != list(cls.disks) or cls.old_io_disks != list(cls.disks_io_dict):
            MemBox.redraw = True
            cls.recheck_diskutil = True
            cls.old_disks = list(cls.disks)
            cls.old_io_disks = list(cls.disks_io_dict)
        cls.timestamp = time()

    @classmethod
    def _draw(cls):
        '''Render the collected mem/disk stats by delegating to the mem box drawer.'''
        MemBox._draw_fg()
class NetCollector(Collector):
    '''Collects network interface statistics for the net box.'''
    buffer: str = NetBox.buffer
    nics: List[str] = []              # up interfaces, sorted by total throughput
    nic_i: int = 0                    # index of the selected interface in nics
    nic: str = ""                     # currently selected interface
    new_nic: str = ""                 # interface to switch to on next collection
    nic_error: bool = False           # io counter error already logged once
    reset: bool = False               # pending toggle of the totals offset
    graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
    graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
    #* Stats structure = stats[netword device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
    stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
    #* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
    strings: Dict[str, Dict[str, Dict[str, str]]] = {}
    switched: bool = False
    timestamp: float = time()         # time of last collection, for speed calculation
    net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
    auto_min: bool = CONFIG.net_auto
    net_iface: str = CONFIG.net_iface
    sync_top: int = 0                 # shared graph scale when net_sync is enabled
    sync_string: str = ""
    address: str = ""                 # first address of the selected interface

    @classmethod
    def _get_nics(cls):
        '''Get a list of all network devices sorted by highest throughput'''
        cls.nic_i = 0
        cls.nics = []
        cls.nic = ""
        try:
            io_all = psutil.net_io_counters(pernic=True)
        except Exception as e:
            if not cls.nic_error:
                cls.nic_error = True
                errlog.exception(f'{e}')
            # FIX: io_all is unbound here; previously execution fell through to
            # "if not io_all" and raised NameError instead of aborting cleanly.
            return
        if not io_all: return
        up_stat = psutil.net_if_stats()
        # Keep only interfaces that are up, highest total byte count first.
        for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
            if nic not in up_stat or not up_stat[nic].isup:
                continue
            cls.nics.append(nic)
        if not cls.nics: cls.nics = [""]
        cls.nic = cls.nics[cls.nic_i]
        # A configured interface takes precedence when present.
        if cls.net_iface and cls.net_iface in cls.nics:
            cls.nic = cls.net_iface
            cls.nic_i = cls.nics.index(cls.nic)

    @classmethod
    def switch(cls, key: str):
        '''Cycle to the next ("n") or previous ("b") network interface.'''
        if cls.net_iface: cls.net_iface = ""
        if len(cls.nics) < 2 and cls.nic in cls.nics:
            return
        if cls.nic_i == -1:
            cls.nic_i = 0 if key == "n" else -1
        else:
            cls.nic_i += +1 if key == "n" else -1
        cls.nic_i %= len(cls.nics)
        cls.new_nic = cls.nics[cls.nic_i]
        cls.switched = True
        Collector.collect(NetCollector, redraw=True)

    @classmethod
    def _collect(cls):
        '''Collect byte counters for the selected interface and derive speeds,
        totals, top speed and auto-scaled graph ceilings.'''
        speed: int
        stat: Dict
        up_stat = psutil.net_if_stats()
        # Interface set changed (device up/down): re-scan, keeping the selection if possible.
        if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
            old_nic = cls.nic
            cls._get_nics()
            cls.nic = old_nic
            if cls.nic not in cls.nics:
                cls.nic_i = -1
            else:
                cls.nic_i = cls.nics.index(cls.nic)
        if cls.switched:
            cls.nic = cls.new_nic
            cls.switched = False
        if not cls.nic or cls.nic not in up_stat:
            cls._get_nics()
            if not cls.nic: return
        try:
            io_all = psutil.net_io_counters(pernic=True)[cls.nic]
        except KeyError:
            # Interface disappeared between the stats call and the counters call.
            return
        # First time seeing this interface: initialize its stats and strings.
        if not cls.nic in cls.stats:
            cls.stats[cls.nic] = {}
            cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
            for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
                cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
                for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
                    cls.strings[cls.nic][direction][v] = ""
        cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
        cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
        if cls.nic in psutil.net_if_addrs():
            cls.address = getattr(psutil.net_if_addrs()[cls.nic][0], "address", "")
        for direction in ["download", "upload"]:
            stat = cls.stats[cls.nic][direction]
            strings = cls.strings[cls.nic][direction]
            #* Calculate current speed
            stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
            stat["last"] = stat["total"]
            speed = stat["speed"][-1]
            # Lazily initialize the fixed graph ceiling from the config value.
            if cls.net_min[direction] == -1:
                cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
                stat["graph_top"] = cls.net_min[direction]
                stat["graph_lower"] = 7
                if not cls.auto_min:
                    stat["redraw"] = True
                    strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
            # Counter wrapped or shrank below the offset: schedule a totals reset.
            if stat["offset"] and stat["offset"] > stat["total"]:
                cls.reset = True
            if cls.reset:
                # Toggle: set the offset to the current total, or clear it back to 0.
                if not stat["offset"]:
                    stat["offset"] = stat["total"]
                else:
                    stat["offset"] = 0
                if direction == "upload":
                    cls.reset = False
                    NetBox.redraw = True
            if len(stat["speed"]) > NetBox.width * 2:
                del stat["speed"][0]
            strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
            strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
            strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
            if speed > stat["top"] or not stat["top"]:
                stat["top"] = speed
                strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
            if cls.auto_min:
                # Hysteresis: 5 consecutive over/under readings trigger a rescale.
                if speed > stat["graph_top"]:
                    stat["graph_raise"] += 1
                    if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
                elif speed < stat["graph_top"] // 10:
                    stat["graph_lower"] += 1
                    if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
                if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
                    if stat["graph_raise"] >= 5:
                        stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
                    elif stat["graph_lower"] >= 5:
                        stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
                    stat["graph_raise"] = 0
                    stat["graph_lower"] = 0
                    stat["redraw"] = True
                    strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
        cls.timestamp = time()
        # Synced scaling: both graphs share the larger of the two ceilings.
        if CONFIG.net_sync:
            c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
            if c_max != cls.sync_top:
                cls.sync_top = c_max
                cls.sync_string = floating_humanizer(cls.sync_top, short=True)
                NetBox.redraw = True

    @classmethod
    def _draw(cls):
        '''Render the collected net stats by delegating to the net box drawer.'''
        NetBox._draw_fg()
class ProcCollector(Collector):
    '''Collects process stats for the proc box: flat sorted list or tree view,
    plus detailed stats for one selected process.'''
    buffer: str = ProcBox.buffer
    search_filter: str = ""          # comma separated filter strings, "" = no filter
    case_sensitive: bool = False
    processes: Dict = {}             # pid -> display stats for the current cycle
    num_procs: int = 0
    det_cpu: float = 0.0             # cpu percent of the detailed process, found during listing
    detailed: bool = False           # detailed view active
    detailed_pid: Union[int, None] = None
    details: Dict[str, Any] = {}     # detailed stats of the selected process
    details_cpu: List[int] = []      # cpu history for the detail graph
    details_mem: List[int] = []      # (scaled) mem history for the detail graph
    expand: int = 0                  # how many extra detail columns fit (0-5)
    collapsed: Dict = {}             # pid -> collapsed state in tree view
    tree_counter: int = 0            # cycles since last stale-pid cleanup of collapsed
    p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
    # Pre-compiled sort key expressions, evaluated per process with eval() below.
    # Missing psutil attrs come back as the ad_value 0.0, hence the 0.0 checks.
    sort_expr: Dict = {}
    sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
    sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
    sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
    sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
    sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
    sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
    sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
    sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")

    @classmethod
    def _collect(cls):
        '''List all processess with pid, name, arguments, threads, username, memory percent and cpu percent'''
        if not "proc" in Box.boxes: return
        out: Dict = {}
        cls.det_cpu = 0.0
        sorting: str = CONFIG.proc_sorting
        reverse: bool = not CONFIG.proc_reversed
        proc_per_cpu: bool = CONFIG.proc_per_core
        search: List[str] = []
        if cls.search_filter:
            if cls.case_sensitive:
                search = [i.strip() for i in cls.search_filter.split(",")]
            else:
                search = [i.strip() for i in cls.search_filter.lower().split(",")]
        # err is the psutil ad_value: attributes that failed to collect equal 0.0.
        err: float = 0.0
        n: int = 0
        if CONFIG.proc_tree and sorting == "arguments":
            sorting = "program"
        sort_cmd = cls.sort_expr[sorting]
        if CONFIG.proc_tree:
            cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
        else:
            for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
                # Abort mid-listing if a new collection run was requested.
                if cls.collect_interrupt or cls.proc_interrupt:
                    return
                if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
                    continue
                if p.info["cmdline"] == err:
                    p.info["cmdline"] = ""
                if p.info["username"] == err:
                    p.info["username"] = ""
                if p.info["num_threads"] == err:
                    p.info["num_threads"] = 0
                if search:
                    if cls.detailed and p.info["pid"] == cls.detailed_pid:
                        cls.det_cpu = p.info["cpu_percent"]
                    # Match any filter term against name, cmdline, pid or username;
                    # the for/else/break ladder skips the process if nothing matched.
                    for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
                        if not cls.case_sensitive:
                            value = value.lower()
                        for s in search:
                            if s in value:
                                break
                        else: continue
                        break
                    else: continue
                cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
                mem = p.info["memory_percent"]
                if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
                    mem_b = p.info["memory_info"].rss
                else:
                    mem_b = 0
                cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
                out[p.info["pid"]] = {
                    "name" : p.info["name"],
                    "cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
                    "threads" : p.info["num_threads"],
                    "username" : p.info["username"],
                    "mem" : mem,
                    "mem_b" : mem_b,
                    "cpu" : cpu }
                n += 1
            cls.num_procs = n
            cls.processes = out.copy()
        if cls.detailed:
            # How many optional detail columns fit in the available width (max 5).
            cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
            if cls.expand > 5: cls.expand = 5
        if cls.detailed and not cls.details.get("killed", False):
            try:
                c_pid = cls.detailed_pid
                det = psutil.Process(c_pid)
            except (psutil.NoSuchProcess, psutil.ZombieProcess):
                cls.details["killed"] = True
                cls.details["status"] = psutil.STATUS_DEAD
                ProcBox.redraw = True
            else:
                # Only fetch the attrs the current expand level needs; reuse values
                # already gathered in the process listing when possible.
                attrs: List[str] = ["status", "memory_info", "create_time"]
                if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
                if cls.expand:
                    attrs.extend(["nice", "terminal"])
                    if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
                if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
                cls.details = det.as_dict(attrs=attrs, ad_value="")
                if det.parent() != None: cls.details["parent_name"] = det.parent().name()
                else: cls.details["parent_name"] = ""
                cls.details["pid"] = c_pid
                if c_pid in cls.processes:
                    cls.details["name"] = cls.processes[c_pid]["name"]
                    cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
                    cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
                    cls.details["username"] = cls.processes[c_pid]["username"]
                    cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
                    cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
                else:
                    cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
                    cls.details["threads"] = f'{cls.details["num_threads"]}'
                    cls.details["cpu_percent"] = round(cls.det_cpu)
                cls.details["killed"] = False
                if SYSTEM == "MacOS":
                    cls.details["cpu_num"] = -1
                    cls.details["io_counters"] = ""
                if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
                else: cls.details["memory_bytes"] = "? Bytes"
                if isinstance(cls.details["create_time"], float):
                    uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
                    if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
                    else: cls.details["uptime"] = f'{uptime}'
                else: cls.details["uptime"] = "??:??:??"
                if cls.expand:
                    if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
                    if SYSTEM == "BSD":
                        # BSD io_counters only expose operation counts, not bytes.
                        if cls.expand > 2:
                            if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
                            else: cls.details["io_read"] = "?"
                        if cls.expand > 3:
                            if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
                            else: cls.details["io_write"] = "?"
                    else:
                        if cls.expand > 2:
                            if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
                            else: cls.details["io_read"] = "?"
                        if cls.expand > 3:
                            if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
                            else: cls.details["io_write"] = "?"
                    if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
                cls.details_cpu.append(cls.details["cpu_percent"])
                mem = cls.details["memory_percent"]
                # Non-linear scaling so small memory percentages are visible in the graph.
                if mem > 80: mem = round(mem)
                elif mem > 60: mem = round(mem * 1.2)
                elif mem > 30: mem = round(mem * 1.5)
                elif mem > 10: mem = round(mem * 2)
                elif mem > 5: mem = round(mem * 10)
                else: mem = round(mem * 20)
                cls.details_mem.append(mem)
                if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
                if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]

    @classmethod
    def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: List[str]):
        '''List all processess in a tree view with pid, name, threads, username, memory percent and cpu percent'''
        out: Dict = {}
        err: float = 0.0  # psutil ad_value for failed attributes
        det_cpu: float = 0.0
        infolist: Dict = {}  # pid -> p.info snapshot, keyed for the recursive walk
        cls.tree_counter += 1
        tree = defaultdict(list)  # ppid -> [child pids], in sort order
        n: int = 0
        for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
            if cls.collect_interrupt: return
            try:
                tree[p.ppid()].append(p.pid)
            except (psutil.NoSuchProcess, psutil.ZombieProcess):
                pass
            else:
                infolist[p.pid] = p.info
                n += 1
        # Pid 0 can appear as its own parent; drop the self-reference.
        if 0 in tree and 0 in tree[0]:
            tree[0].remove(0)
        def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
            # Recursive depth-first walk; collapse_to is the ancestor that absorbs
            # the stats of processes hidden inside a collapsed branch.
            nonlocal infolist, proc_per_cpu, search, out, det_cpu
            name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
            cont: bool = True  # False = exclude this pid from the output listing
            getinfo: Dict = {}
            if cls.collect_interrupt: return
            try:
                name = psutil.Process(pid).name()
                if name == "idle": return
            except psutil.Error:
                pass
                cont = False
                name = ""
            if pid in infolist:
                getinfo = infolist[pid]
            if search and not found:
                if cls.detailed and pid == cls.detailed_pid:
                    det_cpu = getinfo["cpu_percent"]
                if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
                if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
                # Same for/else/break filter ladder as in the flat listing; a match
                # anywhere in the branch keeps its descendants (found=True).
                for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
                    if not cls.case_sensitive:
                        value = value.lower()
                    for s in search:
                        if s in value:
                            found = True
                            break
                    else: continue
                    break
                else: cont = False
            if cont:
                if getinfo:
                    if getinfo["num_threads"] == err: threads = 0
                    else: threads = getinfo["num_threads"]
                    if getinfo["username"] == err: username = ""
                    else: username = getinfo["username"]
                    cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
                    mem = getinfo["memory_percent"]
                    if getinfo["cmdline"] == err: cmd = ""
                    else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
                    if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
                        mem_b = getinfo["memory_info"].rss
                    else:
                        mem_b = 0
                else:
                    threads = mem_b = 0
                    username = ""
                    mem = cpu = 0.0
                # Remember/initialize the collapsed state; branches deeper than the
                # configured tree_depth start out collapsed.
                if pid in cls.collapsed:
                    collapse = cls.collapsed[pid]
                else:
                    collapse = depth > CONFIG.tree_depth
                    cls.collapsed[pid] = collapse
                if collapse_to and not search:
                    # Inside a collapsed branch: fold this process's stats into the ancestor.
                    out[collapse_to]["threads"] += threads
                    out[collapse_to]["mem"] += mem
                    out[collapse_to]["mem_b"] += mem_b
                    out[collapse_to]["cpu"] += cpu
                else:
                    # Mark branch nodes with a [+]/[-] expander in the indent string.
                    if pid in tree and len(tree[pid]) > 0:
                        sign: str = "+" if collapse else "-"
                        inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
                    out[pid] = {
                        "indent" : inindent,
                        "name": name,
                        "cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
                        "threads" : threads,
                        "username" : username,
                        "mem" : mem,
                        "mem_b" : mem_b,
                        "cpu" : cpu,
                        "depth" : depth,
                    }
            # A search expands everything so matches are always visible.
            if search: collapse = False
            elif collapse and not collapse_to:
                collapse_to = pid
            if pid not in tree:
                return
            children = tree[pid][:-1]
            # Last child uses the "└─" connector, the rest "├─".
            for child in children:
                create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
            create_tree(tree[pid][-1], tree, indent + "  ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
        create_tree(min(tree), tree)
        cls.det_cpu = det_cpu
        if cls.collect_interrupt: return
        # Every 100 cycles, drop collapsed-state entries for dead pids.
        if cls.tree_counter >= 100:
            cls.tree_counter = 0
            for pid in list(cls.collapsed):
                if not psutil.pid_exists(pid):
                    del cls.collapsed[pid]
        cls.num_procs = len(out)
        cls.processes = out.copy()

    @classmethod
    def sorting(cls, key: str):
        '''Step to the next ("right"/"l") or previous ("left"/"h") sorting column.'''
        index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key in ["right", "l"] else -1)
        if index >= len(CONFIG.sorting_options): index = 0
        elif index < 0: index = len(CONFIG.sorting_options) - 1
        CONFIG.proc_sorting = CONFIG.sorting_options[index]
        if "left" in Key.mouse: del Key.mouse["left"]
        Collector.collect(ProcCollector, interrupt=True, redraw=True)

    @classmethod
    def _draw(cls):
        '''Render the collected process stats by delegating to the proc box drawer.'''
        ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
	@classmethod
	def help(cls):
		'''Show the help overlay listing all keybindings; blocks in its own
		input loop until closed. Requires a terminal of at least 80x24.'''
		if Term.width < 80 or Term.height < 24:
			errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
			return
		out: str = ""
		out_misc : str = ""
		redraw: bool = True
		key: str = ""
		skip: bool = False
		#* Remember if we were opened from the main menu so we can return to it on close
		main_active: bool = cls.active
		cls.active = True
		cls.resized = True
		if not cls.background:
			#* Freeze a grayed-out copy of the current screen to use as backdrop
			cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		#* Keybinding -> description; keys starting with "_" are spacer/info lines
		help_items: Dict[str, str] = {
			"(Mouse 1)" : "Clicks buttons and selects in process list.",
			"Selected (Mouse 1)" : "Show detailed information for selected process.",
			"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
			"(Esc, shift+m)" : "Toggles main menu.",
			"(m)" : "Cycle view presets, order: full->proc->stat->user.",
			"(1)" : "Toggle CPU box.",
			"(2)" : "Toggle MEM box.",
			"(3)" : "Toggle NET box.",
			"(4)" : "Toggle PROC box.",
			"(d)" : "Toggle disks view in MEM box.",
			"(F2, o)" : "Shows options.",
			"(F1, shift+h)" : "Shows this window.",
			"(ctrl+z)" : "Sleep program and put in background.",
			"(ctrl+c, q)" : "Quits program.",
			"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
			"(Up, k) (Down, j)" : "Select in process list.",
			"(Enter)" : "Show detailed information for selected process.",
			"(Spacebar)" : "Expand/collapse the selected process in tree view.",
			"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
			"(Home) (End)" : "Jump to first or last page in process list.",
			"(Left, h) (Right, l)" : "Select previous/next sorting column.",
			"(b) (n)" : "Select previous/next network device.",
			"(s)" : "Toggle showing swap as a disk.",
			"(i)" : "Toggle disks io mode with big graphs.",
			"(z)" : "Toggle totals reset for current network device",
			"(a)" : "Toggle auto scaling for the network graphs.",
			"(y)" : "Toggle synced scaling mode for network graphs.",
			"(f)" : "Input a NON case-sensitive process filter.",
			"(shift+f)" : "Input a case-sensitive process filter.",
			"(c)" : "Toggle per-core cpu usage of processes.",
			"(r)" : "Reverse sorting order in processes box.",
			"(e)" : "Toggle processes tree view.",
			"(delete)" : "Clear any entered filter.",
			"Selected (shift+t)" : "Terminate selected process with SIGTERM - 15.",
			"Selected (shift+k)" : "Kill selected process with SIGKILL - 9.",
			"Selected (shift+i)" : "Interrupt selected process with SIGINT - 2.",
			"_1" : " ",
			"_2" : "For bug reporting and project updates, visit:",
			"_3" : "https://github.com/aristocratos/bpytop",
		}
		while not cls.close:
			key = ""
			if cls.resized:
				#* Recalculate box geometry and paging after a terminal resize
				y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
				out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				x = Term.width//2-36
				h, w = Term.height-2-y, 72
				if len(help_items) > h:
					pages = ceil(len(help_items) / h)
				else:
					h = len(help_items)
					pages = 0
				page = 1
				out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
				redraw = True
				cls.resized = False
			if redraw:
				#* Render the visible page of help entries
				out = ""
				cy = 0
				if pages:
					out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
						f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
				out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
				for n, (keys, desc) in enumerate(help_items.items()):
					if pages and n < (page - 1) * h: continue
					out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
					cy += 1
					if cy == h: break
				if cy < h:
					#* Blank out leftover rows on a partially filled last page
					for i in range(h-cy):
						out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
			if skip and redraw:
				Draw.now(out)
			elif not skip:
				Draw.now(f'{cls.background}{out_misc}{out}')
			skip = redraw = False
			#* Wait for keyboard/mouse input until the update timer expires
			if Key.input_wait(Timer.left()):
				key = Key.get()
				if key == "mouse_click":
					#* Map clicks on the page up/down buttons to key presses, clicks outside close the window
					mx, my = Key.get_mouse()
					if x <= mx < x + w and y <= my < y + h + 3:
						if pages and my == y and x + 56 < mx < x + 61:
							key = "up"
						elif pages and my == y and x + 63 < mx < x + 68:
							key = "down"
					else:
						key = "escape"
				if key == "q":
					clean_quit()
				elif key in ["escape", "M", "enter", "backspace", "H", "f1"]:
					cls.close = True
					break
				elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
					page -= 1
					if page < 1: page = pages
					redraw = True
				elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
					page += 1
					if page > pages: page = 1
					redraw = True
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				#* Update timer expired: collect new stats and refresh the backdrop if configured
				Collector.collect()
				Collector.collect_done.wait(2)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		if main_active:
			#* Opened from the main menu: return there instead of restoring the screen
			cls.close = False
			return
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
	@classmethod
	def options(cls):
		'''Show the interactive options menu; blocks in its own input loop until
		closed. Edits CONFIG attributes in place and applies side effects
		(theme switch, redraws, collector resets) immediately.
		Requires a terminal of at least 80x24.'''
		if Term.width < 80 or Term.height < 24:
			errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
			return
		out: str = ""
		out_misc : str = ""
		redraw: bool = True
		selected_cat: str = ""
		selected_int: int = 0
		option_items: Dict[str, List[str]] = {}
		cat_list: List[str] = []
		cat_int: int = 0
		change_cat: bool = False
		key: str = ""
		skip: bool = False
		#* Remember if we were opened from the main menu so we can return to it on close
		main_active: bool = cls.active
		cls.active = True
		cls.resized = True
		d_quote: str
		#* True while a text/number value is being typed into input_val
		inputting: bool = False
		input_val: str = ""
		Theme.refresh()
		if not cls.background:
			#* Freeze a grayed-out copy of the current screen to use as backdrop
			cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		#* Description texts: category tab -> option name (CONFIG attribute) -> lines shown in the description box
		categories: Dict[str, Dict[str, List[str]]] = {
			"system" : {
				"color_theme" : [
					'Set color theme.',
					'',
					'Choose from all theme files in',
					'"/usr/[local/]share/bpytop/themes" and',
					'"~/.config/bpytop/themes".',
					'',
					'"Default" for builtin default theme.',
					'User themes are prefixed by a plus sign "+".',
					'',
					'For theme updates see:',
					'https://github.com/aristocratos/bpytop'],
				"theme_background" : [
					'If the theme set background should be shown.',
					'',
					'Set to False if you want terminal background',
					'transparency.'],
				"truecolor" : [
					'Sets if 24-bit truecolor should be used.',
					'(Requires restart to take effect!)',
					'',
					'Will convert 24-bit colors to 256 color',
					'(6x6x6 color cube) if False.',
					'',
					'Set to False if your terminal doesn\'t have',
					'truecolor support and can\'t convert to',
					'256-color.'],
				"shown_boxes" : [
					'Manually set which boxes to show.',
					'',
					'Available values are "cpu mem net proc".',
					'Seperate values with whitespace.',
					'',
					'Toggle between presets with mode key "m".'],
				"update_ms" : [
					'Update time in milliseconds.',
					'',
					'Recommended 2000 ms or above for better sample',
					'times for graphs.',
					'',
					'Min value: 100 ms',
					'Max value: 86400000 ms = 24 hours.'],
				"draw_clock" : [
					'Draw a clock at top of screen.',
					'(Only visible if cpu box is enabled!)',
					'',
					'Formatting according to strftime, empty',
					'string to disable.',
					'',
					'Custom formatting options:',
					'"/host" = hostname',
					'"/user" = username',
					'"/uptime" = system uptime',
					'',
					'Examples of strftime formats:',
					'"%X" = locale HH:MM:SS',
					'"%H" = 24h hour, "%I" = 12h hour',
					'"%M" = minute, "%S" = second',
					'"%d" = day, "%m" = month, "%y" = year'],
				"background_update" : [
					'Update main ui when menus are showing.',
					'',
					'True or False.',
					'',
					'Set this to false if the menus is flickering',
					'too much for a comfortable experience.'],
				"show_battery" : [
					'Show battery stats.',
					'(Only visible if cpu box is enabled!)',
					'',
					'Show battery stats in the top right corner',
					'if a battery is present.'],
				"show_init" : [
					'Show init screen at startup.',
					'',
					'The init screen is purely cosmetical and',
					'slows down start to show status messages.'],
				"update_check" : [
					'Check for updates at start.',
					'',
					'Checks for latest version from:',
					'https://github.com/aristocratos/bpytop'],
				"log_level" : [
					'Set loglevel for error.log',
					'',
					'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
					'The level set includes all lower levels,',
					'i.e. "DEBUG" will show all logging info.']
				},
			"cpu" : {
				"cpu_graph_upper" : [
					'Sets the CPU stat shown in upper half of',
					'the CPU graph.',
					'',
					'"total" = Total cpu usage.',
					'"user" = User mode cpu usage.',
					'"system" = Kernel mode cpu usage.',
					'See:',
					'https://psutil.readthedocs.io/en/latest/',
					'#psutil.cpu_times',
					'for attributes available on specific platforms.'],
				"cpu_graph_lower" : [
					'Sets the CPU stat shown in lower half of',
					'the CPU graph.',
					'',
					'"total" = Total cpu usage.',
					'"user" = User mode cpu usage.',
					'"system" = Kernel mode cpu usage.',
					'See:',
					'https://psutil.readthedocs.io/en/latest/',
					'#psutil.cpu_times',
					'for attributes available on specific platforms.'],
				"cpu_invert_lower" : [
					'Toggles orientation of the lower CPU graph.',
					'',
					'True or False.'],
				"cpu_single_graph" : [
					'Completely disable the lower CPU graph.',
					'',
					'Shows only upper CPU graph and resizes it',
					'to fit to box height.',
					'',
					'True or False.'],
				"check_temp" : [
					'Enable cpu temperature reporting.',
					'',
					'True or False.'],
				"cpu_sensor" : [
					'Cpu temperature sensor',
					'',
					'Select the sensor that corresponds to',
					'your cpu temperature.',
					'Set to "Auto" for auto detection.'],
				"show_coretemp" : [
					'Show temperatures for cpu cores.',
					'',
					'Only works if check_temp is True and',
					'the system is reporting core temps.'],
				"temp_scale" : [
					'Which temperature scale to use.',
					'',
					'Celsius, default scale.',
					'',
					'Fahrenheit, the american one.',
					'',
					'Kelvin, 0 = absolute zero, 1 degree change',
					'equals 1 degree change in Celsius.',
					'',
					'Rankine, 0 = abosulte zero, 1 degree change',
					'equals 1 degree change in Fahrenheit.'],
				"custom_cpu_name" : [
					'Custom cpu model name in cpu percentage box.',
					'',
					'Empty string to disable.'],
				"show_uptime" : [
					'Shows the system uptime in the CPU box.',
					'',
					'Can also be shown in the clock by using',
					'"/uptime" in the formatting.',
					'',
					'True or False.'],
				},
			"mem" : {
				"mem_graphs" : [
					'Show graphs for memory values.',
					'',
					'True or False.'],
				"show_disks" : [
					'Split memory box to also show disks.',
					'',
					'True or False.'],
				"show_io_stat" : [
					'Toggle small IO stat graphs.',
					'',
					'Toggles the small IO graphs for the regular',
					'disk usage view.',
					'',
					'True or False.'],
				"io_mode" : [
					'Toggles io mode for disks.',
					'',
					'Shows big graphs for disk read/write speeds',
					'instead of used/free percentage meters.',
					'',
					'True or False.'],
				"io_graph_combined" : [
					'Toggle combined read and write graphs.',
					'',
					'Only has effect if "io mode" is True.',
					'',
					'True or False.'],
				"io_graph_speeds" : [
					'Set top speeds for the io graphs.',
					'',
					'Manually set which speed in MiB/s that equals',
					'100 percent in the io graphs.',
					'(10 MiB/s by default).',
					'',
					'Format: "device:speed" seperate disks with a',
					'comma ",".',
					'',
					'Example: "/dev/sda:100, /dev/sdb:20".'],
				"show_swap" : [
					'If swap memory should be shown in memory box.',
					'',
					'True or False.'],
				"swap_disk" : [
					'Show swap as a disk.',
					'',
					'Ignores show_swap value above.',
					'Inserts itself after first disk.'],
				"only_physical" : [
					'Filter out non physical disks.',
					'',
					'Set this to False to include network disks,',
					'RAM disks and similar.',
					'',
					'True or False.'],
				"use_fstab" : [
					'Read disks list from /etc/fstab.',
					'(Has no effect on macOS X)',
					'',
					'This also disables only_physical.',
					'',
					'True or False.'],
				"disks_filter" : [
					'Optional filter for shown disks.',
					'',
					'Should be full path of a mountpoint,',
					'"root" replaces "/", separate multiple values',
					'with a comma ",".',
					'Begin line with "exclude=" to change to exclude',
					'filter.',
					'Oterwise defaults to "most include" filter.',
					'',
					'Example: disks_filter="exclude=/boot, /home/user"'],
				},
			"net" : {
				"net_download" : [
					'Fixed network graph download value.',
					'',
					'Default "10M" = 10 MibiBytes.',
					'Possible units:',
					'"K" (KiB), "M" (MiB), "G" (GiB).',
					'',
					'Append "bit" for bits instead of bytes,',
					'i.e "100Mbit"',
					'',
					'Can be toggled with auto button.'],
				"net_upload" : [
					'Fixed network graph upload value.',
					'',
					'Default "10M" = 10 MibiBytes.',
					'Possible units:',
					'"K" (KiB), "M" (MiB), "G" (GiB).',
					'',
					'Append "bit" for bits instead of bytes,',
					'i.e "100Mbit"',
					'',
					'Can be toggled with auto button.'],
				"net_auto" : [
					'Start in network graphs auto rescaling mode.',
					'',
					'Ignores any values set above at start and',
					'rescales down to 10KibiBytes at the lowest.',
					'',
					'True or False.'],
				"net_sync" : [
					'Network scale sync.',
					'',
					'Syncs the scaling for download and upload to',
					'whichever currently has the highest scale.',
					'',
					'True or False.'],
				"net_color_fixed" : [
					'Set network graphs color gradient to fixed.',
					'',
					'If True the network graphs color is based',
					'on the total bandwidth usage instead of',
					'the current autoscaling.',
					'',
					'The bandwidth usage is based on the',
					'"net_download" and "net_upload" values set',
					'above.'],
				"net_iface" : [
					'Network Interface.',
					'',
					'Manually set the starting Network Interface.',
					'Will otherwise automatically choose the NIC',
					'with the highest total download since boot.'],
				},
			"proc" : {
				"proc_update_mult" : [
					'Processes update multiplier.',
					'Sets how often the process list is updated as',
					'a multiplier of "update_ms".',
					'',
					'Set to 2 or higher to greatly decrease bpytop',
					'cpu usage. (Only integers)'],
				"proc_sorting" : [
					'Processes sorting option.',
					'',
					'Possible values: "pid", "program", "arguments",',
					'"threads", "user", "memory", "cpu lazy" and',
					'"cpu responsive".',
					'',
					'"cpu lazy" updates top process over time,',
					'"cpu responsive" updates top process directly.'],
				"proc_reversed" : [
					'Reverse processes sorting order.',
					'',
					'True or False.'],
				"proc_tree" : [
					'Processes tree view.',
					'',
					'Set true to show processes grouped by parents,',
					'with lines drawn between parent and child',
					'process.'],
				"tree_depth" : [
					'Process tree auto collapse depth.',
					'',
					'Sets the depth were the tree view will auto',
					'collapse processes at.'],
				"proc_colors" : [
					'Enable colors in process view.',
					'',
					'Uses the cpu graph gradient colors.'],
				"proc_gradient" : [
					'Enable process view gradient fade.',
					'',
					'Fades from top or current selection.',
					'Max fade value is equal to current themes',
					'"inactive_fg" color value.'],
				"proc_per_core" : [
					'Process usage per core.',
					'',
					'If process cpu usage should be of the core',
					'it\'s running on or usage of the total',
					'available cpu power.',
					'',
					'If true and process is multithreaded',
					'cpu usage can reach over 100%.'],
				"proc_mem_bytes" : [
					'Show memory as bytes in process list.',
					' ',
					'True or False.'],
			}
		}
		#* Index trackers for options that cycle through a fixed list of values
		loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
		cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
		cpu_graph_i: Dict[str, int] = {	"cpu_graph_upper" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_upper),
										"cpu_graph_lower" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_lower)}
		temp_scale_i: int = CONFIG.temp_scales.index(CONFIG.temp_scale)
		color_i: int
		#* Worst-case option count over all categories (two rows per option), used for vertical centering
		max_opt_len: int = max([len(categories[x]) for x in categories]) * 2
		cat_list = list(categories)
		while not cls.close:
			key = ""
			if cls.resized or change_cat:
				#* Recalculate layout and redraw the static parts (banner, boxes, category tabs)
				cls.resized = change_cat = False
				selected_cat = list(categories)[cat_int]
				option_items = categories[cat_list[cat_int]]
				option_len: int = len(option_items) * 2
				y = 12 if Term.height < max_opt_len + 13 else Term.height // 2 - max_opt_len // 2 + 7
				out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				x = Term.width//2-38
				x2 = x + 27
				h, w, w2 = min(Term.height-1-y, option_len), 26, 50
				h -= h % 2
				color_i = list(Theme.themes).index(THEME.current)
				out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
				out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
				redraw = True
				cat_width = floor((w+w2) / len(categories))
				out_misc += f'{Fx.b}'
				for cx, cat in enumerate(categories):
					out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
					if cat == selected_cat:
						out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
					else:
						out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
				out_misc += f'{Fx.ub}'
				if option_len > h:
					pages = ceil(option_len / h)
				else:
					h = option_len
					pages = 0
				#* selected_int == -1 signals "entered from the previous category", select last page/item
				page = pages if selected_int == -1 and pages > 0 else 1
				selected_int = 0 if selected_int >= 0 else len(option_items) - 1
			if redraw:
				#* Render the visible page of options plus the description box for the selection
				out = ""
				cy = 0
				selected = list(option_items)[selected_int]
				if pages:
					out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
						f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
				#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
				for n, opt in enumerate(option_items):
					if pages and n < (page - 1) * ceil(h / 2): continue
					value = getattr(CONFIG, opt)
					t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
					v_color	= "" if opt == selected else f'{THEME.title}'
					d_quote = '"' if isinstance(value, str) else ""
					#* Counters like " 2/5" for options that cycle through a list
					if opt == "color_theme":
						counter = f' {color_i + 1}/{len(Theme.themes)}'
					elif opt == "proc_sorting":
						counter = f' {CONFIG.sorting_options.index(CONFIG.proc_sorting) + 1}/{len(CONFIG.sorting_options)}'
					elif opt == "log_level":
						counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
					elif opt == "cpu_sensor":
						counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
					elif opt in ["cpu_graph_upper", "cpu_graph_lower"]:
						counter = f' {cpu_graph_i[opt] + 1}/{len(CONFIG.cpu_percent_fields)}'
					elif opt == "temp_scale":
						counter = f' {temp_scale_i + 1}/{len(CONFIG.temp_scales)}'
					else:
						counter = ""
					out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
					if opt == selected:
						if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor", "cpu_graph_upper", "cpu_graph_lower", "temp_scale"]:
							out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
						elif inputting:
							out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
						else:
							out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else "  ") +
							f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else "  "))
					else:
						out += f'{d_quote + str(value) + d_quote:^24.24}'
					out += f'{Term.bg}'
					if opt == selected:
						#* Draw the description box next to the selected option, clamped to terminal height
						h2 = len(option_items[opt]) + 2
						y2 = y + (selected_int * 2) - ((page-1) * h)
						if y2 + h2 > Term.height: y2 = Term.height - h2
						out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
						for n, desc in enumerate(option_items[opt]):
							out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
					cy += 2
					if cy >= h: break
				if cy < h:
					#* Blank out leftover rows on a partially filled last page
					for i in range(h-cy):
						out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
			if not skip or redraw:
				Draw.now(f'{cls.background}{out_misc}{out}')
				skip = redraw = False
			#* Wait for keyboard/mouse input until the update timer expires
			if Key.input_wait(Timer.left()):
				key = Key.get()
				redraw = True
				has_sel = False
				if key == "mouse_click" and not inputting:
					#* Translate clicks into key presses: category tabs, option rows, value arrows, page buttons
					mx, my = Key.get_mouse()
					if x < mx < x + w + w2 and y - 4 < my < y:
						#	if my == y - 2:
						for cx, cat in enumerate(categories):
							ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
							if ccx - 2 < mx < ccx + 2 + len(cat):
								key = str(cx+1)
								break
					elif x < mx < x + w and y < my < y + h + 2:
						mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
						if pages and my == y+h+1 and x+11 < mx < x+16:
							key = "page_up"
						elif pages and my == y+h+1 and x+19 < mx < x+24:
							key = "page_down"
						elif my == y+h+1:
							pass
						elif mouse_sel == selected_int:
							if mx < x + 6:
								key = "left"
							elif mx > x + 19:
								key = "right"
							else:
								key = "enter"
						elif mouse_sel < len(option_items):
							selected_int = mouse_sel
							has_sel = True
					else:
						key = "escape"
				if inputting:
					#* Text input mode: edit input_val until enter (commit) or escape (cancel)
					if key in ["escape", "mouse_click"]:
						inputting = False
					elif key == "enter":
						inputting = False
						if str(getattr(CONFIG, selected)) != input_val:
							#* Commit the typed value with per-option validation/clamping and side effects
							if selected == "update_ms":
								if not input_val or int(input_val) < 100:
									CONFIG.update_ms = 100
								elif int(input_val) > 86399900:
									CONFIG.update_ms = 86399900
								else:
									CONFIG.update_ms = int(input_val)
							elif selected == "proc_update_mult":
								if not input_val or int(input_val) < 1:
									CONFIG.proc_update_mult = 1
								else:
									CONFIG.proc_update_mult = int(input_val)
								Collector.proc_counter = 1
							elif selected == "tree_depth":
								if not input_val or int(input_val) < 0:
									CONFIG.tree_depth = 0
								else:
									CONFIG.tree_depth = int(input_val)
								ProcCollector.collapsed = {}
							elif selected == "shown_boxes":
								new_boxes: List = []
								for box in input_val.split():
									if box in ["cpu", "mem", "net", "proc"]:
										new_boxes.append(box)
								CONFIG.shown_boxes = " ".join(new_boxes)
								Box.view_mode = "user"
								Box.view_modes["user"] = CONFIG.shown_boxes.split()
								Draw.clear(saved=True)
							elif isinstance(getattr(CONFIG, selected), str):
								setattr(CONFIG, selected, input_val)
								if selected.startswith("net_"):
									NetCollector.net_min = {"download" : -1, "upload" : -1}
								elif selected == "draw_clock":
									Box.clock_on = len(CONFIG.draw_clock) > 0
									if not Box.clock_on: Draw.clear("clock", saved=True)
								elif selected == "io_graph_speeds":
									MemBox.graph_speeds = {}
							Term.refresh(force=True)
							cls.resized = False
					elif key == "backspace" and len(input_val):
						input_val = input_val[:-1]
					elif key == "delete":
						input_val = ""
					elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
						input_val += key
					elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
						input_val += key
				elif key == "q":
					clean_quit()
				elif key in ["escape", "o", "M", "f2"]:
					cls.close = True
					break
				elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and (page == pages or pages == 0)):
					#* Wrap to the next category tab
					if cat_int == len(categories) - 1:
						cat_int = 0
					else:
						cat_int += 1
					change_cat = True
				elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
					#* Wrap to the previous category tab; select its last item unless shift_tab was used
					if cat_int == 0:
						cat_int = len(categories) - 1
					else:
						cat_int -= 1
					change_cat = True
					selected_int = -1 if key != "shift_tab" else 0
				elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
					#* Number keys jump straight to a category tab
					cat_int = int(key) - 1
					change_cat = True
				elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
					"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface", "io_graph_speeds"]:
					#* Enter starts text input mode on free-form options
					inputting = True
					input_val = str(getattr(CONFIG, selected))
				elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
					CONFIG.update_ms -= 100
					Box.draw_update_ms()
				elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
					CONFIG.update_ms += 100
					Box.draw_update_ms()
				elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
					CONFIG.proc_update_mult -= 1
					Collector.proc_counter = 1
				elif key == "right" and selected == "proc_update_mult":
					CONFIG.proc_update_mult += 1
					Collector.proc_counter = 1
				elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
					CONFIG.tree_depth -= 1
					ProcCollector.collapsed = {}
				elif key == "right" and selected == "tree_depth":
					CONFIG.tree_depth += 1
					ProcCollector.collapsed = {}
				elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
					#* Left/right toggles boolean options and applies their immediate side effects
					setattr(CONFIG, selected, not getattr(CONFIG, selected))
					if selected == "check_temp":
						if CONFIG.check_temp:
							CpuCollector.get_sensors()
						else:
							CpuCollector.sensor_method = ""
							CpuCollector.got_sensors = False
					if selected in ["net_auto", "net_color_fixed", "net_sync"]:
						if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
						NetBox.redraw = True
					if selected == "theme_background":
						Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
						Draw.now(Term.bg)
					if selected == "show_battery":
						Draw.clear("battery", saved=True)
					Term.refresh(force=True)
					cls.resized = False
				elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
					if key == "left":
						color_i -= 1
						if color_i < 0: color_i = len(Theme.themes) - 1
					elif key == "right":
						color_i += 1
						if color_i > len(Theme.themes) - 1: color_i = 0
					#* Wait for any running collection before swapping theme to avoid drawing with mixed colors
					Collector.collect_idle.wait()
					CONFIG.color_theme = list(Theme.themes)[color_i]
					THEME(CONFIG.color_theme)
					Term.refresh(force=True)
					Timer.finish()
				elif key in ["left", "right"] and selected == "proc_sorting":
					ProcCollector.sorting(key)
				elif key in ["left", "right"] and selected == "log_level":
					if key == "left":
						loglevel_i -= 1
						if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
					elif key == "right":
						loglevel_i += 1
						if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
					CONFIG.log_level = CONFIG.log_levels[loglevel_i]
					errlog.setLevel(getattr(logging, CONFIG.log_level))
					errlog.info(f'Loglevel set to {CONFIG.log_level}')
				elif key in ["left", "right"] and selected in ["cpu_graph_upper", "cpu_graph_lower"]:
					if key == "left":
						cpu_graph_i[selected] -= 1
						if cpu_graph_i[selected] < 0: cpu_graph_i[selected] = len(CONFIG.cpu_percent_fields) - 1
					if key == "right":
						cpu_graph_i[selected] += 1
						if cpu_graph_i[selected] > len(CONFIG.cpu_percent_fields) - 1: cpu_graph_i[selected] = 0
					setattr(CONFIG, selected, CONFIG.cpu_percent_fields[cpu_graph_i[selected]])
					#* Reset the matching CpuCollector stat list so the graph restarts with the new field
					setattr(CpuCollector, selected.replace("_graph", ""), [])
					Term.refresh(force=True)
					cls.resized = False
				elif key in ["left", "right"] and selected == "temp_scale":
					if key == "left":
						temp_scale_i -= 1
						if temp_scale_i < 0: temp_scale_i = len(CONFIG.temp_scales) - 1
					if key == "right":
						temp_scale_i += 1
						if temp_scale_i > len(CONFIG.temp_scales) - 1: temp_scale_i = 0
					CONFIG.temp_scale = CONFIG.temp_scales[temp_scale_i]
					Term.refresh(force=True)
					cls.resized = False
				elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
					if key == "left":
						cpu_sensor_i -= 1
						if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
					elif key == "right":
						cpu_sensor_i += 1
						if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
					Collector.collect_idle.wait()
					CpuCollector.sensor_swap = True
					CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
					if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
						CpuCollector.get_sensors()
					Term.refresh(force=True)
					cls.resized = False
				elif key in ["up", "mouse_scroll_up"]:
					selected_int -= 1
					if selected_int < 0: selected_int = len(option_items) - 1
					page = floor(selected_int * 2 / h) + 1
				elif key in ["down", "mouse_scroll_down"]:
					selected_int += 1
					if selected_int > len(option_items) - 1: selected_int = 0
					page = floor(selected_int * 2 / h) + 1
				elif key == "page_up":
					if not pages or page == 1:
						selected_int = 0
					else:
						page -= 1
						if page < 1: page = pages
						selected_int = (page-1) * ceil(h / 2)
				elif key == "page_down":
					if not pages or page == pages:
						selected_int = len(option_items) - 1
					else:
						page += 1
						if page > pages: page = 1
						selected_int = (page-1) * ceil(h / 2)
				elif has_sel:
					pass
				else:
					redraw = False
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				#* Update timer expired: collect new stats and refresh the backdrop if configured
				Collector.collect()
				Collector.collect_done.wait(2)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		if main_active:
			#* Opened from the main menu: return there instead of restoring the screen
			cls.close = False
			return
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
class Timer:
	'''Tracks the update interval: stamp() marks the start of an interval,
	left() reports remaining seconds, not_zero() whether time remains, and
	finish() forces the current interval to expire immediately.'''
	timestamp: float  # time() value recorded by the last stamp()
	return_zero = False  # one-shot flag set by finish() to force expiry
	@classmethod
	def stamp(cls):
		'''Mark the start of a new update interval.'''
		cls.timestamp = time()
	@classmethod
	def not_zero(cls) -> bool:
		'''Return True while the current interval still has time remaining.'''
		if not cls.return_zero:
			deadline = cls.timestamp + (CONFIG.update_ms / 1000)
			return deadline > time()
		#* finish() was called: report expiry once, then clear the flag
		cls.return_zero = False
		return False
	@classmethod
	def left(cls) -> float:
		'''Seconds remaining of the current interval (negative if expired).'''
		deadline = cls.timestamp + (CONFIG.update_ms / 1000)
		return deadline - time()
	@classmethod
	def finish(cls):
		'''Expire the current interval and wake any pending key wait.'''
		cls.return_zero = True
		cls.timestamp = time() - (CONFIG.update_ms / 1000)
		Key.break_wait()
class UpdateChecker:
	'''Fetches the latest bpytop VERSION string from the github master branch
	in a background thread and, if it differs from the running version and
	notify-send is available, sends a desktop notification.'''
	version: str = VERSION
	thread: threading.Thread
	@classmethod
	def run(cls):
		'''Start the version check in a background thread.'''
		cls.thread = threading.Thread(target=cls._checker)
		cls.thread.start()
	@classmethod
	def _checker(cls):
		'''Download the source, extract its VERSION and notify on mismatch.'''
		try:
			with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
				for raw_line in source:
					text = raw_line.decode("utf-8")
					if not text.startswith("VERSION: str ="):
						continue
					cls.version = text[(text.index("=")+1):].strip('" \n')
					break
		except Exception as e:
			#* Network failure is non-fatal: log it and skip the notification
			errlog.exception(f'{e}')
			return
		if cls.version == VERSION or not which("notify-send"):
			return
		try:
			subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
				f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
				"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
		except Exception as e:
			errlog.exception(f'{e}')
class Init:
	'''Draws and manages the animated startup screen: a banner with progress
	markers over two mirrored random graphs fading in grayscale.'''
	running: bool = True  # True while the init sequence is in progress
	initbg_colors: List[str] = []  # grayscale gradient used by the background graphs
	initbg_data: List[int]  # random data feeding the background graphs
	initbg_up: Graph
	initbg_down: Graph
	resized = False  # set externally on terminal resize to abort the animation
	@classmethod
	def start(cls):
		'''Set up draw buffers, the banner with progress rows and the two background graphs.'''
		Draw.buffer("init", z=1)
		Draw.buffer("initbg", z=10)
		#* Build a 102-entry grayscale gradient (each level doubled) for the graphs
		for i in range(51):
			for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
		Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
			f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
		#* Seven progress-percentage rows below the banner
		for _i in range(7):
			perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
			Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
		Draw.out("banner")
		#* Save cursor position where success/fail marks will be written
		Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
		cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
		cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
		cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
	@classmethod
	def success(cls):
		'''Mark the current init step as done and advance to the next progress row.'''
		if not CONFIG.show_init or cls.resized: return
		cls.draw_bg(5)
		Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
	@staticmethod
	def fail(err):
		'''Mark the current init step as failed, log the error and quit with a message.'''
		if CONFIG.show_init:
			Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
			sleep(2)
		errlog.exception(f'{err}')
		clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
	@classmethod
	def draw_bg(cls, times: int = 5):
		'''Animate the background graphs for the given number of frames.'''
		for _ in range(times):
			sleep(0.05)
			x = randint(0, 100)
			Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
			Draw.out("initbg", "banner", "init")
	@classmethod
	def done(cls):
		'''Finish the init sequence, clear its buffers and free the animation data.'''
		cls.running = False
		if not CONFIG.show_init: return
		if cls.resized:
			Draw.now(Term.clear)
		else:
			cls.draw_bg(10)
		Draw.clear("initbg", "banner", "init", saved=True)
		if cls.resized: return
		del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
	'''Fetch a suitable CPU identifier from the CPU model name string.

	Runs a platform specific command, extracts the model line, then trims
	vendor noise, trademark signs and clock speed. Returns "" if the command
	fails or the platform is unknown (the name is cosmetic only).
	'''
	name: str = ""
	nlist: List = []
	command: str = ""
	cmd_out: str = ""
	rem_line: str = ""
	#* Pick the platform specific command and (optionally) the line prefix to extract
	if SYSTEM == "Linux":
		command = "cat /proc/cpuinfo"
		rem_line = "model name"
	elif SYSTEM == "MacOS":
		command = "sysctl -n machdep.cpu.brand_string"
	elif SYSTEM == "BSD":
		command = "sysctl hw.model"
		rem_line = "hw.model"
	try:
		cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
	except Exception:
		#* Was a bare except: narrowed so SystemExit/KeyboardInterrupt still propagate;
		#* lookup failure just leaves the name empty
		pass
	if rem_line:
		for line in cmd_out.split("\n"):
			if rem_line in line:
				name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
	else:
		name = cmd_out
	nlist = name.split(" ")
	try:
		#* Shorten the name to the essential model identifier per vendor pattern
		if "Xeon" in name and "CPU" in name:
			name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
		elif "Ryzen" in name:
			name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
		elif "Duo" in name and "@" in name:
			name = " ".join(nlist[:nlist.index("@")])
		elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
			name = nlist[nlist.index("CPU")-1]
	except Exception:
		#* Was a bare except: narrowed; on any parse error keep the full name
		pass
	#* Strip vendor noise, trademark signs and clock speed, collapse whitespace
	name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
	name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
	name = " ".join(name.split())
	return name
def get_cpu_core_mapping() -> List[int]:
    """Map each logical cpu (thread) number to a physical core index.

    Parses /proc/cpuinfo on Linux; on failure or other platforms falls back
    to distributing threads evenly over cores in round-robin order.
    """
    mapping: List[int] = []
    core_ids: List[int] = []

    if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
        try:
            mapping = [0] * THREADS
            num = 0
            with open("/proc/cpuinfo", "r") as f:
                for line in f:
                    if line.startswith("processor"):
                        num = int(line.strip()[(line.index(": ")+2):])
                        if num > THREADS - 1:
                            break
                    elif line.startswith("core id"):
                        core_id = int(line.strip()[(line.index(": ")+2):])
                        #* Normalize raw core ids to a dense 0..n index
                        if core_id not in core_ids:
                            core_ids.append(core_id)
                        mapping[num] = core_ids.index(core_id)
            #* Bail out if /proc/cpuinfo listed fewer processors than expected
            if num < THREADS - 1:
                raise Exception
        except:
            mapping = []

    if not mapping:
        #* Fallback: assume threads are numbered core-major, e.g. 8 threads /
        #* 4 cores -> [0, 1, 2, 3, 0, 1, 2, 3]
        mapping = []
        for _ in range(THREADS // CORES):
            mapping.extend([x for x in range(CORES)])

    return mapping
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
    '''Create a box from a box object or by given arguments

    Returns the escape-sequence string that draws the outline, optional fill
    and titles; the cursor ends up just inside the top-left corner.
    '''
    out: str = f'{Term.fg}{Term.bg}'
    num: int = 0
    if not line_color: line_color = THEME.div_line
    if not title_color: title_color = THEME.title

    #* Get values from box class if given
    if box:
        x = box.x
        y = box.y
        width = box.width
        height = box.height
        title = box.name
        num = box.num
    #* Rows of the top and bottom border
    hlines: Tuple[int, int] = (y, y + height - 1)

    out += f'{line_color}'

    #* Draw all horizontal lines
    for hpos in hlines:
        out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'

    #* Draw all vertical lines and fill if enabled
    for hpos in range(hlines[0]+1, hlines[1]):
        out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'

    #* Draw corners (continuation lines stay at column 0 so no spaces leak into the string)
    out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'

    #* Draw titles if enabled
    if title:
        #* A box number is shown as a superscript digit before the title
        numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
        out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
    if title2:
        out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'

    return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
def now_sleeping(signum, frame):
    """Reset terminal settings and stop background input read before putting to sleep

    SIGTSTP (Ctrl-Z) handler; signature fixed by the signal module.
    """
    Key.stop()
    Collector.stop()
    Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
    Term.echo(True)
    #* Actually suspend: the handler swallowed SIGTSTP, so send SIGSTOP ourselves
    os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
    """Set terminal settings and restart background input read

    SIGCONT handler: undoes everything now_sleeping() tore down.
    """
    Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
    Term.echo(False)
    Key.start()
    #* Recalculate and redraw the layout in case the terminal was resized while suspended
    Term.refresh()
    Box.calc_sizes()
    Box.draw_bg()
    Collector.start()
def quit_sigint(signum, frame):
    """SIGINT redirection to clean_quit()

    Installed as the Ctrl-C handler; both arguments are required by the
    signal module but unused.
    """
    clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
    """Stop background input read, save current config and reset terminal settings before quitting"""
    global THREAD_ERROR
    if thread:
        #* Called from a worker thread: record the code and wake the main
        #* thread, which performs the actual shutdown
        THREAD_ERROR = errcode
        interrupt_main()
        return
    if THREAD_ERROR: errcode = THREAD_ERROR
    Key.stop()
    Collector.stop()
    #* Only persist the config on a clean exit
    if not errcode: CONFIG.save_config()
    #* Restore the terminal to its pre-bpytop state
    Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
    Term.echo(True)
    if errcode == 0:
        errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
    else:
        errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
        if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
    if errmsg: print(errmsg)
    raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
    '''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
    * bit=True or defaults to bytes
    * start=int to set 1024 multiplier starting unit
    * short=True always returns 0 decimals and shortens unit to 1 character
    '''
    out: str = ""
    mult: int = 8 if bit else 1
    selector: int = start
    unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]

    #* Work in integer "centi-units" (value * 100) so two decimals survive the bit shifts
    if isinstance(value, float): value = round(value * 100 * mult)
    elif value > 0: value *= 100 * mult
    else: value = 0

    #* Shift down by 1024 until the scaled value fits in at most 5 digits
    while len(f'{value}') > 5 and value >= 102400:
        value >>= 10
        if value < 100:
            out = f'{value}'
            break
        selector += 1
    else:
        #* Re-insert the decimal point hidden by the *100 scaling
        #* (4 digits -> one decimal shown, 3 digits -> two decimals)
        if len(f'{value}') == 4 and selector > 0:
            out = f'{value}'[:-2] + "." + f'{value}'[-2]
        elif len(f'{value}') == 3 and selector > 0:
            out = f'{value}'[:-2] + "." + f'{value}'[-2:]
        elif len(f'{value}') >= 2:
            out = f'{value}'[:-2]
        else:
            out = f'{value}'

    if short:
        if "." in out:
            out = f'{round(float(out))}'
        if len(out) > 3:
            #* Still too wide for short mode: bump to the next unit
            out = f'{int(out[0]) + 1}'
            selector += 1

    out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
    if per_second: out += "ps" if bit else "/s"

    return out
def units_to_bytes(value: str) -> int:
    '''Parse a human readable size string into a number of bytes.

    * Accepts an optional "k"/"m"/"g" (case-insensitive) 1024-multiplier suffix
    * Accepts optional "bit"/"byte" suffixes (plus a trailing "s"); bit values
      are converted to bytes (rounded)
    * Accepts integer or decimal numbers, e.g. "10k", "1.5M", "100mbit"
    * Returns 0 for empty or unparsable input
    '''
    if not value: return 0
    out: int = 0
    mult: int = 0
    bit: bool = False
    value_i: int = 0
    units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
    try:
        #* Strip plural "s", then "bit"/"byte" suffixes
        if value.lower().endswith("s"):
            value = value[:-1]
        if value.lower().endswith("bit"):
            bit = True
            value = value[:-3]
        elif value.lower().endswith("byte"):
            value = value[:-4]
        #* Guard against value being fully consumed by the suffixes above
        #* (e.g. input "bits"), which previously raised an uncaught IndexError
        if value and value[-1].lower() in units:
            mult = units[value[-1].lower()]
            value = value[:-1]
        if "." in value and value.replace(".", "").isdigit():
            if mult > 0:
                #* Fold the fraction into the next smaller unit to keep integer math
                value_i = round(float(value) * 1024)
                mult -= 1
            else:
                value_i = round(float(value))
        elif value.isdigit():
            value_i = int(value)
        out = int(value_i) << (10 * mult)
        if bit: out = round(out / 8)
    except (ValueError, IndexError):
        out = 0
    return out
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
    """Clamp *value* to the inclusive range [min_value, max_value]."""
    #* Clamp to the ceiling first, then to the floor (floor wins on conflict)
    capped = value if value < max_value else max_value
    return capped if capped > min_value else min_value
def readfile(file: str, default: str = "") -> str:
    """Return the whitespace-stripped contents of *file*, or *default*
    when the file does not exist or cannot be read."""
    if not os.path.isfile(file):
        return default
    try:
        with open(file, "r") as f:
            return f.read().strip()
    except:
        return default
def temperature(value: int, scale: str = "celsius") -> Tuple[int, str]:
    """Convert an integer celsius reading into the requested *scale*.

    Returns a (rounded integer value, unit string) tuple; an unknown scale
    yields (0, "").
    """
    converters = {
        "celsius": lambda c: (c, "°C"),
        "fahrenheit": lambda c: (round(c * 1.8 + 32), "°F"),
        "kelvin": lambda c: (round(c + 273.15), "°K"),
        "rankine": lambda c: (round(c * 1.8 + 491.67), "°R"),
    }
    convert = converters.get(scale)
    if convert is None:
        return (0, "")
    return convert(value)
def process_keys():
    """Drain the keyboard/mouse input queue and act on each event.

    Handles filter-typing mode for the process box first, then global keys
    (quit, menus, update interval, box toggles), then keys that are only
    active while the "proc", "net" or "mem" box is currently shown.
    """
    mouse_pos: Tuple[int, int] = (0, 0)
    filtered: bool = False
    #* Number keys toggling individual boxes
    box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
    while Key.has_key():
        key = Key.get()
        found: bool = True
        if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
            mouse_pos = Key.get_mouse()
            #* Mouse events inside the process list pass through unchanged;
            #* a click elsewhere unselects, anything else is dropped
            if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
                pass
            elif key == "mouse_click":
                key = "mouse_unselect"
            else:
                key = "_null"

        if ProcBox.filtering:
            #* While typing a process filter, keys edit the filter string instead
            if key in ["enter", "mouse_click", "mouse_unselect"]:
                ProcBox.filtering = False
                Collector.collect(ProcCollector, redraw=True, only_draw=True)
                continue
            elif key in ["escape", "delete"]:
                ProcCollector.search_filter = ""
                ProcBox.filtering = False
            elif len(key) == 1:
                ProcCollector.search_filter += key
            elif key == "backspace" and len(ProcCollector.search_filter) > 0:
                ProcCollector.search_filter = ProcCollector.search_filter[:-1]
            else:
                continue
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
            #* Throttle successive filter refreshes slightly
            if filtered: Collector.collect_done.wait(0.1)
            filtered = True
            continue

        if key == "_null":
            continue
        elif key == "q":
            clean_quit()
        elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
            CONFIG.update_ms += 100
            Box.draw_update_ms()
        elif key == "-" and CONFIG.update_ms - 100 >= 100:
            CONFIG.update_ms -= 100
            Box.draw_update_ms()
        elif key in ["M", "escape"]:
            Menu.main()
        elif key in ["o", "f2"]:
            Menu.options()
        elif key in ["H", "f1"]:
            Menu.help()
        elif key == "m":
            #* Cycle through the preset box layouts, wrapping at the end
            if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
                Box.view_mode = list(Box.view_modes)[0]
            else:
                Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
            CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
            Draw.clear(saved=True)
            Term.refresh(force=True)
        elif key in box_keys:
            #* Toggle a single box on/off and switch to the "user" layout
            boxes = CONFIG.shown_boxes.split()
            if box_keys[key] in boxes:
                boxes.remove(box_keys[key])
            else:
                boxes.append(box_keys[key])
            CONFIG.shown_boxes = " ".join(boxes)
            Box.view_mode = "user"
            Box.view_modes["user"] = CONFIG.shown_boxes.split()
            Draw.clear(saved=True)
            Term.refresh(force=True)
        else:
            found = False
        if found: continue

        if "proc" in Box.boxes:
            #* Keys only active while the process box is shown
            if key in ["left", "right", "h", "l"]:
                ProcCollector.sorting(key)
            elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
                #* Collapse/expand the selected branch in tree view
                if ProcBox.selected_pid in ProcCollector.collapsed:
                    ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
                Collector.collect(ProcCollector, interrupt=True, redraw=True)
            elif key == "e":
                CONFIG.proc_tree = not CONFIG.proc_tree
                Collector.collect(ProcCollector, interrupt=True, redraw=True)
            elif key == "r":
                CONFIG.proc_reversed = not CONFIG.proc_reversed
                Collector.collect(ProcCollector, interrupt=True, redraw=True)
            elif key == "c":
                CONFIG.proc_per_core = not CONFIG.proc_per_core
                Collector.collect(ProcCollector, interrupt=True, redraw=True)
            elif key in ["f", "F"]:
                #* Enter filter-typing mode; "F" makes the filter case sensitive
                ProcBox.filtering = True
                ProcCollector.case_sensitive = key == "F"
                if not ProcCollector.search_filter: ProcBox.start = 0
                Collector.collect(ProcCollector, redraw=True, only_draw=True)
            elif key in ["T", "K", "I"] and (ProcBox.selected > 0 or ProcCollector.detailed):
                pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
                if psutil.pid_exists(pid):
                    #* T=terminate, K=kill, I=interrupt
                    if key == "T": sig = signal.SIGTERM
                    elif key == "K": sig = signal.SIGKILL
                    elif key == "I": sig = signal.SIGINT
                    try:
                        os.kill(pid, sig)
                    except Exception as e:
                        errlog.error(f'Exception when sending signal {sig} to pid {pid}')
                        errlog.exception(f'{e}')
            elif key == "delete" and ProcCollector.search_filter:
                ProcCollector.search_filter = ""
                Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
            elif key == "enter":
                #* Toggle the detailed view for the selected process
                if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
                    ProcCollector.detailed = True
                    ProcBox.last_selection = ProcBox.selected
                    ProcBox.selected = 0
                    ProcCollector.detailed_pid = ProcBox.selected_pid
                    ProcBox.resized = True
                    Collector.proc_counter = 1
                elif ProcCollector.detailed:
                    ProcBox.selected = ProcBox.last_selection
                    ProcBox.last_selection = 0
                    ProcCollector.detailed = False
                    ProcCollector.detailed_pid = None
                    ProcBox.resized = True
                    Collector.proc_counter = 1
                else:
                    continue
                #* Reset cached details and graphs before recollecting
                ProcCollector.details = {}
                ProcCollector.details_cpu = []
                ProcCollector.details_mem = []
                Graphs.detailed_cpu = NotImplemented
                Graphs.detailed_mem = NotImplemented
                Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
            elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect", "j", "k"]:
                ProcBox.selector(key, mouse_pos)

        if "net" in Box.boxes:
            #* Keys only active while the network box is shown
            if key in ["b", "n"]:
                NetCollector.switch(key)
            elif key == "z":
                NetCollector.reset = not NetCollector.reset
                Collector.collect(NetCollector, redraw=True)
            elif key == "y":
                CONFIG.net_sync = not CONFIG.net_sync
                Collector.collect(NetCollector, redraw=True)
            elif key == "a":
                NetCollector.auto_min = not NetCollector.auto_min
                NetCollector.net_min = {"download" : -1, "upload" : -1}
                Collector.collect(NetCollector, redraw=True)

        if "mem" in Box.boxes:
            #* Keys only active while the memory box is shown; toggles that
            #* restructure the box wait for the collector to go idle first
            if key == "g":
                CONFIG.mem_graphs = not CONFIG.mem_graphs
                Collector.collect(MemCollector, interrupt=True, redraw=True)
            elif key == "s":
                Collector.collect_idle.wait()
                CONFIG.swap_disk = not CONFIG.swap_disk
                Collector.collect(MemCollector, interrupt=True, redraw=True)
            elif key == "d":
                Collector.collect_idle.wait()
                CONFIG.show_disks = not CONFIG.show_disks
                Collector.collect(MemCollector, interrupt=True, redraw=True)
            elif key == "i":
                Collector.collect_idle.wait()
                CONFIG.io_mode = not CONFIG.io_mode
                Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->

#* Human readable CPU model name, resolved once at import time
CPU_NAME: str = get_cpu_name()
#* Logical thread -> physical core index mapping, resolved once at import time
CORE_MAP: List[int] = get_cpu_core_mapping()
#* Active color theme; declared here, assigned in main() after config is loaded
THEME: Theme
def main():
    """Initialize the terminal, theme, boxes, background threads and signal
    handlers, then enter the collect/draw/input loop until quit."""
    global THEME

    Term.width = os.get_terminal_size().columns
    Term.height = os.get_terminal_size().lines

    #? Init -------------------------------------------------------------------------------------->
    if DEBUG: TimeIt.start("Init")

    #? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
    Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
    Term.echo(False)
    #Term.refresh(force=True)

    #? Start a thread checking for updates while running init
    if CONFIG.update_check: UpdateChecker.run()

    #? Draw banner and init status
    if CONFIG.show_init and not Init.resized:
        Init.start()

    #? Load theme
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
    try:
        THEME = Theme(CONFIG.color_theme)
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Setup boxes
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
    try:
        if CONFIG.check_temp: CpuCollector.get_sensors()
        Box.calc_sizes()
        Box.draw_bg(now=False)
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
    try:
        signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
        signal.signal(signal.SIGCONT, now_awake) #* Resume
        signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
        signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Start a separate thread for reading keyboard input
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
    try:
        #* Ignore undecodable bytes from stdin instead of crashing the reader
        if isinstance(sys.stdin, io.TextIOWrapper) and sys.version_info >= (3, 7):
            sys.stdin.reconfigure(errors="ignore") # type: ignore
        Key.start()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Start a separate thread for data collection and drawing
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
    try:
        Collector.start()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Collect data and draw to buffer
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
    try:
        Collector.collect(draw_now=False)
        pass
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    #? Draw to screen
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
    try:
        Collector.collect_done.wait()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()

    Init.done()
    Term.refresh()
    Draw.out(clear=True)
    if CONFIG.draw_clock:
        Box.clock_on = True
    if DEBUG: TimeIt.stop("Init")

    #? Main loop ------------------------------------------------------------------------------------->

    def run():
        #* Inner loop: wait for input until the update timer expires, then collect
        while not False:
            Term.refresh()
            Timer.stamp()

            while Timer.not_zero():
                if Key.input_wait(Timer.left()):
                    process_keys()

            Collector.collect()

    #? Start main loop
    try:
        run()
    except Exception as e:
        errlog.exception(f'{e}')
        clean_quit(1)
    else:
        #? Quit cleanly even if false starts being true...
        clean_quit()
#* Only start the UI when executed as a script, not when imported
if __name__ == "__main__":
    main()
|
en
| 0.622187
|
#!/usr/bin/env python3 # pylint: disable=not-callable, no-member, unsubscriptable-object # indent = tab # tab-size = 4 # Copyright 2020 Aristocratos (<EMAIL>) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore #? Argument parser -------------------------------------------------------------------------------> #? Variables -------------------------------------------------------------------------------------> #*?This is the template used to create the config file #* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme. #* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai" color_theme="$color_theme" #* If the theme set background should be shown, set to False if you want terminal background transparency theme_background=$theme_background #* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false. truecolor=$truecolor #* Manually set which boxes to show. Available values are "cpu mem net proc", seperate values with whitespace. shown_boxes="$shown_boxes" #* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs. update_ms=$update_ms #* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms". 
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers) proc_update_mult=$proc_update_mult #* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive", #* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly. proc_sorting="$proc_sorting" #* Reverse sorting order, True or False. proc_reversed=$proc_reversed #* Show processes as a tree proc_tree=$proc_tree #* Which depth the tree view should auto collapse processes at tree_depth=$tree_depth #* Use the cpu graph colors in the process list. proc_colors=$proc_colors #* Use a darkening gradient in the process list. proc_gradient=$proc_gradient #* If process cpu usage should be of the core it's running on or usage of the total available cpu power. proc_per_core=$proc_per_core #* Show process memory as bytes instead of percent proc_mem_bytes=$proc_mem_bytes #* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see: #* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms. #* Select from a list of detected attributes from the options menu cpu_graph_upper="$cpu_graph_upper" #* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see: #* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms. #* Select from a list of detected attributes from the options menu cpu_graph_lower="$cpu_graph_lower" #* Toggles if the lower CPU graph should be inverted. cpu_invert_lower=$cpu_invert_lower #* Set to True to completely disable the lower CPU graph. cpu_single_graph=$cpu_single_graph #* Shows the system uptime in the CPU box. show_uptime=$show_uptime #* Check cpu temperature, needs "osx-cpu-temp" on MacOS X. check_temp=$check_temp #* Which sensor to use for cpu temperature, use options menu to select from list of available sensors. 
cpu_sensor=$cpu_sensor #* Show temperatures for cpu cores also if check_temp is True and sensors has been found show_coretemp=$show_coretemp #* Which temperature scale to use, available values: "celsius", "fahrenheit", "kelvin" and "rankine" temp_scale="$temp_scale" #* Draw a clock at top of screen, formatting according to strftime, empty string to disable. draw_clock="$draw_clock" #* Update main ui in background when menus are showing, set this to false if the menus is flickering too much for comfort. background_update=$background_update #* Custom cpu model name, empty string to disable. custom_cpu_name="$custom_cpu_name" #* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",". #* Begin line with "exclude=" to change to exclude filter, oterwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user" disks_filter="$disks_filter" #* Show graphs instead of meters for memory values. mem_graphs=$mem_graphs #* If swap memory should be shown in memory box. show_swap=$show_swap #* Show swap as a disk, ignores show_swap value above, inserts itself after first disk. swap_disk=$swap_disk #* If mem box should be split to also show disks info. show_disks=$show_disks #* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar. only_physical=$only_physical #* Read disks list from /etc/fstab. This also disables only_physical. use_fstab=$use_fstab #* Toggles if io stats should be shown in regular disk usage view show_io_stat=$show_io_stat #* Toggles io mode for disks, showing only big graphs for disk read/write speeds. io_mode=$io_mode #* Set to True to show combined read/write io graphs in io mode. io_graph_combined=$io_graph_combined #* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed" seperate disks with a comma ",". 
#* Example: "/dev/sda:100, /dev/sdb:20" io_graph_speeds="$io_graph_speeds" #* Set fixed values for network graphs, default "10M" = 10 Mibibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e "100mbit" net_download="$net_download" net_upload="$net_upload" #* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest. net_auto=$net_auto #* Sync the scaling for download and upload to whichever currently has the highest scale net_sync=$net_sync #* If the network graphs color gradient should scale to bandwith usage or auto scale, bandwith usage is based on "net_download" and "net_upload" values net_color_fixed=$net_color_fixed #* Starts with the Network Interface specified here. net_iface="$net_iface" #* Show battery stats in top right if battery is present show_battery=$show_battery #* Show init screen at startup, the init screen is purely cosmetical show_init=$show_init #* Enable check for new version from github.com/aristocratos/bpytop at start. update_check=$update_check #* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG". #* The level set includes all lower levels, i.e. "DEBUG" will show all logging info. log_level=$log_level #? Units for floating_humanizer function #? Setup error logger ----------------------------------------------------------------> #? Timers for testing and debugging --------------------------------------------------------------> #? Set up config class and load config -----------------------------------------------------------> Holds all config variables and functions for loading from and saving to disk Load config from file, set correct types for values and return a dict # type: ignore #type: ignore Save current config to config file if difference in values or version, creates a new file if not found #? 
Classes ---------------------------------------------------------------------------------------> Terminal info and commands #* Default foreground color #* Default background color #* Hide terminal cursor #* Show terminal cursor #* Switch to alternate screen #* Switch to normal screen #* Clear screen and set cursor to position 0,0 #* Enable reporting of mouse position on click and release #* Disable mouse reporting #* Enable reporting of mouse position at any movement #* Disable direct mouse reporting Update width, height and set resized flag if terminal has been resized Toggle input echo # type: ignore # type: ignore Text effects * trans(string: str): Replace whitespace with escape move right to not overwrite background behind whitespace. * uncolor(string: str) : Removes all 24-bit color and returns string . #* Escape sequence start #* Escape sequence separator #* Escape sequence end #* Reset foreground/background color and text effects #* Bold on #* Bold off #* Dark on #* Dark off #* Italic on #* Italic off #* Underline on #* Underline off #* Blink on #* Blink off #* Strike / crossed-out on #* Strike / crossed-out off #* Precompiled regex for finding a 24-bit color escape sequence in a string Set raw input mode for device Set nonblocking mode for device Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore() #* Move cursor to line, column #* Move cursor right x columns #* Move cursor left x columns #* Move cursor up x lines #* Move cursor down x lines #* Save cursor position #* Restore saved cursor postion Handles the threaded input reader for keypresses and mouse events Returns True if key is detected else waits out timer and returns False Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread. 
#* Wait 100ms for input on stdin then restart loop to check for stop flag #* Read 1 key safely with blocking on #* If first character is a escape sequence keep reading #* Report IO block in progress to prevent Draw functions from getting a IO Block error #* Wait for Draw function to finish if busy #* Set non blocking to prevent read stall #* Report IO blocking done #errlog.debug(f'{repr(input_key)}') #* Key is "escape" key if only containing \033 #* Detected mouse event #* Detected mouse move in mouse direct mode #* Detected mouse scroll up #* Detected mouse scroll down #* Detected mouse click release #* Check if mouse position is clickable #* Clean up "\" to not return escaped #* Go trough dict of escape codes to get the cleaned key name #* If not found in escape dict and length of key is 1, assume regular character #* Store up to 10 keys in input queue for later processing #* Set threading event to interrupt main thread sleep Holds the draw buffer and manages IO blocking queue * .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer * - Adding "+" prefix to name sets append to True and appends to name's current string * - Adding "!" suffix to name sets now to True and print name's current string * .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after * .now(*args) : Prints all arguments as a string * .clear(*names) : Clear named buffers, all if no argument * .last_screen() : Prints all saved buffers Wait for input reader and self to be idle then print to screen #type: ignore #type: ignore #type: ignore Holds representations for a 24-bit color value __init__(color, depth="fg", default=False) -- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string. 
-- depth accepts "fg" or "bg" __call__(*args) joins str arguments to a string and apply color __str__ returns escape sequence to set color __iter__ returns iteration over red, green and blue in integer values of 0-255. * Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str #type: ignore Returns escape sequence to set color * accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF" * or decimal RGB: r=0-255, g=0-255, b=0-255 * depth="fg" or "bg" Standard colors for menus and dialogs __init__ accepts a dict containing { "color_element" : "color" } #if CONFIG.color_theme != theme: CONFIG.color_theme = theme #* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict #* Create color gradients from one, two or three colors, 101 values indexed 0-100 #* Set terminal colors Sets themes dict with names and paths to all found themes Load a bashtop formatted theme file and return a dict Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False]) Class for creating and adding to graphs * __str__ : returns graph as a string * add(value: int) : adds a value to graph and returns it as a string * __call__ : same as add #* Convert values to percentage values of max_value with max_value as ceiling #* Calculate colors of graph #* If the size of given data set is bigger then width of graph, shrink data set #* If the size of given data set is smaller then width of graph, fill graph with whitespace #* Create the graph #* Switch between True and False graphs # type: ignore Holds all graphs and lists of graphs for dynamically created graphs Creates a percentage meter __init__(value, width, theme, gradient_name) to create new meter __call__(value) to set value and return meter as a string __str__ returns last set meter as a string Box class with all needed attributes for create_box() function Calculate sizes of boxes # type: ignore # type: ignore Draw 
all boxes outlines and titles # type: ignore #THREADS = 64 #* Mem #* Swap #* Disks #* Buttons and titles only redrawn if needed #* Processes labels #* Detailed box draw #* Checking for selection out of bounds #* Start iteration over all processes and info #* Creates one line for a process with all gathered information #* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates #* Draw scrollbar if needed #* Draw current selection and number of processes #* Clean up dead processes graphs and counters Data collector master class * .start(): Starts collector thread * .stop(): Stops collector thread * .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run This is meant to run in it's own thread, collecting and drawing when collect_run is set Setup collect queue for _runner Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps Check if we can get cpu temps and return method of getting temps Collects memory and disks information #* Collect memory #* Collect swap #* Collect disks usage #* Collect disk io #type: ignore #type: ignore Collects network stats #min_top: int = 10<<10 #* Stats structure = stats[netword device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool #* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str Get a list of all network devices sorted by highest throughput #* Calculate current speed Collects process stats List all processess with pid, name, arguments, threads, username, memory percent and cpu percent # type: ignore List all processess in a tree view with pid, name, threads, username, memory percent and cpu percent Holds all menus #out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}' # if my == y - 2: # type: ignore #? 
Functions -------------------------------------------------------------------------------------> Fetch a suitable CPU identifier from the CPU model name string Create a box from a box object or by given arguments #* Get values from box class if given #* Draw all horizontal lines #* Draw all vertical lines and fill if enabled #* Draw corners #* Draw titles if enabled Reset terminal settings and stop background input read before putting to sleep Set terminal settings and restart background input read SIGINT redirection to clean_quit() Stop background input read, save current config and reset terminal settings before quitting Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed * bit=True or defaults to bytes * start=int to set 1024 multiplier starting unit * short=True always returns 0 decimals and shortens unit to 1 character Returns a tuple with integer value and string unit converted from an integer in celsius to: celsius, fahrenheit, kelvin or rankine. # type: ignore #? Pre main --------------------------------------------------------------------------------------> #? Init --------------------------------------------------------------------------------------> #? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo #Term.refresh(force=True) #? Start a thread checking for updates while running init #? Draw banner and init status #? Load theme #? Setup boxes #? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH #* Ctrl-Z #* Resume #* Ctrl-C #* Terminal resized #? Start a separate thread for reading keyboard input # type: ignore #? Start a separate thread for data collection and drawing #? Collect data and draw to buffer #? Draw to screen #? Main loop -------------------------------------------------------------------------------------> #? Start main loop #? Quit cleanly even if false starts being true...
| 2.016348
| 2
|
autoindex.py
|
langsci/152
| 0
|
6628798
|
<reponame>langsci/152<filename>autoindex.py
#!/usr/bin/python3
"""Auto-index language names and subject terms in LaTeX chapters.

Reads language names from locallanguages.txt and subject terms from
localsubjectterms.txt, wraps their occurrences in chapters/*.tex in
\\ili{...} / \\isi{...} index commands, and writes the results into the
'indexed' folder.

Fixes over the previous version: Python 2 `print line` statement replaced
with a call; invalid regex escapes (`\\i` in patterns/replacements raised
re.error on Python >= 3.7) replaced with literal-safe counting and callable
replacements; the dead `SUBJECTP = re.compile` alias removed; files are
opened via context managers.
"""
import glob
import re

#* Lines containing any of these LaTeX commands are left untouched
EXCLUDERS = ("section", "caption", "chapter", "langinfo")


def should_index(line):
    """Return True unless the line contains a command we must not index into."""
    for excluder in EXCLUDERS:
        if "%s{" % excluder in line:
            return False
    return True


def index_line(line, lgs, terms):
    """Wrap known language names in \\ili{} and subject terms in \\isi{}.

    Negative lookbehinds prevent wrapping matches that are already indexed
    ("ili{"/"isi{" directly before) and, for terms, matches embedded at the
    end of a longer word (any letter directly before).
    """
    for lg in lgs:
        lg = lg.strip()
        if lg == '':
            continue
        #* Skip matches preceded by "ili{" or followed by a word char / "}"
        line = re.sub(r'(?<!ili{)%s(?![\w}])' % re.escape(lg),
                      lambda m: '\\ili{' + m.group(0) + '}', line)
    for term in terms:
        term = term.strip()
        if term == '':
            continue
        #* Both lookbehind branches are 4 chars wide as required by re
        line = re.sub(r'(?<!isi{|...[A-Za-z])%s(?![-_\w}])' % re.escape(term),
                      lambda m: '\\isi{' + m.group(0) + '}', line)
    return line


def index_file(path, lgs, terms):
    """Index one chapter file and write it to the 'indexed' folder.

    Returns a (language match count, term match count) tuple.
    """
    print("indexing %s" % path)
    #* Strip the preamble of edited-volume chapters to avoid indexing there
    with open(path) as infile:
        parts = infile.read().split(r"\begin{document}")
    content = parts[-1]
    preamble = ''
    joiner = ''
    if len(parts) == 2:
        preamble = parts[0]
        joiner = r"\begin{document}"
    newlines = []
    for line in content.split('\n'):
        if should_index(line):
            line = index_line(line, lgs, terms)
        else:
            #* Keep the excluded line unchanged, but report it
            print(line)
        newlines.append(line)
    content = "\n".join(newlines)
    nlg = content.count('\\ili{')
    nt = content.count('\\isi{')
    with open(path.replace('chapters', 'indexed'), 'w') as outfile:
        outfile.write(preamble)
        outfile.write(joiner)
        outfile.write(content)
    print(" %s now contains %i indexed languages and %i indexed subject terms" % (path.split('/')[-1], nlg, nt))
    return nlg, nt


def main():
    with open("locallanguages.txt") as f:
        lgs = f.read().split('\n')
    #* Terms are processed in reverse file order to avoid double indexing
    with open("localsubjectterms.txt") as f:
        terms = f.read().split('\n')[::-1]
    print("found %i language names for autoindexing" % len(lgs))
    print("found %i subject terms for autoindexing" % len(terms))
    for path in glob.glob('chapters/*tex'):
        index_file(path, lgs, terms)
    print("indexed files are in the folder 'indexed'")


if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
"""Auto-index language names and subject terms in LaTeX chapters.

Reads the local word lists, wraps every free-standing occurrence found in
chapters/*.tex in \\ili{...} (languages) or \\isi{...} (subject terms),
and writes the result into the 'indexed' folder.
"""
import glob
import re
lgs = open("locallanguages.txt").read().split('\n')
# Reverse the term list so later (typically longer) entries are tried
# first, avoiding double indexing of overlapping terms.
terms = open("localsubjectterms.txt").read().split('\n')[::-1]
print("found %i language names for autoindexing" % len(lgs))
print("found %i subject terms for autoindexing" % len(terms))
files = glob.glob('chapters/*tex')
for f in files:
    print("indexing %s" % f)
    # Strip the preamble of edited-volume chapters so that nothing before
    # \begin{document} gets indexed.
    a = open(f).read().split(r"\begin{document}")
    content = a[-1]
    preamble = ''
    joiner = ''
    if len(a) == 2:
        preamble = a[0]
        joiner = r"\begin{document}"
    lines = content.split('\n')
    # Lines containing these macros (headings, captions, ...) are left
    # untouched; they are reported but still copied through verbatim.
    excluders = ("section", "caption", "chapter", "langinfo")
    newlines = []
    for line in lines:
        included = True
        for excluder in excluders:
            if "%s{" % excluder in line:
                included = False
                print(line)  # BUG FIX: was `print line`, a SyntaxError under python3
        if included:
            for lg in lgs:
                lg = lg.strip()
                if lg == '':
                    continue
                # NOTE(review): lg is interpolated into the pattern
                # unescaped; names containing regex metacharacters would
                # misbehave -- confirm the word lists are plain text.
                # Replacement uses \\ so re.sub emits a literal backslash;
                # the original '\ili{...}' template is an invalid escape.
                line = re.sub(r'(?<!ili{)%s(?![\w}])' % lg, r'\\ili{%s}' % lg, line)
            for term in terms:
                term = term.strip()
                if term == '':
                    continue
                line = re.sub(r'(?<!isi{|...[A-Za-z])%s(?![-_\w}])' % term, r'\\isi{%s}' % term, line)
        newlines.append(line)
    content = "\n".join(newlines)
    # Plain substring counts; the original passed '\ili{' to re.findall,
    # where \i is an invalid regex escape (re.error on Python 3.7+).
    nlg = content.count('\\ili{')
    nt = content.count('\\isi{')
    outfile = open(f.replace('chapters', 'indexed'), 'w')
    outfile.write(preamble)
    outfile.write(joiner)
    outfile.write(content)
    outfile.close()
    print(" %s now contains %i indexed languages and %i indexed subject terms" % (f.split('/')[-1], nlg, nt))
print("indexed files are in the folder 'indexed'")
|
en
| 0.647531
|
#!/usr/bin/python3 #reverse to avoid double indexing #strip preamble of edited volume chapters to avoid indexing there
| 2.562613
| 3
|
src/an_SumArea.py
|
mbonnema/SWAV
| 0
|
6628799
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 11:37:11 2021
@author: mbonnema
"""
import numpy as np
def SumArea(A_int, D_int):
    """Element-wise sum of the interpolated area series of every site.

    Parameters
    ----------
    A_int : dict
        Maps each site key to 37 area values, one per time step (the
        fixed length 37 is assumed throughout this module -- TODO
        confirm against the collection scripts).
    D_int : dict
        Maps the same keys to date sequences; the last key whose date
        sequence has exactly 37 entries supplies the returned date axis.

    Returns
    -------
    tuple
        ``(Asum, Dates)``: per-time-step totals as a flat list (the
        integer 0 when ``A_int`` is empty) and the shared date sequence
        (0 when no key had a full 37-entry date list).
    """
    A_total = 0
    Dates = 0
    for key in A_int:
        # Reshape to a column vector so accumulation stays element-wise.
        Ai = np.reshape(A_int[key], [37, 1])
        A_total = A_total + Ai
        if len(D_int[key]) == 37:
            Dates = D_int[key]
    Asum = []
    if hasattr(A_total, "__len__"):
        for a in A_total:
            # Unwrap doubly-nested rows; a plain (1,)-shaped row raises
            # when indexed twice and is kept as-is.  Narrowed from the
            # original bare ``except:`` which would also hide real bugs.
            try:
                Asum.append(a[0][0])
            except (IndexError, TypeError):
                Asum.append(a)
    else:
        # No sites at all: A_total is still the integer 0.
        Asum = 0
    return Asum, Dates
def SumAreaSq(A_int, D_int):
    """Root-sum-of-squares of the interpolated area series of every site.

    Same contract as ``SumArea``, but each site's 37-value series is
    squared before accumulation and the square root of the total is
    returned -- i.e. a quadrature combination of the per-site series
    (presumably combining independent uncertainties; TODO confirm).

    Returns
    -------
    tuple
        ``(np.sqrt(Asum), Dates)``; with no sites the first element is
        ``np.sqrt(0)`` == 0.0 and Dates is 0.
    """
    A_total = 0
    Dates = 0
    for key in A_int:
        Ai = np.reshape(A_int[key], [37, 1])
        Ai = np.square(Ai)  # square each series before summation
        A_total = A_total + Ai
        if len(D_int[key]) == 37:
            Dates = D_int[key]
    Asum = []
    if hasattr(A_total, "__len__"):
        for a in A_total:
            # Unwrap doubly-nested rows; a plain (1,)-shaped row raises
            # when indexed twice and is kept as-is.  Narrowed from the
            # original bare ``except:`` which would also hide real bugs.
            try:
                Asum.append(a[0][0])
            except (IndexError, TypeError):
                Asum.append(a)
    else:
        Asum = 0
    return np.sqrt(Asum), Dates
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 11:37:11 2021
@author: mbonnema
"""
import numpy as np
def SumArea(A_int, D_int):
    """Element-wise sum of the interpolated area series of every site.

    Parameters
    ----------
    A_int : dict
        Maps each site key to 37 area values, one per time step (the
        fixed length 37 is assumed throughout this module -- TODO
        confirm against the collection scripts).
    D_int : dict
        Maps the same keys to date sequences; the last key whose date
        sequence has exactly 37 entries supplies the returned date axis.

    Returns
    -------
    tuple
        ``(Asum, Dates)``: per-time-step totals as a flat list (the
        integer 0 when ``A_int`` is empty) and the shared date sequence
        (0 when no key had a full 37-entry date list).
    """
    A_total = 0
    Dates = 0
    for key in A_int:
        # Reshape to a column vector so accumulation stays element-wise.
        Ai = np.reshape(A_int[key], [37, 1])
        A_total = A_total + Ai
        if len(D_int[key]) == 37:
            Dates = D_int[key]
    Asum = []
    if hasattr(A_total, "__len__"):
        for a in A_total:
            # Unwrap doubly-nested rows; a plain (1,)-shaped row raises
            # when indexed twice and is kept as-is.  Narrowed from the
            # original bare ``except:`` which would also hide real bugs.
            try:
                Asum.append(a[0][0])
            except (IndexError, TypeError):
                Asum.append(a)
    else:
        # No sites at all: A_total is still the integer 0.
        Asum = 0
    return Asum, Dates
def SumAreaSq(A_int, D_int):
    """Root-sum-of-squares of the interpolated area series of every site.

    Same contract as ``SumArea``, but each site's 37-value series is
    squared before accumulation and the square root of the total is
    returned -- i.e. a quadrature combination of the per-site series
    (presumably combining independent uncertainties; TODO confirm).

    Returns
    -------
    tuple
        ``(np.sqrt(Asum), Dates)``; with no sites the first element is
        ``np.sqrt(0)`` == 0.0 and Dates is 0.
    """
    A_total = 0
    Dates = 0
    for key in A_int:
        Ai = np.reshape(A_int[key], [37, 1])
        Ai = np.square(Ai)  # square each series before summation
        A_total = A_total + Ai
        if len(D_int[key]) == 37:
            Dates = D_int[key]
    Asum = []
    if hasattr(A_total, "__len__"):
        for a in A_total:
            # Unwrap doubly-nested rows; a plain (1,)-shaped row raises
            # when indexed twice and is kept as-is.  Narrowed from the
            # original bare ``except:`` which would also hide real bugs.
            try:
                Asum.append(a[0][0])
            except (IndexError, TypeError):
                Asum.append(a)
    else:
        Asum = 0
    return np.sqrt(Asum), Dates
|
en
| 0.610808
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Wed Dec 8 11:37:11 2021 @author: mbonnema
| 3.445489
| 3
|
Text-Transformation/testing_framework.py
|
cam626/RPEye-Link-Analysis
| 2
|
6628800
|
from text_extract import TextExtractor
import ngrams
import json
import compare
import os
import unittest
import argparse
################################################
#
# UTILITY FUNCTIONS
#
################################################
'''
Given a path to an html file, open it and return its
contents as a string
'''
def openHtmlFileAsString(file_path):
    """Read the HTML file at *file_path* and return its full text."""
    with open(file_path, 'r') as handle:
        return handle.read()
'''
Given a path to an txt file, open it and return its
contents as a list of words
'''
def openTxtFileAsList(file_path):
    """Read the text file at *file_path* and return its whitespace-split words."""
    with open(file_path, 'r') as handle:
        return handle.read().split()
'''
Given a path to a txt file, open it and return its
contents as a single string
'''
def openTxtFileAsString(file_path):
    """Read the text file at *file_path* and return its raw contents.

    Unlike openTxtFileAsList, this performs no splitting.  (The original
    leading comment was a copy-paste that wrongly claimed a word list
    was returned.)
    """
    with open(file_path, 'r') as infile:
        return infile.read()
'''
Given a path to an json file, open it and return its
contents as a dict
'''
def openJsonFile(file_path):
    """Parse the JSON file at *file_path* and return the resulting object."""
    with open(file_path) as handle:
        return json.load(handle)
################################################
#
# TEXT EXTRACTOR TESTS
#
################################################
class TextExtractorTests(unittest.TestCase):
    """Golden-file tests for TextExtractor.

    Each test extracts the text of Examples/html_files/<name>.html and
    compares it byte-for-byte with
    Examples/expected_html_output/<name>.txt (regenerated via
    ``--generate``).
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were nine
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.html".format(test_file))
        html_file = os.path.join('Examples', 'html_files', '{0}.html'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_html_output', '{0}.txt'.format(test_file))
        te = TextExtractor(openHtmlFileAsString(html_file))
        expected_output = openTxtFileAsString(expected_output_file)
        self.assertEqual(str(te), expected_output)

    def test1(self):
        print("  BEGINNING TEXT EXTRACTION TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')

    def test7(self):
        self._check('example7')

    def test8(self):
        self._check('example8')

    def test9(self):
        self._check('example9')
################################################
#
# NGRAM TESTS
#
################################################
class NGramTests(unittest.TestCase):
    """Golden-file tests for ngrams.generate_ngrams.

    Each test 5-grams the words of Examples/txt_files/<name>.txt and
    compares the result with Examples/expected_ngram_output/<name>.json.
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were six
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.txt".format(test_file))
        txt_file = os.path.join('Examples', 'txt_files', '{0}.txt'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_ngram_output', '{0}.json'.format(test_file))
        data = ngrams.generate_ngrams(openTxtFileAsList(txt_file), 5)
        expected_output = openJsonFile(expected_output_file)
        self.assertDictEqual(data, expected_output)

    def test1(self):
        print("  BEGINNING NGRAM TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')
################################################
#
# TEXT EXTRACTOR TESTS
#
################################################
class EndToEnd(unittest.TestCase):
    """End-to-end golden-file tests: HTML -> TextExtractor -> 5-grams.

    Each test runs the full pipeline on Examples/html_files/<name>.html
    and compares against Examples/expected_e2e_test_output/<name>.json.
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were seven
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.html".format(test_file))
        html_file = os.path.join('Examples', 'html_files', '{0}.html'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_e2e_test_output', '{0}.json'.format(test_file))
        expected_output = openJsonFile(expected_output_file)
        te = TextExtractor(openHtmlFileAsString(html_file))
        data = ngrams.generate_ngrams(te.getListOfWords(), 5)
        self.assertDictEqual(data, expected_output)

    def test1(self):
        print("  BEGINNING END TO END TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')

    def test7(self):
        self._check('example7')
'''
Utility to generate all expected output files.
'''
def generate_expected_output():
    """Regenerate the golden files under Examples/ that the test classes
    above compare against.

    Overwrites Examples/expected_html_output/*.txt,
    Examples/expected_ngram_output/*.json and
    Examples/expected_e2e_test_output/*.json from the current behaviour
    of TextExtractor and ngrams.generate_ngrams -- run only after a
    deliberate behaviour change.
    """
    if not os.path.exists('Examples'):
        print("ERROR: please invoke this script one level below the 'Examples' directory")
        # BUG FIX: the original called sys.exit(1) but this module never
        # imports sys, so the error path raised NameError instead of
        # exiting.  SystemExit(1) gives the intended exit status.
        raise SystemExit(1)
    # Ensure each golden-output directory exists.
    for subdir in ('expected_html_output', 'expected_ngram_output', 'expected_e2e_test_output'):
        if not os.path.exists(os.path.join('Examples', subdir)):
            os.mkdir(os.path.join('Examples', subdir))
    # TEXT EXTRACTION golden files: <name>.html -> expected_html_output/<name>.txt
    for filename in os.listdir(os.path.join('Examples', 'html_files')):
        file_path = os.path.join('Examples', 'html_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read()
        te = TextExtractor(input_string)
        outfile_name = os.path.join('Examples', 'expected_html_output',
                                    "{0}.txt".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            outfile.write(str(te))
        print("Wrote {0}".format(outfile_name))
    # NGRAM golden files: <name>.txt -> expected_ngram_output/<name>.json
    # We test 5-grams because that's what the system spec requires.
    for filename in os.listdir(os.path.join('Examples', 'txt_files')):
        file_path = os.path.join('Examples', 'txt_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read().split()
        data = ngrams.generate_ngrams(input_string, 5)
        outfile_name = os.path.join('Examples', 'expected_ngram_output',
                                    "{0}.json".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            json.dump(data, outfile)
        print("Wrote {0}".format(outfile_name))
    # END-TO-END golden files: <name>.html -> expected_e2e_test_output/<name>.json
    for filename in os.listdir(os.path.join('Examples', 'html_files')):
        file_path = os.path.join('Examples', 'html_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read()
        te = TextExtractor(input_string)
        data = ngrams.generate_ngrams(te.getListOfWords(), 5)
        outfile_name = os.path.join('Examples', 'expected_e2e_test_output',
                                    "{0}.json".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            json.dump(data, outfile)
        print("Wrote {0}".format(outfile_name))
if __name__ == '__main__':
    # Command-line entry point.  The test classes are normally run with
    # `python3 -m unittest <this file>`; invoking the script directly
    # only supports regenerating the golden files via --generate.
    parser = argparse.ArgumentParser(description='Unit Testing Framework for the Red M Text Transformation Module. To run tests, invoke using python3 -m unittest {0}'.format(os.path.basename(__file__)))
    parser.add_argument('--generate', action="store_true", default=False, help="Generate the expected output for testing")
    args = parser.parse_args()
    if args.generate:
        # Deliberately overwrites every expected-output file.
        generate_expected_output()
    else:
        # Any direct invocation without --generate is an error: point the
        # user at the two supported ways of running this file.
        print("ERROR: to generate expected output files, please use the --generate option.\nOtherwise, please call this script as follows:\npython3 -m unittest {0}".format(os.path.basename(__file__)))
|
from text_extract import TextExtractor
import ngrams
import json
import compare
import os
import unittest
import argparse
################################################
#
# UTILITY FUNCTIONS
#
################################################
'''
Given a path to an html file, open it and return its
contents as a string
'''
def openHtmlFileAsString(file_path):
    """Read the HTML file at *file_path* and return its full text."""
    with open(file_path, 'r') as handle:
        return handle.read()
'''
Given a path to an txt file, open it and return its
contents as a list of words
'''
def openTxtFileAsList(file_path):
    """Read the text file at *file_path* and return its whitespace-split words."""
    with open(file_path, 'r') as handle:
        return handle.read().split()
'''
Given a path to a txt file, open it and return its
contents as a single string
'''
def openTxtFileAsString(file_path):
    """Read the text file at *file_path* and return its raw contents.

    Unlike openTxtFileAsList, this performs no splitting.  (The original
    leading comment was a copy-paste that wrongly claimed a word list
    was returned.)
    """
    with open(file_path, 'r') as infile:
        return infile.read()
'''
Given a path to an json file, open it and return its
contents as a dict
'''
def openJsonFile(file_path):
    """Parse the JSON file at *file_path* and return the resulting object."""
    with open(file_path) as handle:
        return json.load(handle)
################################################
#
# TEXT EXTRACTOR TESTS
#
################################################
class TextExtractorTests(unittest.TestCase):
    """Golden-file tests for TextExtractor.

    Each test extracts the text of Examples/html_files/<name>.html and
    compares it byte-for-byte with
    Examples/expected_html_output/<name>.txt (regenerated via
    ``--generate``).
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were nine
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.html".format(test_file))
        html_file = os.path.join('Examples', 'html_files', '{0}.html'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_html_output', '{0}.txt'.format(test_file))
        te = TextExtractor(openHtmlFileAsString(html_file))
        expected_output = openTxtFileAsString(expected_output_file)
        self.assertEqual(str(te), expected_output)

    def test1(self):
        print("  BEGINNING TEXT EXTRACTION TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')

    def test7(self):
        self._check('example7')

    def test8(self):
        self._check('example8')

    def test9(self):
        self._check('example9')
################################################
#
# NGRAM TESTS
#
################################################
class NGramTests(unittest.TestCase):
    """Golden-file tests for ngrams.generate_ngrams.

    Each test 5-grams the words of Examples/txt_files/<name>.txt and
    compares the result with Examples/expected_ngram_output/<name>.json.
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were six
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.txt".format(test_file))
        txt_file = os.path.join('Examples', 'txt_files', '{0}.txt'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_ngram_output', '{0}.json'.format(test_file))
        data = ngrams.generate_ngrams(openTxtFileAsList(txt_file), 5)
        expected_output = openJsonFile(expected_output_file)
        self.assertDictEqual(data, expected_output)

    def test1(self):
        print("  BEGINNING NGRAM TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')
################################################
#
# TEXT EXTRACTOR TESTS
#
################################################
class EndToEnd(unittest.TestCase):
    """End-to-end golden-file tests: HTML -> TextExtractor -> 5-grams.

    Each test runs the full pipeline on Examples/html_files/<name>.html
    and compares against Examples/expected_e2e_test_output/<name>.json.
    """

    def _check(self, test_file):
        # Shared body for every test below; the originals were seven
        # copy-pasted variants differing only in the example name.
        print("  Testing {0}.html".format(test_file))
        html_file = os.path.join('Examples', 'html_files', '{0}.html'.format(test_file))
        expected_output_file = os.path.join('Examples', 'expected_e2e_test_output', '{0}.json'.format(test_file))
        expected_output = openJsonFile(expected_output_file)
        te = TextExtractor(openHtmlFileAsString(html_file))
        data = ngrams.generate_ngrams(te.getListOfWords(), 5)
        self.assertDictEqual(data, expected_output)

    def test1(self):
        print("  BEGINNING END TO END TESTS")
        self._check('example1')

    def test2(self):
        self._check('example2')

    def test3(self):
        self._check('example3')

    def test4(self):
        self._check('example4')

    def test5(self):
        self._check('example5')

    def test6(self):
        self._check('example6')

    def test7(self):
        self._check('example7')
'''
Utility to generate all expected output files.
'''
def generate_expected_output():
    """Regenerate the golden files under Examples/ that the test classes
    above compare against.

    Overwrites Examples/expected_html_output/*.txt,
    Examples/expected_ngram_output/*.json and
    Examples/expected_e2e_test_output/*.json from the current behaviour
    of TextExtractor and ngrams.generate_ngrams -- run only after a
    deliberate behaviour change.
    """
    if not os.path.exists('Examples'):
        print("ERROR: please invoke this script one level below the 'Examples' directory")
        # BUG FIX: the original called sys.exit(1) but this module never
        # imports sys, so the error path raised NameError instead of
        # exiting.  SystemExit(1) gives the intended exit status.
        raise SystemExit(1)
    # Ensure each golden-output directory exists.
    for subdir in ('expected_html_output', 'expected_ngram_output', 'expected_e2e_test_output'):
        if not os.path.exists(os.path.join('Examples', subdir)):
            os.mkdir(os.path.join('Examples', subdir))
    # TEXT EXTRACTION golden files: <name>.html -> expected_html_output/<name>.txt
    for filename in os.listdir(os.path.join('Examples', 'html_files')):
        file_path = os.path.join('Examples', 'html_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read()
        te = TextExtractor(input_string)
        outfile_name = os.path.join('Examples', 'expected_html_output',
                                    "{0}.txt".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            outfile.write(str(te))
        print("Wrote {0}".format(outfile_name))
    # NGRAM golden files: <name>.txt -> expected_ngram_output/<name>.json
    # We test 5-grams because that's what the system spec requires.
    for filename in os.listdir(os.path.join('Examples', 'txt_files')):
        file_path = os.path.join('Examples', 'txt_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read().split()
        data = ngrams.generate_ngrams(input_string, 5)
        outfile_name = os.path.join('Examples', 'expected_ngram_output',
                                    "{0}.json".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            json.dump(data, outfile)
        print("Wrote {0}".format(outfile_name))
    # END-TO-END golden files: <name>.html -> expected_e2e_test_output/<name>.json
    for filename in os.listdir(os.path.join('Examples', 'html_files')):
        file_path = os.path.join('Examples', 'html_files', filename)
        with open(file_path, 'r') as infile:
            input_string = infile.read()
        te = TextExtractor(input_string)
        data = ngrams.generate_ngrams(te.getListOfWords(), 5)
        outfile_name = os.path.join('Examples', 'expected_e2e_test_output',
                                    "{0}.json".format(filename.split('.')[0]))
        with open(outfile_name, 'w') as outfile:
            json.dump(data, outfile)
        print("Wrote {0}".format(outfile_name))
if __name__ == '__main__':
    # CLI entry point: only useful for regenerating the expected fixtures;
    # the tests themselves are run through `python3 -m unittest`.
    script = os.path.basename(__file__)
    cli = argparse.ArgumentParser(
        description='Unit Testing Framework for the Red M Text Transformation Module. To run tests, invoke using python3 -m unittest {0}'.format(script))
    cli.add_argument('--generate', action="store_true", default=False,
                     help="Generate the expected output for testing")
    opts = cli.parse_args()
    if not opts.generate:
        print("ERROR: to generate expected output files, please use the --generate option.\nOtherwise, please call this script as follows:\npython3 -m unittest {0}".format(script))
    else:
        generate_expected_output()
|
en
| 0.528363
|
################################################ # # UTILITY FUNCTIONS # ################################################ Given a path to an html file, open it and return its contents as a string Given a path to an txt file, open it and return its contents as a list of words Given a path to an txt file, open it and return its contents as a list of words Given a path to an json file, open it and return its contents as a dict ################################################ # # TEXT EXTRACTOR TESTS # ################################################ ################################################ # # NGRAM TESTS # ################################################ ################################################ # # TEXT EXTRACTOR TESTS # ################################################ Utility to generate all expected output files. #TEXT EXTRACTION TESTS #for all of the html files #grab the contents #extract them #the output file name will be the input file name, but .txt instead of .html. #the output file goes to the expected_html output directory. #dump the results into the output file. #NGRAM TESTS #for every text input file. #grab its contents. #Feed them into the ngram generator. We test 5 grams, because that's what's in the system spec. #output file name is the input file name .json #the output file goes in the expected_ngram_output directory #dump a json to the output file. #E2E TESTS #for every html file #grab the contents #extract them #ngram them #the output file name will be the input file name, but .txt instead of .html. #the output file goes to the expected_html output directory. #dump the results into the output file.
| 3.166755
| 3
|
9term/fipt/P2PLending/partnership/serializers.py
|
nik-sergeson/bsuir-informatics-labs
| 0
|
6628801
|
from P2PLending.partnership.models import MoneyRequestPartnershipSuggestion, MoneyProposalPartnershipSuggestion
from rest_framework.serializers import ModelSerializer
class RequestPartnershipSuggestionSerializer(ModelSerializer):
    """DRF serializer for a partnership suggestion attached to a money request."""

    class Meta:
        model = MoneyRequestPartnershipSuggestion
        # API surface: when the suggestion was made, by whom, and the rate offered.
        fields = ('creation_date', 'user', 'rate')
class ProposalPartnershipSuggestionSerializer(ModelSerializer):
    """DRF serializer for a partnership suggestion attached to a money proposal."""

    class Meta:
        model = MoneyProposalPartnershipSuggestion
        # API surface: creation timestamp, suggesting user, proposed amount/term,
        # and the estimated return probability stored on the model.
        fields = ('creation_date', 'user', 'amount', 'term', 'return_probability')
|
from P2PLending.partnership.models import MoneyRequestPartnershipSuggestion, MoneyProposalPartnershipSuggestion
from rest_framework.serializers import ModelSerializer
class RequestPartnershipSuggestionSerializer(ModelSerializer):
class Meta:
model = MoneyRequestPartnershipSuggestion
fields = ('creation_date', 'user', 'rate')
class ProposalPartnershipSuggestionSerializer(ModelSerializer):
class Meta:
model = MoneyProposalPartnershipSuggestion
fields = ('creation_date', 'user', 'amount', 'term', 'return_probability')
|
none
| 1
| 2.055885
| 2
|
|
db/problems.py
|
pan-lei/doctor
| 2
|
6628802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/12 16:00
# @Author : 潘磊
# @function: 问题文档结构
from mongoengine import *
class Problem(Document):
    """MongoEngine document for one scraped question record.

    Per the file header this models a "question document structure"
    (问题文档结构) tying a question URL to a doctor and hospital.
    """

    doctor = StringField(required=True,max_length=5)  # doctor name, max 5 chars (presumably a short Chinese name -- confirm)
    hospital = StringField(required=True,max_length=50)  # hospital name, max 50 chars
    date = DateTimeField(required=True)  # timestamp associated with the question
    url = StringField(required=True, max_length=100)  # source URL of the question page
    meta = {
        'collection': 'problems'  # explicit MongoDB collection name
    }
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/12 16:00
# @Author : 潘磊
# @function: 问题文档结构
from mongoengine import *
class Problem(Document):
doctor = StringField(required=True,max_length=5)
hospital = StringField(required=True,max_length=50)
date = DateTimeField(required=True)
url = StringField(required=True, max_length=100)
meta = {
'collection': 'problems' # 指定集合名称
}
|
zh
| 0.47816
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/4/12 16:00 # @Author : 潘磊 # @function: 问题文档结构 # 指定集合名称
| 2.164306
| 2
|
scripts/print_epoch_acc.py
|
searchivarius/OpenNMT-py
| 1
|
6628803
|
#!/usr/bin/env python
"""Print per-epoch validation accuracy and perplexity from a training log.

Reads the log file named on the command line and emits three tab-separated
rows: epoch numbers, accuracies, perplexities. The log is expected to
alternate 'Validation perplexity:' / 'Validation accuracy:' lines.
"""
import sys

accuracies = []
perplexities = []
with open(sys.argv[1]) as f:
    for ln, raw in enumerate(f, start=1):
        line = raw.strip()
        if line.startswith('Validation perplexity:'):
            # A perplexity line must open each epoch's pair.
            if len(accuracies) != len(perplexities):
                raise Exception('Bad file format, unbalanced val. perpl. msg in line %d' % ln)
            perplexities.append(line.split(':')[1].strip())
        elif line.startswith('Validation accuracy:'):
            # An accuracy line must follow exactly one pending perplexity line.
            if len(accuracies) != len(perplexities) - 1:
                raise Exception('Bad file format, unbalanced val. acc. msg in line %d' % ln)
            accuracies.append(line.split(':')[1].strip())

epochs = len(accuracies)
print('\t'.join(str(i + 1) for i in range(epochs)))
print('\t'.join(accuracies))
print('\t'.join(perplexities))
|
#!/usr/bin/env python
import sys
acc = []
perpl = []
with open(sys.argv[1]) as f:
ln=0
for line in f:
line = line.strip()
ln += 1
if line.startswith('Validation perplexity:'):
if len(acc) != len(perpl):
raise Exception('Bad file format, unbalanced val. perpl. msg in line %d' % ln)
perpl.append(line.split(':')[1].strip())
if line.startswith('Validation accuracy:'):
if len(acc) != len(perpl) - 1:
raise Exception('Bad file format, unbalanced val. acc. msg in line %d' % ln)
acc.append(line.split(':')[1].strip())
epochs = len(acc)
print('\t'.join([str(i+1) for i in range(epochs)]))
print('\t'.join(acc))
print('\t'.join(perpl))
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.723889
| 3
|
dtlpy/entities/annotation_definitions/base_annotation_definition.py
|
dataloop-ai/dtlpy
| 10
|
6628804
|
<gh_stars>1-10
import logging
import numpy as np
logger = logging.getLogger(name=__name__)
class BaseAnnotationDefinition:
    """Common base for annotation shapes: a bounding box plus metadata.

    Holds a free-text description, a list of attribute strings, and the
    four box edges (top/left/bottom/right) which default to 0 until set.
    """

    def __init__(self, description=None, attributes=None):
        """
        :param description: optional free-text description of the annotation
        :param attributes: optional list of attribute strings (defaults to an empty list)
        """
        self.description = description
        self._top = 0
        self._left = 0
        self._bottom = 0
        self._right = 0
        # Build a fresh list per instance instead of a shared mutable default.
        if attributes is None:
            attributes = list()
        self.attributes = attributes

    # Box edges are exposed through trivial properties so subclasses can
    # override them with computed values.
    @property
    def top(self):
        return self._top

    @top.setter
    def top(self, v):
        self._top = v

    @property
    def left(self):
        return self._left

    @left.setter
    def left(self, v):
        self._left = v

    @property
    def bottom(self):
        return self._bottom

    @bottom.setter
    def bottom(self, v):
        self._bottom = v

    @property
    def right(self):
        return self._right

    @right.setter
    def right(self, v):
        self._right = v

    @property
    def height(self):
        # Rounded box height; np.round returns a numpy scalar, not a plain int.
        return np.round(self.bottom - self.top)

    @property
    def width(self):
        # Rounded box width; np.round returns a numpy scalar, not a plain int.
        return np.round(self.right - self.left)

    @staticmethod
    def add_text_to_image(image, annotation):
        """Render '<label>-<attr1,attr2,...>' onto *image* near the annotation.

        :param image: image array drawn on in place by cv2.putText
        :param annotation: object providing label, attributes, top and left
        :return: the image with the text rendered on it
        :raises ImportError: when OpenCV (cv2) is not installed
        """
        try:
            import cv2
        except (ImportError, ModuleNotFoundError):
            logger.error(
                'Import Error! Cant import cv2. Annotations operations will be limited. import manually and fix errors')
            raise
        text = '{label}-{attributes}'.format(label=annotation.label, attributes=','.join(annotation.attributes))
        top = annotation.top
        left = annotation.left
        # Fall back to a tenth of the image size so text anchored at 0 stays visible.
        if top == 0:
            top = image.shape[0] / 10
        if left == 0:
            left = image.shape[1] / 10
        # NOTE(review): cv2.putText's org parameter is the (x, y) bottom-left
        # corner, i.e. (left, top); passing (top, left) here looks swapped --
        # confirm against callers before changing.
        return cv2.putText(img=image,
                           text=text,
                           org=tuple([int(np.round(top)), int(np.round(left))]),
                           color=(255, 0, 0),
                           fontFace=cv2.FONT_HERSHEY_DUPLEX,
                           fontScale=1,
                           thickness=2)

    @property
    def logger(self):
        # Expose the module-level logger to subclasses.
        return logger
|
import logging
import numpy as np
logger = logging.getLogger(name=__name__)
class BaseAnnotationDefinition:
def __init__(self, description=None, attributes=None):
self.description = description
self._top = 0
self._left = 0
self._bottom = 0
self._right = 0
if attributes is None:
attributes = list()
self.attributes = attributes
@property
def top(self):
return self._top
@top.setter
def top(self, v):
self._top = v
@property
def left(self):
return self._left
@left.setter
def left(self, v):
self._left = v
@property
def bottom(self):
return self._bottom
@bottom.setter
def bottom(self, v):
self._bottom = v
@property
def right(self):
return self._right
@right.setter
def right(self, v):
self._right = v
@property
def height(self):
return np.round(self.bottom - self.top)
@property
def width(self):
return np.round(self.right - self.left)
@staticmethod
def add_text_to_image(image, annotation):
"""
:param image:
:param annotation:
"""
try:
import cv2
except (ImportError, ModuleNotFoundError):
logger.error(
'Import Error! Cant import cv2. Annotations operations will be limited. import manually and fix errors')
raise
text = '{label}-{attributes}'.format(label=annotation.label, attributes=','.join(annotation.attributes))
top = annotation.top
left = annotation.left
if top == 0:
top = image.shape[0] / 10
if left == 0:
left = image.shape[1] / 10
return cv2.putText(img=image,
text=text,
org=tuple([int(np.round(top)), int(np.round(left))]),
color=(255, 0, 0),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
thickness=2)
@property
def logger(self):
return logger
|
en
| 0.576414
|
:param image: :param annotation:
| 2.739262
| 3
|
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/iosxe/c3850/reload.py
|
jbronikowski/genielibs
| 94
|
6628805
|
<reponame>jbronikowski/genielibs
'''IOSXE implementation for Reload triggers'''
# import python
import logging
# import pyats
from pyats import aetest
from pyats.utils.objects import R
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.ha.ha import \
TriggerReload as CommonReload, \
TriggerReloadLc
# from genie.libs import parser
from genie.libs.parser.iosxe.show_platform import ShowPlatform
log = logging.getLogger(__name__)
# Trigger required data settings
# Which key to exclude for Platform Ops comparison
platform_exclude = ['maker', 'rp_uptime', 'sn', 'main_mem', 'issu',
'switchover_reason', 'config_register', 'chassis_sn',
'sn', 'name']
class TriggerReload(CommonReload):
    """Reload the whole device."""

    __description__ = """Reload the whole device.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10

    steps:
        1. Learn Platform Ops object and store the "ok, active|ok, standby|Ready"
           switch(es) if has any, otherwise, SKIP the trigger
        2. Do reload by command "reload"
        3. Learn Platform Ops again and verify the state of RP(s) is
           "ok, active|ok, standby", verify every member status is "Ready",
           and verify left attributes from the ops are the same as the Ops in step 1
        4. Update platform PTS if feature pts is enabled,
           Update global/local verifications if enabled
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictates which key to verify
    mapping = Mapping(requirements={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<rp>.*)',
                               'state', '(?P<state>ok, active|ok, standby|Ready)'],
                          ],
                          'all_keys': True,
                          'exclude': platform_exclude}},
                      verify_ops={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<rp>.*)',
                               'state', '(ok, active|ok, standby|Ready)'],
                              ['slot', 'rp', '(?P<rp>.*)',
                               'swstack_role', '(Active|Standby|Member)']],
                          'exclude': platform_exclude}},
                      num_values={'rp': 'all'})
class TriggerReloadWithPriority(TriggerReloadLc):
    """Shared setup for stack-reload triggers that must honor switch priority."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Learn Ops object and verify the requirements.

        If the requirements are not satisfied, then skip to the next
        testcase.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        self.timeout = timeout

        try:
            self.pre_snap = self.mapping.learn_ops(device=uut,
                                                   abstract=abstract,
                                                   steps=steps,
                                                   timeout=self.timeout)
        except Exception as e:
            self.skipped('Cannot learn the feature', from_exception=e,
                         goto=['next_tc'])

        # Any skipped sub-step means the prerequisite snapshot is incomplete.
        for stp in steps.details:
            if stp.result.name == 'skipped':
                self.skipped('Cannot learn the feature', goto=['next_tc'])

        self.print_local_verifications()

        # get and store the member priority list
        try:
            out = ShowPlatform(device=uut).parse()
            # initial priority storage
            priority_dict = {}
            priority_list = []
            if 'slot' in out:
                for slot in out['slot']:
                    for rp in out['slot'][slot]['rp']:
                        if out['slot'][slot]['rp'][rp]['role'] == 'Member':
                            priority_dict[slot] = \
                                out['slot'][slot]['rp'][rp]['priority']
                            priority_list.append(out['slot'][slot]['rp'][rp]['priority'])
            if len(list(set(priority_list))) != 1:
                # sort the slot priorities from low to high:
                # [(<switch_number>, <priority>)]
                # e.g. [('2', '1'), ('3', '1'), ('1', '3')]
                priority_list = sorted(priority_dict.items(), key=lambda x: x[1])
                # update the verify_ops requirements:
                # the next standby switch will be the member
                # with the highest priority
                for ops, requirements in self.mapping._verify_ops_dict.items():
                    if 'platform' in ops:
                        self.mapping._verify_ops_dict[ops]['requirements'].append(
                            ['slot', 'rp', '{}'.format(priority_list[-1][0]),
                             'swstack_role', 'Standby'])
            else:
                # update the verify_ops requirements:
                # if all members share the same priority, the standby is
                # chosen randomly from the members
                for ops, requirements in self.mapping._verify_ops_dict.items():
                    if 'platform' in ops:
                        self.mapping._verify_ops_dict[ops]['requirements'].append(
                            ['slot', 'rp', '(?P<members>.*)',
                             'swstack_role', '(Standby|Member)'])
        except Exception as e:
            log.warn('Cannot get the member priority. \n{}'.format(str(e)))
class TriggerReloadActiveRP(TriggerReloadWithPriority):
    """Reload active switch on device."""

    __description__ = """Reload active switch on device.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            lcRole (`str`): The role of LC which is 'active'
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                active_rp: `str`
                standby_rp: `str`
                members: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Platform Ops object and store the "active" and "standby" switch
           if has any, otherwise, SKIP the trigger
        2. Do reload by command "reload slot <lc>"
        3. Learn Platform Ops again and verify the role of "active" switch changes to "standby",
           verify the role of "standby" switch changes to "member",
           verify the role of "member" switch with highest priority changes to "standby",
           and verify left attributes from the ops are the same as the Ops in step 1
        4. Update platform PTS if feature pts is enabled,
           Update global/local verifications if enabled
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictates which key to verify
    mapping = Mapping(requirements={'ops.platform.platform.Platform': {
                          'requirements': [
                              [['slot', 'rp', '(?P<active_rp>.*)',
                                'swstack_role', 'Active'],
                               ['slot', 'rp', '(?P<active_rp>.*)',
                                'state', 'Ready']],
                              [['slot', 'rp', '(?P<standby_rp>.*)',
                                'swstack_role', 'Standby'],
                               ['slot', 'rp', '(?P<standby_rp>.*)',
                                'state', 'Ready']],
                              [['slot', 'rp', '(?P<members>.*)',
                                'swstack_role', 'Member'],
                               ['slot', 'rp', '(?P<members>.*)',
                                'state', 'Ready']],
                              [['redundancy_communication', True]],
                          ],
                          'all_keys': True,
                          'exclude': platform_exclude}},
                      verify_ops={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<active_rp>.*)',
                               'swstack_role', 'Member'],
                              ['slot', 'rp', '(?P<standby_rp>.*)',
                               'swstack_role', 'Active']],
                          'exclude': platform_exclude}},
                      num_values={'active_rp': 1, 'standby_rp': 1, 'members': 'all'})
class TriggerReloadStandbyRP(TriggerReloadWithPriority):
    """Reload standby switch on device."""

    __description__ = """Reload standby switch on device.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            lcRole (`str`): The role of LC which is 'standby'
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                standby_rp: `str`
                members: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Platform Ops object and store the "standby" switch and "member" switch(es)
           if has any, otherwise, SKIP the trigger
        2. Do reload by command "reload slot <lc>"
        3. Learn Platform Ops again and verify role of "standby" switch changes to "member",
           verify the role of "member" switch with highest priority changes to "standby",
           and verify left attributes from the ops are the same as the Ops in step 1
        4. Update platform PTS if feature pts is enabled,
           Update global/local verifications if enabled
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictates which key to verify
    mapping = Mapping(requirements={'ops.platform.platform.Platform': {
                          'requirements': [
                              [['slot', 'rp', '(?P<standby_rp>.*)',
                                'swstack_role', 'Standby'],
                               ['slot', 'rp', '(?P<standby_rp>.*)',
                                'state', 'Ready']],
                              [['slot', 'rp', '(?P<members>.*)',
                                'swstack_role', 'Member'],
                               ['slot', 'rp', '(?P<members>.*)',
                                'state', 'Ready']],
                              [['redundancy_communication', True]],
                          ],
                          'all_keys': True,
                          'exclude': platform_exclude}},
                      verify_ops={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<standby_rp>.*)',
                               'swstack_role', 'Member']],
                          'exclude': platform_exclude}},
                      num_values={'standby_rp': 1, 'members': 'all'})
class TriggerReloadMember(TriggerReloadLc):
    """Reload member switch on device."""

    __description__ = """Reload member switch on device.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                members: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Platform Ops object and store the "member" switch(es)
           if has any, otherwise, SKIP the trigger
        2. Do reload by command "reload slot <lc>"
        3. Learn Platform Ops again and the ops are the same as the Ops in step 1
        4. Update platform PTS if feature pts is enabled,
           Update global/local verifications if enabled
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictates which key to verify
    mapping = Mapping(requirements={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<members>.*)',
                               'swstack_role', 'Member'],
                              ['slot', 'rp', '(?P<members>.*)',
                               'state', 'Ready']
                          ],
                          'all_keys': True,
                          'exclude': platform_exclude}},
                      verify_ops={'ops.platform.platform.Platform': {
                          'requirements': [
                              ['slot', 'rp', '(?P<members>.*)',
                               'state', 'Ready']],
                          'exclude': platform_exclude}},
                      num_values={'members': 1})
|
'''IOSXE implementation for Reload triggers'''
# import python
import logging
# import pyats
from pyats import aetest
from pyats.utils.objects import R
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.ha.ha import \
TriggerReload as CommonReload, \
TriggerReloadLc
# from genie.libs import parser
from genie.libs.parser.iosxe.show_platform import ShowPlatform
log = logging.getLogger(__name__)
# Trigger required data settings
# Which key to exclude for Platform Ops comparison
platform_exclude = ['maker', 'rp_uptime', 'sn', 'main_mem', 'issu',
'switchover_reason', 'config_register', 'chassis_sn',
'sn', 'name']
class TriggerReload(CommonReload):
"""Reload the whole device."""
__description__ = """Reload the whole device.
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
steps:
1. Learn Platform Ops object and store the "ok, active|ok, standby|Ready"
switch(es) if has any, otherwise, SKIP the trigger
2. Do reload by command "reload"
3. Learn Platform Ops again and verify the state of RP(s) is
"ok, active|ok, standby", verify every member status is "Ready",
and verify left attributes from the ops are the same as the Ops in step 1
4. Update platform PTS if feature pts is enabled,
Update global/local veirifications if enabled
"""
# Mapping of Information between Ops and Conf
# Also permit to dictates which key to verify
mapping = Mapping(requirements={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<rp>.*)',
'state', '(?P<state>ok, active|ok, standby|Ready)'],
],
'all_keys': True,
'exclude': platform_exclude}},
verify_ops={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<rp>.*)',
'state', '(ok, active|ok, standby|Ready)'],
['slot', 'rp', '(?P<rp>.*)',
'swstack_role', '(Active|Standby|Member)']],
'exclude': platform_exclude}},
num_values={'rp': 'all'})
class TriggerReloadWithPriority(TriggerReloadLc):
@aetest.setup
def verify_prerequisite(self, uut, abstract, steps, timeout):
'''Learn Ops object and verify the requirements.
If the requirements are not satisfied, then skip to the next
testcase.
Args:
uut (`obj`): Device object.
abstract (`obj`): Abstract object.
steps (`step obj`): aetest step object
timeout (`timeout obj`): Timeout Object
Returns:
None
Raises:
pyATS Results
'''
self.timeout = timeout
try:
self.pre_snap = self.mapping.learn_ops(device=uut,
abstract=abstract,
steps=steps,
timeout=self.timeout)
except Exception as e:
self.skipped('Cannot learn the feature', from_exception=e,
goto=['next_tc'])
for stp in steps.details:
if stp.result.name == 'skipped':
self.skipped('Cannot learn the feature', goto=['next_tc'])
self.print_local_verifications()
# get and store the member priority list
try:
out = ShowPlatform(device=uut).parse()
# inital priority storage
priority_dict = {}
priority_list = []
if 'slot' in out:
for slot in out['slot']:
for rp in out['slot'][slot]['rp']:
if out['slot'][slot]['rp'][rp]['role'] == 'Member':
priority_dict[slot] = \
out['slot'][slot]['rp'][rp]['priority']
priority_list.append(out['slot'][slot]['rp'][rp]['priority'])
if len(list(set(priority_list))) != 1:
# sorted the slot priority
# sorted with priority from low to high
# [(<switch_number>, <priority>)]
# [('2', '1'), ('3', '1'), ('1', '3')]
priority_list = sorted(priority_dict.items(), key=lambda x: x[1])
# update the verify_ops requirements
# The next standby switch will be the memeber
# with highest priority
for ops, requirements in self.mapping._verify_ops_dict.items():
if 'platform' in ops:
self.mapping._verify_ops_dict[ops]['requirements'].append(
['slot', 'rp', '{}'.format(priority_list[-1][0]),
'swstack_role', 'Standby'])
else:
# update the verify_ops requirements
# If all memeber with same priority, the standby will be
# randomly from the members
for ops, requirements in self.mapping._verify_ops_dict.items():
if 'platform' in ops:
self.mapping._verify_ops_dict[ops]['requirements'].append(
['slot', 'rp', '(?P<members>.*)',
'swstack_role', '(Standby|Member)'])
except Exception as e:
log.warn('Cannot get the member priority. \n{}'.format(str(e)))
class TriggerReloadActiveRP(TriggerReloadWithPriority):
"""Reload active switch on device."""
__description__ = """Reload active switch on device.
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
lcRole (`str`): The role of LC which is 'active'
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
active_rp: `str`
standby_rp: `str`
members: `str`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn Platform Ops object and store the "active" and "standby" switch
if has any, otherwise, SKIP the trigger
2. Do reload by command "reload slot <lc>"
3. Learn Platform Ops again and verify the role of "active" switch changes to "standby",
verify the role of "standby" switch changes to "member",
verify the role of "member" switch with highest priority changes to "standby",
and verify left attributes from the ops are the same as the Ops in step 1
4. Update platform PTS if feature pts is enabled,
Update global/local veirifications if enabled
"""
# Mapping of Information between Ops and Conf
# Also permit to dictates which key to verify
mapping = Mapping(requirements={'ops.platform.platform.Platform':{
'requirements': [\
[['slot', 'rp', '(?P<active_rp>.*)',
'swstack_role', 'Active'],
['slot', 'rp', '(?P<active_rp>.*)',
'state', 'Ready']],
[['slot', 'rp', '(?P<standby_rp>.*)',
'swstack_role', 'Standby'],
['slot', 'rp', '(?P<standby_rp>.*)',
'state', 'Ready']],
[['slot', 'rp', '(?P<members>.*)',
'swstack_role', 'Member'],
['slot', 'rp', '(?P<members>.*)',
'state', 'Ready']],
[['redundancy_communication', True]],
],
'all_keys': True,
'exclude': platform_exclude}},
verify_ops={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<active_rp>.*)',
'swstack_role', 'Member'],
['slot', 'rp', '(?P<standby_rp>.*)',
'swstack_role', 'Active']],
'exclude': platform_exclude}},
num_values={'active_rp':1, 'standby_rp':1, 'members': 'all'})
class TriggerReloadStandbyRP(TriggerReloadWithPriority):
"""Reload standby switch on device."""
__description__ = """Reload standby switch on device.
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
lcRole (`str`): The role of LC which is 'standby'
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
standby_rp: `str`
members: `str`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn Platform Ops object and store the "standby" switch and "member" switch(es)
if has any, otherwise, SKIP the trigger
2. Do reload by command "reload slot <lc>"
3. Learn Platform Ops again and verify role of "standby" switch changes to "member",
verify the role of "member" switch with highest priority changes to "standby",
and verify left attributes from the ops are the same as the Ops in step 1
4. Update platform PTS if feature pts is enabled,
Update global/local veirifications if enabled
"""
# Mapping of Information between Ops and Conf
# Also permit to dictates which key to verify
mapping = Mapping(requirements={'ops.platform.platform.Platform':{
'requirements': [\
[['slot', 'rp', '(?P<standby_rp>.*)',
'swstack_role', 'Standby'],
['slot', 'rp', '(?P<standby_rp>.*)',
'state', 'Ready']],
[['slot', 'rp', '(?P<members>.*)',
'swstack_role', 'Member'],
['slot', 'rp', '(?P<members>.*)',
'state', 'Ready']],
[['redundancy_communication', True]],
],
'all_keys': True,
'exclude': platform_exclude}},
verify_ops={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<standby_rp>.*)',
'swstack_role', 'Member']],
'exclude': platform_exclude}},
num_values={'standby_rp':1, 'members': 'all'})
class TriggerReloadMember(TriggerReloadLc):
"""Reload member switch on device."""
__description__ = """Reload member switch on device.
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
members: `str`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn Platform Ops object and store the "member" switch(es)
if has any, otherwise, SKIP the trigger
2. Do reload by command "reload slot <lc>"
3. Learn Platform Ops again and the ops are the same as the Ops in step 1
4. Update platform PTS if feature pts is enabled,
Update global/local veirifications if enabled
"""
# Mapping of Information between Ops and Conf
# Also permit to dictates which key to verify
mapping = Mapping(requirements={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<members>.*)',
'swstack_role', 'Member'],
['slot', 'rp', '(?P<members>.*)',
'state', 'Ready']
],
'all_keys': True,
'exclude': platform_exclude}},
verify_ops={'ops.platform.platform.Platform':{
'requirements': [\
['slot', 'rp', '(?P<members>.*)',
'state', 'Ready']],
'exclude': platform_exclude}},
num_values={'members': 1})
|
en
| 0.786459
|
IOSXE implementation for Reload triggers # import python # import pyats # Genie Libs # from genie.libs import parser # Trigger required data settings # Which key to exclude for Platform Ops comparison Reload the whole device. Reload the whole device. trigger_datafile: Mandatory: timeout: max_time (`int`): Maximum wait time for the trigger, in second. Default: 180 interval (`int`): Wait time between iteration when looping is needed, in second. Default: 15 Optional: tgn_timeout (`int`): Maximum wait time for all traffic threads to be restored to the reference rate, in second. Default: 60 tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed, in second. Default: 10 steps: 1. Learn Platform Ops object and store the "ok, active|ok, standby|Ready" switch(es) if has any, otherwise, SKIP the trigger 2. Do reload by command "reload" 3. Learn Platform Ops again and verify the state of RP(s) is "ok, active|ok, standby", verify every member status is "Ready", and verify left attributes from the ops are the same as the Ops in step 1 4. Update platform PTS if feature pts is enabled, Update global/local veirifications if enabled # Mapping of Information between Ops and Conf # Also permit to dictates which key to verify Learn Ops object and verify the requirements. If the requirements are not satisfied, then skip to the next testcase. Args: uut (`obj`): Device object. abstract (`obj`): Abstract object. 
steps (`step obj`): aetest step object timeout (`timeout obj`): Timeout Object Returns: None Raises: pyATS Results # get and store the member priority list # inital priority storage # sorted the slot priority # sorted with priority from low to high # [(<switch_number>, <priority>)] # [('2', '1'), ('3', '1'), ('1', '3')] # update the verify_ops requirements # The next standby switch will be the memeber # with highest priority # update the verify_ops requirements # If all memeber with same priority, the standby will be # randomly from the members Reload active switch on device. Reload active switch on device. trigger_datafile: Mandatory: timeout: max_time (`int`): Maximum wait time for the trigger, in second. Default: 180 interval (`int`): Wait time between iteration when looping is needed, in second. Default: 15 lcRole (`str`): The role of LC which is 'active' Optional: tgn_timeout (`int`): Maximum wait time for all traffic threads to be restored to the reference rate, in second. Default: 60 tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed, in second. Default: 10 static: The keys below are dynamically learnt by default. However, they can also be set to a custom value when provided in the trigger datafile. active_rp: `str` standby_rp: `str` members: `str` (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported) OR interface: 'Ethernet1/1/1' (Specific value) steps: 1. Learn Platform Ops object and store the "active" and "standby" switch if has any, otherwise, SKIP the trigger 2. Do reload by command "reload slot <lc>" 3. Learn Platform Ops again and verify the role of "active" switch changes to "standby", verify the role of "standby" switch changes to "member", verify the role of "member" switch with highest priority changes to "standby", and verify left attributes from the ops are the same as the Ops in step 1 4. 
Update platform PTS if feature pts is enabled, Update global/local veirifications if enabled # Mapping of Information between Ops and Conf # Also permit to dictates which key to verify Reload standby switch on device. Reload standby switch on device. trigger_datafile: Mandatory: timeout: max_time (`int`): Maximum wait time for the trigger, in second. Default: 180 interval (`int`): Wait time between iteration when looping is needed, in second. Default: 15 lcRole (`str`): The role of LC which is 'standby' Optional: tgn_timeout (`int`): Maximum wait time for all traffic threads to be restored to the reference rate, in second. Default: 60 tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed, in second. Default: 10 static: The keys below are dynamically learnt by default. However, they can also be set to a custom value when provided in the trigger datafile. standby_rp: `str` members: `str` (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported) OR interface: 'Ethernet1/1/1' (Specific value) steps: 1. Learn Platform Ops object and store the "standby" switch and "member" switch(es) if has any, otherwise, SKIP the trigger 2. Do reload by command "reload slot <lc>" 3. Learn Platform Ops again and verify role of "standby" switch changes to "member", verify the role of "member" switch with highest priority changes to "standby", and verify left attributes from the ops are the same as the Ops in step 1 4. Update platform PTS if feature pts is enabled, Update global/local veirifications if enabled # Mapping of Information between Ops and Conf # Also permit to dictates which key to verify Reload member switch on device. Reload member switch on device. trigger_datafile: Mandatory: timeout: max_time (`int`): Maximum wait time for the trigger, in second. Default: 180 interval (`int`): Wait time between iteration when looping is needed, in second. 
Default: 15 Optional: tgn_timeout (`int`): Maximum wait time for all traffic threads to be restored to the reference rate, in second. Default: 60 tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed, in second. Default: 10 static: The keys below are dynamically learnt by default. However, they can also be set to a custom value when provided in the trigger datafile. members: `str` (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported) OR interface: 'Ethernet1/1/1' (Specific value) steps: 1. Learn Platform Ops object and store the "member" switch(es) if has any, otherwise, SKIP the trigger 2. Do reload by command "reload slot <lc>" 3. Learn Platform Ops again and the ops are the same as the Ops in step 1 4. Update platform PTS if feature pts is enabled, Update global/local veirifications if enabled # Mapping of Information between Ops and Conf # Also permit to dictates which key to verify
| 2.270993
| 2
|
gracz.py
|
cz-maria/Pawel_the_Mage
| 0
|
6628806
|
import pygame
import stale
class Player(pygame.sprite.Sprite):
""" <NAME> """
walking_frames_l = []
walking_frames_r = []
direction = "R"
# -- Methods
def __init__(self,color):
super().__init__()
if color == 0:
image_r = pygame.image.load("mage1.png").convert()
elif color == 1:
image_r = pygame.image.load("mage3.png").convert()
else:
image_r = pygame.image.load("mage2.png").convert()
image_r.set_colorkey(stale.RED)
self.walking_frames_r.append(image_r)
image_l = pygame.transform.flip(image_r, True, False)
self.walking_frames_l.append(image_l)
self.image = player_image = self.walking_frames_r[0]
self.rect = self.image.get_rect()
# Set speed vector of player
self.change_x = 0
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Move the player. """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
#pos = self.rect.x
if self.direction == "R":
self.image = self.walking_frames_r[0]
else:
self.image = self.walking_frames_l[0]
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Co robic jak natrafimy na platforme
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
self.change_y = 0
elif self.change_y < 0:
self.rect.top = block.rect.bottom
#Stop our vertical movement
# self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= stale.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = stale.SCREEN_HEIGHT - self.rect.height
def jump(self):
""" Called when user hits 'jump' button. """
# move down a bit and see if there is a platform below us.
# Move down 2 pixels because it doesn't work well if we only move down
# 1 when working with a platform moving down.
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
self.rect.y -= 2
# If it is ok to jump, set our speed upwards
if len(platform_hit_list) > 0 or self.rect.bottom >= stale.SCREEN_HEIGHT:
self.change_y = -10
# Player-controlled movement:
def go_left(self):
""" Called when the user hits the left arrow. """
self.change_x = -4
self.direction = "L"
def go_right(self):
""" Called when the user hits the right arrow. """
self.change_x = 4
self.direction = "R"
def stop(self):
""" Called when the user lets off the keyboard. """
self.change_x = 0
#def odpalpocisk(self):
# self.fire = True
|
import pygame
import stale
class Player(pygame.sprite.Sprite):
""" <NAME> """
walking_frames_l = []
walking_frames_r = []
direction = "R"
# -- Methods
def __init__(self,color):
super().__init__()
if color == 0:
image_r = pygame.image.load("mage1.png").convert()
elif color == 1:
image_r = pygame.image.load("mage3.png").convert()
else:
image_r = pygame.image.load("mage2.png").convert()
image_r.set_colorkey(stale.RED)
self.walking_frames_r.append(image_r)
image_l = pygame.transform.flip(image_r, True, False)
self.walking_frames_l.append(image_l)
self.image = player_image = self.walking_frames_r[0]
self.rect = self.image.get_rect()
# Set speed vector of player
self.change_x = 0
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Move the player. """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
#pos = self.rect.x
if self.direction == "R":
self.image = self.walking_frames_r[0]
else:
self.image = self.walking_frames_l[0]
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Co robic jak natrafimy na platforme
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
self.change_y = 0
elif self.change_y < 0:
self.rect.top = block.rect.bottom
#Stop our vertical movement
# self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= stale.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = stale.SCREEN_HEIGHT - self.rect.height
def jump(self):
""" Called when user hits 'jump' button. """
# move down a bit and see if there is a platform below us.
# Move down 2 pixels because it doesn't work well if we only move down
# 1 when working with a platform moving down.
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
self.rect.y -= 2
# If it is ok to jump, set our speed upwards
if len(platform_hit_list) > 0 or self.rect.bottom >= stale.SCREEN_HEIGHT:
self.change_y = -10
# Player-controlled movement:
def go_left(self):
""" Called when the user hits the left arrow. """
self.change_x = -4
self.direction = "L"
def go_right(self):
""" Called when the user hits the right arrow. """
self.change_x = 4
self.direction = "R"
def stop(self):
""" Called when the user lets off the keyboard. """
self.change_x = 0
#def odpalpocisk(self):
# self.fire = True
|
en
| 0.842164
|
<NAME> # -- Methods # Set speed vector of player # List of sprites we can bump against Move the player. # Gravity # Move left/right #pos = self.rect.x # See if we hit anything # If we are moving right, # set our right side to the left side of the item we hit # Otherwise if we are moving left, do the opposite. # Move up/down # Co robic jak natrafimy na platforme # Reset our position based on the top/bottom of the object. #Stop our vertical movement # self.change_y = 0 Calculate effect of gravity. # See if we are on the ground. Called when user hits 'jump' button. # move down a bit and see if there is a platform below us. # Move down 2 pixels because it doesn't work well if we only move down # 1 when working with a platform moving down. # If it is ok to jump, set our speed upwards # Player-controlled movement: Called when the user hits the left arrow. Called when the user hits the right arrow. Called when the user lets off the keyboard. #def odpalpocisk(self): # self.fire = True
| 3.109126
| 3
|
qpython/utils.py
|
nugend/qPython
| 5
|
6628807
|
<reponame>nugend/qPython
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
def uncompress(data, uncompressed_size):
_0 = numpy.intc(0)
_1 = numpy.intc(1)
_2 = numpy.intc(2)
_128 = numpy.intc(128)
_255 = numpy.intc(255)
n, r, s, p = _0, _0, _0, _0
i, d = _1, _1
f = _255 & data[_0]
ptrs = numpy.zeros(256, dtype=numpy.intc)
uncompressed = numpy.zeros(uncompressed_size, dtype=numpy.uint8)
idx = numpy.arange(uncompressed_size, dtype=numpy.intc)
while s < uncompressed_size:
pp = p + _1
if f & i:
r = ptrs[data[d]]
n = _2 + data[d + _1]
uncompressed[idx[s : s + n]] = uncompressed[r : r + n]
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
if s == pp:
ptrs[(uncompressed[pp]) ^ (uncompressed[pp + _1])] = pp
d += _2
r += _2
s = s + n
p = s
else:
uncompressed[s] = data[d]
if pp == s:
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
p = pp
s += _1
d += _1
if i == _128:
if s < uncompressed_size:
f = _255 & data[d]
d += _1
i = _1
else:
i += i
return uncompressed
|
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
def uncompress(data, uncompressed_size):
_0 = numpy.intc(0)
_1 = numpy.intc(1)
_2 = numpy.intc(2)
_128 = numpy.intc(128)
_255 = numpy.intc(255)
n, r, s, p = _0, _0, _0, _0
i, d = _1, _1
f = _255 & data[_0]
ptrs = numpy.zeros(256, dtype=numpy.intc)
uncompressed = numpy.zeros(uncompressed_size, dtype=numpy.uint8)
idx = numpy.arange(uncompressed_size, dtype=numpy.intc)
while s < uncompressed_size:
pp = p + _1
if f & i:
r = ptrs[data[d]]
n = _2 + data[d + _1]
uncompressed[idx[s : s + n]] = uncompressed[r : r + n]
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
if s == pp:
ptrs[(uncompressed[pp]) ^ (uncompressed[pp + _1])] = pp
d += _2
r += _2
s = s + n
p = s
else:
uncompressed[s] = data[d]
if pp == s:
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
p = pp
s += _1
d += _1
if i == _128:
if s < uncompressed_size:
f = _255 & data[d]
d += _1
i = _1
else:
i += i
return uncompressed
|
en
| 0.834662
|
# Copyright (c) 2011-2014 Exxeleron GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
| 2.65697
| 3
|
{{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/pipelines.py
|
naidu-rohit/ds_template
| 0
|
6628808
|
import dagster as dag
import yaml
from datetime import datetime
from dagster import pipeline, PresetDefinition
from {{cookiecutter.package_name}}.tasks.features import *
from {{cookiecutter.package_name}}.tasks.preprocess import *
from {{cookiecutter.package_name}}.tasks.train import *
from {{cookiecutter.package_name}}.utils.common import *
@pipeline(
preset_defs=[
PresetDefinition(
"dev", environment_dict=read_config("configs/preprocess.yaml"),
),
]
)
def preprocess_pipeline():
pass
@pipeline(
preset_defs=[
PresetDefinition("dev", environment_dict=read_config("configs/features.yaml"),),
]
)
def feature_eng_pipeline():
pass
@pipeline(
preset_defs=[
PresetDefinition("dev", environment_dict=read_config("configs/train.yaml"),),
]
)
def train_pipeline():
pass
@pipeline
def predict_pipeline():
pass
|
import dagster as dag
import yaml
from datetime import datetime
from dagster import pipeline, PresetDefinition
from {{cookiecutter.package_name}}.tasks.features import *
from {{cookiecutter.package_name}}.tasks.preprocess import *
from {{cookiecutter.package_name}}.tasks.train import *
from {{cookiecutter.package_name}}.utils.common import *
@pipeline(
preset_defs=[
PresetDefinition(
"dev", environment_dict=read_config("configs/preprocess.yaml"),
),
]
)
def preprocess_pipeline():
pass
@pipeline(
preset_defs=[
PresetDefinition("dev", environment_dict=read_config("configs/features.yaml"),),
]
)
def feature_eng_pipeline():
pass
@pipeline(
preset_defs=[
PresetDefinition("dev", environment_dict=read_config("configs/train.yaml"),),
]
)
def train_pipeline():
pass
@pipeline
def predict_pipeline():
pass
|
none
| 1
| 2.106354
| 2
|
|
mooringlicensing/migrations/0232_auto_20210825_1609.py
|
jawaidm/mooringlicensing
| 0
|
6628809
|
<filename>mooringlicensing/migrations/0232_auto_20210825_1609.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-25 08:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0231_auto_20210825_1144'),
]
operations = [
migrations.CreateModel(
name='FeeItemStickerReplacement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, default='0.00', help_text='$', max_digits=8)),
('date_of_enforcement', models.DateField(blank=True, null=True)),
],
options={
'verbose_name': 'Fee (sticker replacement)',
},
),
migrations.AlterModelOptions(
name='applicationtype',
options={'verbose_name': 'Oracle code'},
),
]
|
<filename>mooringlicensing/migrations/0232_auto_20210825_1609.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-25 08:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0231_auto_20210825_1144'),
]
operations = [
migrations.CreateModel(
name='FeeItemStickerReplacement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, default='0.00', help_text='$', max_digits=8)),
('date_of_enforcement', models.DateField(blank=True, null=True)),
],
options={
'verbose_name': 'Fee (sticker replacement)',
},
),
migrations.AlterModelOptions(
name='applicationtype',
options={'verbose_name': 'Oracle code'},
),
]
|
en
| 0.686419
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-08-25 08:09
| 1.476777
| 1
|
services/core-api/app/api/now_applications/resources/now_application_type_resource.py
|
parc-jason/mds
| 0
|
6628810
|
from flask_restplus import Resource
from app.extensions import api
from app.api.utils.access_decorators import requires_role_view_all
from app.api.utils.resources_mixins import UserMixin
from app.api.now_applications.models.now_application_type import NOWApplicationType
from app.api.now_applications.response_models import NOW_APPLICATION_TYPES
class NOWApplicationTypeResource(Resource, UserMixin):
@api.doc(description='Get a list of all Notice of Work activity type codes.', params={})
@requires_role_view_all
@api.marshal_with(NOW_APPLICATION_TYPES, code=200, envelope='records')
def get(self):
return NOWApplicationType.get_active()
|
from flask_restplus import Resource
from app.extensions import api
from app.api.utils.access_decorators import requires_role_view_all
from app.api.utils.resources_mixins import UserMixin
from app.api.now_applications.models.now_application_type import NOWApplicationType
from app.api.now_applications.response_models import NOW_APPLICATION_TYPES
class NOWApplicationTypeResource(Resource, UserMixin):
@api.doc(description='Get a list of all Notice of Work activity type codes.', params={})
@requires_role_view_all
@api.marshal_with(NOW_APPLICATION_TYPES, code=200, envelope='records')
def get(self):
return NOWApplicationType.get_active()
|
none
| 1
| 1.815621
| 2
|
|
detection/detect.py
|
DoomsdayT/Raspberry-Pi-Fall-Detection
| 1
|
6628811
|
from .models.expert import fallDetect as expertDetect
def detect(acc_series):
return int(expertDetect(acc_series)) * 1.0
if __name__ == '__main__':
print(detect([9.0, 10.0, 12.0, 5.0, 7.0]))
|
from .models.expert import fallDetect as expertDetect
def detect(acc_series):
return int(expertDetect(acc_series)) * 1.0
if __name__ == '__main__':
print(detect([9.0, 10.0, 12.0, 5.0, 7.0]))
|
none
| 1
| 2.040396
| 2
|
|
test.py
|
mgtremaine/py_tracking_urls
| 1
|
6628812
|
#!/usr/bin/python3
#Simple test values
#Missing Mail Innovations test
import py_tracking_urls
errors = False
numbers = {
#TEST UPS
'1Z9999W99999999999' : 'ups.com',
'1Z12345E1512345676' : 'ups.com',
'1Z12345E0205271688' : 'ups.com',
'1Z12345E6605272234' : 'ups.com',
'1Z12345E0305271640' : 'ups.com',
'1Z12345E0393657226' : 'ups.com',
'1Z12345E1305277940' : 'ups.com',
'1Z12345E6205277936' : 'ups.com',
'1Z12345E1505270452' : 'ups.com',
'1Z648616E192760718' : 'ups.com',
'1ZWX0692YP40636269' : 'ups.com',
'T9999999999' : 'ups.com',
#TEST FEDEX
'9999 9999 9999' : 'fedex.com',
'9999 9999 9999 999' : 'fedex.com',
'999999999999' : 'fedex.com',
'999999999999999' : 'fedex.com',
'661377569221' : 'fedex.com',
'624893691092' : 'fedex.com',
'61299995669352455464' : 'fedex.com',
'61299995669151880177' : 'fedex.com',
'00408017007469' : 'fedex.com',
#TEST USPS
'9400 1000 0000 0000 0000 00' : 'usps.com',
'9205 5000 0000 0000 0000 00' : 'usps.com',
'9407 3000 0000 0000 0000 00' : 'usps.com',
'9303 3000 0000 0000 0000 00' : 'usps.com',
'82 000 000 00' : 'usps.com',
'EC 000 000 000 US' : 'usps.com',
'9270 1000 0000 0000 0000 00' : 'usps.com',
'EA 000 000 000 US' : 'usps.com',
'CP 000 000 000 US' : 'usps.com',
'9208 8000 0000 0000 0000 00' : 'usps.com',
'9202 1000 0000 0000 0000 00' : 'usps.com',
'9400100000000000000000' : 'usps.com',
'9205500000000000000000' : 'usps.com',
'9407300000000000000000' : 'usps.com',
'9303300000000000000000' : 'usps.com',
'8200000000' : 'usps.com',
'EC000000000US' : 'usps.com',
'9270100000000000000000' : 'usps.com',
'EA000000000US' : 'usps.com',
'CP000000000US' : 'usps.com',
'9208800000000000000000' : 'usps.com',
'9202100000000000000000' : 'usps.com',
'92748963438592543475924253' : 'usps.com',
#TEST ONTRAC
'C00000000000000' : 'ontrac.com',
'C99999999999999' : 'ontrac.com',
#TEST DHL
'125-12345678' : 'dhl.com',
'125 12345678' : 'd<EMAIL>',
'12512345678' : 'dhl.com',
'SEA1234567' : 'dhl.com',
'LAX1234567' : 'dhl.com',
#TEST DHL GLOBAL
'4041005861941412' : 'dhlglobalmail.com',
#Royal Mail
'AB123456789GB' : 'royalmail.com',
#INVALID TRACKING NUMBERS
'INVALID TRACKING NUMBER' : False
}
for number, service in numbers.items():
print('TESTING: ' + number)
url = py_tracking_urls.get_tracking_url(number)
if url:
print(url)
if service not in url:
print(" --- FAILED --- WRONG SERVICE DETECTED ---")
errors = True
else:
if not url and not service:
print(" --- NO SERVICE AVAILABLE FOR THIS VALUE ---")
else:
print(" --- FAILED --- NO SERVICE FOUND ---")
errors = True;
print(" ")
if errors:
print(" --- ERROR FOUND --- TEST FAILED ---")
exit(1);
else:
print(" !!! ALL GOOD !!!")
|
#!/usr/bin/python3
#Simple test values
#Missing Mail Innovations test
import py_tracking_urls
errors = False
numbers = {
#TEST UPS
'1Z9999W99999999999' : 'ups.com',
'1Z12345E1512345676' : 'ups.com',
'1Z12345E0205271688' : 'ups.com',
'1Z12345E6605272234' : 'ups.com',
'1Z12345E0305271640' : 'ups.com',
'1Z12345E0393657226' : 'ups.com',
'1Z12345E1305277940' : 'ups.com',
'1Z12345E6205277936' : 'ups.com',
'1Z12345E1505270452' : 'ups.com',
'1Z648616E192760718' : 'ups.com',
'1ZWX0692YP40636269' : 'ups.com',
'T9999999999' : 'ups.com',
#TEST FEDEX
'9999 9999 9999' : 'fedex.com',
'9999 9999 9999 999' : 'fedex.com',
'999999999999' : 'fedex.com',
'999999999999999' : 'fedex.com',
'661377569221' : 'fedex.com',
'624893691092' : 'fedex.com',
'61299995669352455464' : 'fedex.com',
'61299995669151880177' : 'fedex.com',
'00408017007469' : 'fedex.com',
#TEST USPS
'9400 1000 0000 0000 0000 00' : 'usps.com',
'9205 5000 0000 0000 0000 00' : 'usps.com',
'9407 3000 0000 0000 0000 00' : 'usps.com',
'9303 3000 0000 0000 0000 00' : 'usps.com',
'82 000 000 00' : 'usps.com',
'EC 000 000 000 US' : 'usps.com',
'9270 1000 0000 0000 0000 00' : 'usps.com',
'EA 000 000 000 US' : 'usps.com',
'CP 000 000 000 US' : 'usps.com',
'9208 8000 0000 0000 0000 00' : 'usps.com',
'9202 1000 0000 0000 0000 00' : 'usps.com',
'9400100000000000000000' : 'usps.com',
'9205500000000000000000' : 'usps.com',
'9407300000000000000000' : 'usps.com',
'9303300000000000000000' : 'usps.com',
'8200000000' : 'usps.com',
'EC000000000US' : 'usps.com',
'9270100000000000000000' : 'usps.com',
'EA000000000US' : 'usps.com',
'CP000000000US' : 'usps.com',
'9208800000000000000000' : 'usps.com',
'9202100000000000000000' : 'usps.com',
'92748963438592543475924253' : 'usps.com',
#TEST ONTRAC
'C00000000000000' : 'ontrac.com',
'C99999999999999' : 'ontrac.com',
#TEST DHL
'125-12345678' : 'dhl.com',
'125 12345678' : 'd<EMAIL>',
'12512345678' : 'dhl.com',
'SEA1234567' : 'dhl.com',
'LAX1234567' : 'dhl.com',
#TEST DHL GLOBAL
'4041005861941412' : 'dhlglobalmail.com',
#Royal Mail
'AB123456789GB' : 'royalmail.com',
#INVALID TRACKING NUMBERS
'INVALID TRACKING NUMBER' : False
}
for number, service in numbers.items():
print('TESTING: ' + number)
url = py_tracking_urls.get_tracking_url(number)
if url:
print(url)
if service not in url:
print(" --- FAILED --- WRONG SERVICE DETECTED ---")
errors = True
else:
if not url and not service:
print(" --- NO SERVICE AVAILABLE FOR THIS VALUE ---")
else:
print(" --- FAILED --- NO SERVICE FOUND ---")
errors = True;
print(" ")
if errors:
print(" --- ERROR FOUND --- TEST FAILED ---")
exit(1);
else:
print(" !!! ALL GOOD !!!")
|
en
| 0.3463
|
#!/usr/bin/python3 #Simple test values #Missing Mail Innovations test #TEST UPS #TEST FEDEX #TEST USPS #TEST ONTRAC #TEST DHL #TEST DHL GLOBAL #Royal Mail #INVALID TRACKING NUMBERS
| 1.997973
| 2
|
python/UdemyCourse/2022_Python_Bootcamp/basics/python_statements/__init__.py
|
pradyotprksh/development_learning
| 9
|
6628813
|
<filename>python/UdemyCourse/2022_Python_Bootcamp/basics/python_statements/__init__.py
from .if_elif_else import if_elif_else_basics
from .for_loops import for_loops_basics
from .while_loops import while_loops_basics
from .useful_operators import useful_operators
|
<filename>python/UdemyCourse/2022_Python_Bootcamp/basics/python_statements/__init__.py
from .if_elif_else import if_elif_else_basics
from .for_loops import for_loops_basics
from .while_loops import while_loops_basics
from .useful_operators import useful_operators
|
none
| 1
| 1.839769
| 2
|
|
Exercicios/ex028.py
|
AndreyPaceli/Python3-CursoEmVideo
| 0
|
6628814
|
#Escreva um programa que faça o computador “pensar” em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. O programa deverá escrever na tela se o usuário venceu ou perdeu
from random import randint
from time import sleep
computador = randint(0,5) #Faz o computador "PENSAR"
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar...')
print('-=-' * 20)
jogador = int(input('Em que número eu pensei ? ')) #Jogador tenta adivinhar
print('PROCESSANDO...')
sleep(2)
if jogador == computador:
print('Parabens!! você acertou. GRR')
else:
print('Perdeu otário, eu pensei no número {} e não no {}!'.format(computador, jogador))
|
#Escreva um programa que faça o computador “pensar” em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. O programa deverá escrever na tela se o usuário venceu ou perdeu
from random import randint
from time import sleep
computador = randint(0,5) #Faz o computador "PENSAR"
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar...')
print('-=-' * 20)
jogador = int(input('Em que número eu pensei ? ')) #Jogador tenta adivinhar
print('PROCESSANDO...')
sleep(2)
if jogador == computador:
print('Parabens!! você acertou. GRR')
else:
print('Perdeu otário, eu pensei no número {} e não no {}!'.format(computador, jogador))
|
pt
| 0.9794
|
#Escreva um programa que faça o computador “pensar” em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. O programa deverá escrever na tela se o usuário venceu ou perdeu #Faz o computador "PENSAR" #Jogador tenta adivinhar
| 4.221844
| 4
|
apps/tickets/migrations/0008_auto_20180522_2317.py
|
RonaldTheodoro/tcc-simple-ticket
| 0
|
6628815
|
<gh_stars>0
# Generated by Django 2.0.5 on 2018-05-23 02:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tickets', '0007_auto_20180522_2312'),
]
operations = [
migrations.AlterField(
model_name='task',
name='ticket',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='tasks', to='tickets.Ticket', verbose_name='task'),
),
]
|
# Generated by Django 2.0.5 on 2018-05-23 02:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tickets', '0007_auto_20180522_2312'),
]
operations = [
migrations.AlterField(
model_name='task',
name='ticket',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='tasks', to='tickets.Ticket', verbose_name='task'),
),
]
|
en
| 0.764968
|
# Generated by Django 2.0.5 on 2018-05-23 02:17
| 1.375242
| 1
|
dfvfs/resolver/compressed_stream_resolver_helper.py
|
Defense-Cyber-Crime-Center/dfvfs
| 2
|
6628816
|
<filename>dfvfs/resolver/compressed_stream_resolver_helper.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""The compressed stream path specification resolver helper implementation."""
# This is necessary to prevent a circular import.
import dfvfs.file_io.compressed_stream_io
import dfvfs.vfs.compressed_stream_file_system
from dfvfs.lib import definitions
from dfvfs.resolver import resolver
from dfvfs.resolver import resolver_helper
class CompressedStreamResolverHelper(resolver_helper.ResolverHelper):
"""Class that implements the compressed stream resolver helper."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_COMPRESSED_STREAM
def NewFileObject(self, resolver_context):
"""Creates a new file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file-like object (instance of file_io.FileIO).
"""
return dfvfs.file_io.compressed_stream_io.CompressedStream(resolver_context)
def NewFileSystem(self, resolver_context):
"""Creates a new file system object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file system object (instance of vfs.FileSystem).
"""
return dfvfs.vfs.compressed_stream_file_system.CompressedStreamFileSystem(
resolver_context)
# Register the resolver helpers with the resolver.
resolver.Resolver.RegisterHelper(CompressedStreamResolverHelper())
|
<filename>dfvfs/resolver/compressed_stream_resolver_helper.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""The compressed stream path specification resolver helper implementation."""
# This is necessary to prevent a circular import.
import dfvfs.file_io.compressed_stream_io
import dfvfs.vfs.compressed_stream_file_system
from dfvfs.lib import definitions
from dfvfs.resolver import resolver
from dfvfs.resolver import resolver_helper
class CompressedStreamResolverHelper(resolver_helper.ResolverHelper):
"""Class that implements the compressed stream resolver helper."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_COMPRESSED_STREAM
def NewFileObject(self, resolver_context):
"""Creates a new file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file-like object (instance of file_io.FileIO).
"""
return dfvfs.file_io.compressed_stream_io.CompressedStream(resolver_context)
def NewFileSystem(self, resolver_context):
"""Creates a new file system object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file system object (instance of vfs.FileSystem).
"""
return dfvfs.vfs.compressed_stream_file_system.CompressedStreamFileSystem(
resolver_context)
# Register the resolver helpers with the resolver.
resolver.Resolver.RegisterHelper(CompressedStreamResolverHelper())
|
en
| 0.743544
|
# -*- coding: utf-8 -*- The compressed stream path specification resolver helper implementation. # This is necessary to prevent a circular import. Class that implements the compressed stream resolver helper. Creates a new file-like object. Args: resolver_context: the resolver context (instance of resolver.Context). Returns: The file-like object (instance of file_io.FileIO). Creates a new file system object. Args: resolver_context: the resolver context (instance of resolver.Context). Returns: The file system object (instance of vfs.FileSystem). # Register the resolver helpers with the resolver.
| 2.062749
| 2
|
introduction/arithmetic_operators.py
|
pk-hackerrank/python
| 1
|
6628817
|
<reponame>pk-hackerrank/python<filename>introduction/arithmetic_operators.py<gh_stars>1-10
def printArthmeticOperationalValues(a,b):
print(a+b)
print(a-b)
print(a*b)
if __name__ == '__main__':
a = int(input())
b = int(input())
printArthmeticOperationalValues(a,b)
|
def printArthmeticOperationalValues(a,b):
print(a+b)
print(a-b)
print(a*b)
if __name__ == '__main__':
a = int(input())
b = int(input())
printArthmeticOperationalValues(a,b)
|
none
| 1
| 3.621407
| 4
|
|
dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_ecr/__init__.py
|
chriscoombs/amazon-sagemaker-cdk-examples
| 41
|
6628818
|
<reponame>chriscoombs/amazon-sagemaker-cdk-examples
"""
## Amazon ECR Construct Library
<!--BEGIN STABILITY BANNER-->---

---
<!--END STABILITY BANNER-->
This package contains constructs for working with Amazon Elastic Container Registry.
### Repositories
Define a repository by creating a new instance of `Repository`. A repository
holds multiple verions of a single container image.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
repository = ecr.Repository(self, "Repository")
```
### Automatically clean up repositories
You can set life cycle rules to automatically clean up old images from your
repository. The first life cycle rule that matches an image will be applied
against that image. For example, the following deletes images older than
30 days, while keeping all images tagged with prod (note that the order
is important here):
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
repository.add_lifecycle_rule(tag_prefix_list=["prod"], max_image_count=9999)
repository.add_lifecycle_rule(max_image_age_days=cdk.Duration.days(30))
```
"""
import abc
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
from jsii.python import classproperty
import aws_cdk.aws_events
import aws_cdk.aws_iam
import aws_cdk.core
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-ecr", "1.18.0", __name__, "aws-ecr@1.18.0.jsii.tgz")
@jsii.implements(aws_cdk.core.IInspectable)
class CfnRepository(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-ecr.CfnRepository"):
"""A CloudFormation ``AWS::ECR::Repository``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html
cloudformationResource:
:cloudformationResource:: AWS::ECR::Repository
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, lifecycle_policy: typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]=None, repository_name: typing.Optional[str]=None, repository_policy_text: typing.Any=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None) -> None:
"""Create a new ``AWS::ECR::Repository``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``.
:param repository_name: ``AWS::ECR::Repository.RepositoryName``.
:param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``.
:param tags: ``AWS::ECR::Repository.Tags``.
"""
props = CfnRepositoryProps(lifecycle_policy=lifecycle_policy, repository_name=repository_name, repository_policy_text=repository_policy_text, tags=tags)
jsii.create(CfnRepository, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> str:
"""
cloudformationAttribute:
:cloudformationAttribute:: Arn
"""
return jsii.get(self, "attrArn")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="tags")
def tags(self) -> aws_cdk.core.TagManager:
"""``AWS::ECR::Repository.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags
"""
return jsii.get(self, "tags")
@property
@jsii.member(jsii_name="repositoryPolicyText")
def repository_policy_text(self) -> typing.Any:
"""``AWS::ECR::Repository.RepositoryPolicyText``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext
"""
return jsii.get(self, "repositoryPolicyText")
@repository_policy_text.setter
def repository_policy_text(self, value: typing.Any):
return jsii.set(self, "repositoryPolicyText", value)
@property
@jsii.member(jsii_name="lifecyclePolicy")
def lifecycle_policy(self) -> typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::ECR::Repository.LifecyclePolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy
"""
return jsii.get(self, "lifecyclePolicy")
@lifecycle_policy.setter
def lifecycle_policy(self, value: typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]):
return jsii.set(self, "lifecyclePolicy", value)
@property
@jsii.member(jsii_name="repositoryName")
def repository_name(self) -> typing.Optional[str]:
"""``AWS::ECR::Repository.RepositoryName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname
"""
return jsii.get(self, "repositoryName")
@repository_name.setter
def repository_name(self, value: typing.Optional[str]):
return jsii.set(self, "repositoryName", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.CfnRepository.LifecyclePolicyProperty", jsii_struct_bases=[], name_mapping={'lifecycle_policy_text': 'lifecyclePolicyText', 'registry_id': 'registryId'})
class LifecyclePolicyProperty():
def __init__(self, *, lifecycle_policy_text: typing.Optional[str]=None, registry_id: typing.Optional[str]=None):
"""
:param lifecycle_policy_text: ``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``.
:param registry_id: ``CfnRepository.LifecyclePolicyProperty.RegistryId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html
"""
self._values = {
}
if lifecycle_policy_text is not None: self._values["lifecycle_policy_text"] = lifecycle_policy_text
if registry_id is not None: self._values["registry_id"] = registry_id
@property
def lifecycle_policy_text(self) -> typing.Optional[str]:
"""``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-lifecyclepolicytext
"""
return self._values.get('lifecycle_policy_text')
@property
def registry_id(self) -> typing.Optional[str]:
"""``CfnRepository.LifecyclePolicyProperty.RegistryId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-registryid
"""
return self._values.get('registry_id')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LifecyclePolicyProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.CfnRepositoryProps", jsii_struct_bases=[], name_mapping={'lifecycle_policy': 'lifecyclePolicy', 'repository_name': 'repositoryName', 'repository_policy_text': 'repositoryPolicyText', 'tags': 'tags'})
class CfnRepositoryProps():
def __init__(self, *, lifecycle_policy: typing.Optional[typing.Union[typing.Optional["CfnRepository.LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]=None, repository_name: typing.Optional[str]=None, repository_policy_text: typing.Any=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None):
"""Properties for defining a ``AWS::ECR::Repository``.
:param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``.
:param repository_name: ``AWS::ECR::Repository.RepositoryName``.
:param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``.
:param tags: ``AWS::ECR::Repository.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html
"""
self._values = {
}
if lifecycle_policy is not None: self._values["lifecycle_policy"] = lifecycle_policy
if repository_name is not None: self._values["repository_name"] = repository_name
if repository_policy_text is not None: self._values["repository_policy_text"] = repository_policy_text
if tags is not None: self._values["tags"] = tags
@property
def lifecycle_policy(self) -> typing.Optional[typing.Union[typing.Optional["CfnRepository.LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::ECR::Repository.LifecyclePolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy
"""
return self._values.get('lifecycle_policy')
@property
def repository_name(self) -> typing.Optional[str]:
"""``AWS::ECR::Repository.RepositoryName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname
"""
return self._values.get('repository_name')
@property
def repository_policy_text(self) -> typing.Any:
"""``AWS::ECR::Repository.RepositoryPolicyText``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext
"""
return self._values.get('repository_policy_text')
@property
def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]:
"""``AWS::ECR::Repository.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags
"""
return self._values.get('tags')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnRepositoryProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.interface(jsii_type="@aws-cdk/aws-ecr.IRepository")
class IRepository(aws_cdk.core.IResource, jsii.compat.Protocol):
"""Represents an ECR repository."""
@staticmethod
def __jsii_proxy_class__():
return _IRepositoryProxy
@property
@jsii.member(jsii_name="repositoryArn")
def repository_arn(self) -> str:
"""The ARN of the repository.
attribute:
:attribute:: true
"""
...
@property
@jsii.member(jsii_name="repositoryName")
def repository_name(self) -> str:
"""The name of the repository.
attribute:
:attribute:: true
"""
...
@property
@jsii.member(jsii_name="repositoryUri")
def repository_uri(self) -> str:
"""The URI of this repository (represents the latest image):.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY
attribute:
:attribute:: true
"""
...
@jsii.member(jsii_name="addToResourcePolicy")
def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Add a policy statement to the repository's resource policy.
:param statement: -
"""
...
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Grant the given principal identity permissions to perform the actions on this repository.
:param grantee: -
:param actions: -
"""
...
@jsii.member(jsii_name="grantPull")
def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull images in this repository.
:param grantee: -
"""
...
@jsii.member(jsii_name="grantPullPush")
def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull and push images to this repository.
:param grantee: -
"""
...
@jsii.member(jsii_name="onCloudTrailEvent")
def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Define a CloudWatch event that triggers when something happens to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onCloudTrailImagePushed")
def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onEvent")
def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines a CloudWatch event rule which triggers for repository events.
Use
``rule.addEventPattern(pattern)`` to specify a filter.
:param id: -
:param options: -
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onImageScanCompleted")
def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="repositoryUriForTag")
def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
"""Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]
:param tag: Image tag to use (tools usually default to "latest" if omitted).
"""
...
class _IRepositoryProxy(jsii.proxy_for(aws_cdk.core.IResource)):
"""Represents an ECR repository."""
__jsii_type__ = "@aws-cdk/aws-ecr.IRepository"
@property
@jsii.member(jsii_name="repositoryArn")
def repository_arn(self) -> str:
"""The ARN of the repository.
attribute:
:attribute:: true
"""
return jsii.get(self, "repositoryArn")
@property
@jsii.member(jsii_name="repositoryName")
def repository_name(self) -> str:
"""The name of the repository.
attribute:
:attribute:: true
"""
return jsii.get(self, "repositoryName")
@property
@jsii.member(jsii_name="repositoryUri")
def repository_uri(self) -> str:
"""The URI of this repository (represents the latest image):.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY
attribute:
:attribute:: true
"""
return jsii.get(self, "repositoryUri")
@jsii.member(jsii_name="addToResourcePolicy")
def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Add a policy statement to the repository's resource policy.
:param statement: -
"""
return jsii.invoke(self, "addToResourcePolicy", [statement])
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Grant the given principal identity permissions to perform the actions on this repository.
:param grantee: -
:param actions: -
"""
return jsii.invoke(self, "grant", [grantee, *actions])
@jsii.member(jsii_name="grantPull")
def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull images in this repository.
:param grantee: -
"""
return jsii.invoke(self, "grantPull", [grantee])
@jsii.member(jsii_name="grantPullPush")
def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull and push images to this repository.
:param grantee: -
"""
return jsii.invoke(self, "grantPullPush", [grantee])
@jsii.member(jsii_name="onCloudTrailEvent")
def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Define a CloudWatch event that triggers when something happens to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
return jsii.invoke(self, "onCloudTrailEvent", [id, options])
@jsii.member(jsii_name="onCloudTrailImagePushed")
def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
options = OnCloudTrailImagePushedOptions(image_tag=image_tag, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
return jsii.invoke(self, "onCloudTrailImagePushed", [id, options])
@jsii.member(jsii_name="onEvent")
def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines a CloudWatch event rule which triggers for repository events.
Use
``rule.addEventPattern(pattern)`` to specify a filter.
:param id: -
:param options: -
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
return jsii.invoke(self, "onEvent", [id, options])
@jsii.member(jsii_name="onImageScanCompleted")
def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
options = OnImageScanCompletedOptions(image_tags=image_tags, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
return jsii.invoke(self, "onImageScanCompleted", [id, options])
@jsii.member(jsii_name="repositoryUriForTag")
def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
"""Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]
:param tag: Image tag to use (tools usually default to "latest" if omitted).
"""
return jsii.invoke(self, "repositoryUriForTag", [tag])
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.LifecycleRule", jsii_struct_bases=[], name_mapping={'description': 'description', 'max_image_age': 'maxImageAge', 'max_image_count': 'maxImageCount', 'rule_priority': 'rulePriority', 'tag_prefix_list': 'tagPrefixList', 'tag_status': 'tagStatus'})
class LifecycleRule():
    def __init__(self, *, description: typing.Optional[str]=None, max_image_age: typing.Optional[aws_cdk.core.Duration]=None, max_image_count: typing.Optional[jsii.Number]=None, rule_priority: typing.Optional[jsii.Number]=None, tag_prefix_list: typing.Optional[typing.List[str]]=None, tag_status: typing.Optional["TagStatus"]=None):
        """An ECR life cycle rule.

        :param description: Describes the purpose of the rule. Default: No description
        :param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge.
        :param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays.
        :param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned
        :param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged
        :param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
        """
        # Keep only the values that were explicitly supplied, preserving the
        # declaration order of the keyword parameters.
        supplied = {
            'description': description,
            'max_image_age': max_image_age,
            'max_image_count': max_image_count,
            'rule_priority': rule_priority,
            'tag_prefix_list': tag_prefix_list,
            'tag_status': tag_status,
        }
        self._values = {name: value for name, value in supplied.items() if value is not None}
    @property
    def description(self) -> typing.Optional[str]:
        """Describes the purpose of the rule.

        :default: No description
        """
        return self._values.get('description')
    @property
    def max_image_age(self) -> typing.Optional[aws_cdk.core.Duration]:
        """The maximum age of images to retain. The value must represent a number of days.

        Specify exactly one of maxImageCount and maxImageAge.
        """
        return self._values.get('max_image_age')
    @property
    def max_image_count(self) -> typing.Optional[jsii.Number]:
        """The maximum number of images to retain.

        Specify exactly one of maxImageCount and maxImageAgeDays.
        """
        return self._values.get('max_image_count')
    @property
    def rule_priority(self) -> typing.Optional[jsii.Number]:
        """Controls the order in which rules are evaluated (low to high).

        All rules must have a unique priority, where lower numbers have
        higher precedence. The first rule that matches is applied to an image.
        There can only be one rule with a tagStatus of Any, and it must have
        the highest rulePriority.
        All rules without a specified priority will have incrementing priorities
        automatically assigned to them, higher than any rules that DO have priorities.

        :default: Automatically assigned
        """
        return self._values.get('rule_priority')
    @property
    def tag_prefix_list(self) -> typing.Optional[typing.List[str]]:
        """Select images that have ALL the given prefixes in their tag.

        Only if tagStatus == TagStatus.Tagged
        """
        return self._values.get('tag_prefix_list')
    @property
    def tag_status(self) -> typing.Optional["TagStatus"]:
        """Select images based on tags.

        Only one rule is allowed to select untagged images, and it must
        have the highest rulePriority.

        :default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
        """
        return self._values.get('tag_status')
    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        fields = [f"{name}={value!r}" for name, value in self._values.items()]
        return f"LifecycleRule({', '.join(fields)})"
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.OnCloudTrailImagePushedOptions", jsii_struct_bases=[aws_cdk.aws_events.OnEventOptions], name_mapping={'description': 'description', 'event_pattern': 'eventPattern', 'rule_name': 'ruleName', 'target': 'target', 'image_tag': 'imageTag'})
class OnCloudTrailImagePushedOptions(aws_cdk.aws_events.OnEventOptions):
    def __init__(self, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None, image_tag: typing.Optional[str]=None):
        """Options for the onCloudTrailImagePushed method.

        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
        """
        # Accept a plain dict for event_pattern and coerce it to the struct type.
        if isinstance(event_pattern, dict): event_pattern = aws_cdk.aws_events.EventPattern(**event_pattern)
        # Only explicitly supplied values are recorded (None means "use default").
        self._values = {
        }
        if description is not None: self._values["description"] = description
        if event_pattern is not None: self._values["event_pattern"] = event_pattern
        if rule_name is not None: self._values["rule_name"] = rule_name
        if target is not None: self._values["target"] = target
        if image_tag is not None: self._values["image_tag"] = image_tag
    @property
    def description(self) -> typing.Optional[str]:
        """A description of the rule's purpose.

        :default: - No description
        """
        return self._values.get('description')
    @property
    def event_pattern(self) -> typing.Optional[aws_cdk.aws_events.EventPattern]:
        """Additional restrictions for the event to route to the specified target.

        The method that generates the rule probably imposes some type of event
        filtering. The filtering implied by what you pass here is added
        on top of that filtering.

        :default: - No additional filtering based on an event pattern.
        :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html
        """
        return self._values.get('event_pattern')
    @property
    def rule_name(self) -> typing.Optional[str]:
        """A name for the rule.

        :default: AWS CloudFormation generates a unique physical ID.
        """
        return self._values.get('rule_name')
    @property
    def target(self) -> typing.Optional[aws_cdk.aws_events.IRuleTarget]:
        """The target to register for the event.

        :default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        return self._values.get('target')
    @property
    def image_tag(self) -> typing.Optional[str]:
        """Only watch changes to this image tag.

        :default: - Watch changes to all tags
        """
        return self._values.get('image_tag')
    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        return 'OnCloudTrailImagePushedOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.OnImageScanCompletedOptions", jsii_struct_bases=[aws_cdk.aws_events.OnEventOptions], name_mapping={'description': 'description', 'event_pattern': 'eventPattern', 'rule_name': 'ruleName', 'target': 'target', 'image_tags': 'imageTags'})
class OnImageScanCompletedOptions(aws_cdk.aws_events.OnEventOptions):
    def __init__(self, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None, image_tags: typing.Optional[typing.List[str]]=None):
        """Options for the OnImageScanCompleted method.

        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        :param image_tags: Only watch changes to the image tags specified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
        """
        # Accept a plain dict for event_pattern and coerce it to the struct type.
        if isinstance(event_pattern, dict): event_pattern = aws_cdk.aws_events.EventPattern(**event_pattern)
        # Only explicitly supplied values are recorded (None means "use default").
        self._values = {
        }
        if description is not None: self._values["description"] = description
        if event_pattern is not None: self._values["event_pattern"] = event_pattern
        if rule_name is not None: self._values["rule_name"] = rule_name
        if target is not None: self._values["target"] = target
        if image_tags is not None: self._values["image_tags"] = image_tags
    @property
    def description(self) -> typing.Optional[str]:
        """A description of the rule's purpose.

        :default: - No description
        """
        return self._values.get('description')
    @property
    def event_pattern(self) -> typing.Optional[aws_cdk.aws_events.EventPattern]:
        """Additional restrictions for the event to route to the specified target.

        The method that generates the rule probably imposes some type of event
        filtering. The filtering implied by what you pass here is added
        on top of that filtering.

        :default: - No additional filtering based on an event pattern.
        :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html
        """
        return self._values.get('event_pattern')
    @property
    def rule_name(self) -> typing.Optional[str]:
        """A name for the rule.

        :default: AWS CloudFormation generates a unique physical ID.
        """
        return self._values.get('rule_name')
    @property
    def target(self) -> typing.Optional[aws_cdk.aws_events.IRuleTarget]:
        """The target to register for the event.

        :default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        return self._values.get('target')
    @property
    def image_tags(self) -> typing.Optional[typing.List[str]]:
        """Only watch changes to the image tags specified. Leave it undefined to watch the full repository.

        :default: - Watch the changes to the repository with all image tags
        """
        return self._values.get('image_tags')
    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        return 'OnImageScanCompletedOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.RepositoryAttributes", jsii_struct_bases=[], name_mapping={'repository_arn': 'repositoryArn', 'repository_name': 'repositoryName'})
class RepositoryAttributes():
    def __init__(self, *, repository_arn: str, repository_name: str):
        """Attributes identifying an existing ECR repository to import.

        :param repository_arn: the ARN of the repository.
        :param repository_name: the name of the repository.
        """
        # Both fields are mandatory, so the backing dict always has both keys.
        self._values = {'repository_arn': repository_arn, 'repository_name': repository_name}
    @property
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        return self._values.get('repository_arn')
    @property
    def repository_name(self) -> str:
        """The name of the repository."""
        return self._values.get('repository_name')
    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        fields = [f"{name}={value!r}" for name, value in self._values.items()]
        return f"RepositoryAttributes({', '.join(fields)})"
@jsii.implements(IRepository)
class RepositoryBase(aws_cdk.core.Resource, metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-ecr.RepositoryBase"):
    """Base class for ECR repository.

    Reused between imported repositories and owned repositories.
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Concrete proxy class that jsii instantiates in place of this
        # abstract class when an instance comes back from the JS side.
        return _RepositoryBaseProxy
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, physical_name: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param physical_name: The value passed in by users to the physical name prop of the resource. - ``undefined`` implies that a physical name will be allocated by CloudFormation during deployment. - a concrete value implies a specific physical name - ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical will only be generated by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation. Default: - The physical name will be allocated by CloudFormation at deployment time
        """
        props = aws_cdk.core.ResourceProps(physical_name=physical_name)
        jsii.create(RepositoryBase, self, [scope, id, props])
    @jsii.member(jsii_name="addToResourcePolicy")
    @abc.abstractmethod
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        ...
    @jsii.member(jsii_name="grant")
    def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
        """Grant the given principal identity permissions to perform the actions on this repository.

        :param grantee: -
        :param actions: -
        """
        return jsii.invoke(self, "grant", [grantee, *actions])
    @jsii.member(jsii_name="grantPull")
    def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to use the images in this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPull", [grantee])
    @jsii.member(jsii_name="grantPullPush")
    def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to pull and push images to this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPullPush", [grantee])
    @jsii.member(jsii_name="onCloudTrailEvent")
    def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Define a CloudWatch event that triggers when something happens to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailEvent", [id, options])
    @jsii.member(jsii_name="onCloudTrailImagePushed")
    def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnCloudTrailImagePushedOptions(image_tag=image_tag, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailImagePushed", [id, options])
    @jsii.member(jsii_name="onEvent")
    def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines a CloudWatch event rule which triggers for repository events.

        Use ``rule.addEventPattern(pattern)`` to specify a filter.

        :param id: -
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onEvent", [id, options])
    @jsii.member(jsii_name="onImageScanCompleted")
    def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when an image scan is completed.

        :param id: The id of the rule.
        :param image_tags: Only watch changes to the image tags specified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnImageScanCompletedOptions(image_tags=image_tags, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onImageScanCompleted", [id, options])
    @jsii.member(jsii_name="repositoryUriForTag")
    def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
        """Returns the URL of the repository. Can be used in ``docker push/pull``.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]

        :param tag: Optional image tag.
        """
        return jsii.invoke(self, "repositoryUriForTag", [tag])
    @property
    @jsii.member(jsii_name="repositoryArn")
    @abc.abstractmethod
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        ...
    @property
    @jsii.member(jsii_name="repositoryName")
    @abc.abstractmethod
    def repository_name(self) -> str:
        """The name of the repository."""
        ...
    @property
    @jsii.member(jsii_name="repositoryUri")
    def repository_uri(self) -> str:
        """The URI of this repository (represents the latest image):.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY
        """
        return jsii.get(self, "repositoryUri")
class _RepositoryBaseProxy(RepositoryBase, jsii.proxy_for(aws_cdk.core.Resource)):
    # Concrete jsii proxy for the abstract RepositoryBase: implements the
    # abstract members by delegating to the JavaScript object.
    @jsii.member(jsii_name="addToResourcePolicy")
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        return jsii.invoke(self, "addToResourcePolicy", [statement])
    @property
    @jsii.member(jsii_name="repositoryArn")
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        return jsii.get(self, "repositoryArn")
    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> str:
        """The name of the repository."""
        return jsii.get(self, "repositoryName")
class Repository(RepositoryBase, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-ecr.Repository"):
    """Define an ECR repository."""
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, lifecycle_registry_id: typing.Optional[str]=None, lifecycle_rules: typing.Optional[typing.List["LifecycleRule"]]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, repository_name: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed.
        :param lifecycle_rules: Life cycle rules to apply to this registry. Default: No life cycle rules
        :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain
        :param repository_name: Name for this repository. Default: Automatically generated name.
        """
        props = RepositoryProps(lifecycle_registry_id=lifecycle_registry_id, lifecycle_rules=lifecycle_rules, removal_policy=removal_policy, repository_name=repository_name)
        jsii.create(Repository, self, [scope, id, props])
    @jsii.member(jsii_name="arnForLocalRepository")
    @classmethod
    def arn_for_local_repository(cls, repository_name: str, scope: aws_cdk.core.IConstruct) -> str:
        """Returns an ECR ARN for a repository that resides in the same account/region as the current stack.

        :param repository_name: -
        :param scope: -
        """
        return jsii.sinvoke(cls, "arnForLocalRepository", [repository_name, scope])
    @jsii.member(jsii_name="fromRepositoryArn")
    @classmethod
    def from_repository_arn(cls, scope: aws_cdk.core.Construct, id: str, repository_arn: str) -> "IRepository":
        """Import an existing repository by its ARN.

        :param scope: -
        :param id: -
        :param repository_arn: -
        """
        return jsii.sinvoke(cls, "fromRepositoryArn", [scope, id, repository_arn])
    @jsii.member(jsii_name="fromRepositoryAttributes")
    @classmethod
    def from_repository_attributes(cls, scope: aws_cdk.core.Construct, id: str, *, repository_arn: str, repository_name: str) -> "IRepository":
        """Import a repository.

        :param scope: -
        :param id: -
        :param repository_arn: the ARN of the repository to import.
        :param repository_name: the name of the repository to import.
        """
        attrs = RepositoryAttributes(repository_arn=repository_arn, repository_name=repository_name)
        return jsii.sinvoke(cls, "fromRepositoryAttributes", [scope, id, attrs])
    @jsii.member(jsii_name="fromRepositoryName")
    @classmethod
    def from_repository_name(cls, scope: aws_cdk.core.Construct, id: str, repository_name: str) -> "IRepository":
        """Import an existing repository by its name.

        :param scope: -
        :param id: -
        :param repository_name: -
        """
        return jsii.sinvoke(cls, "fromRepositoryName", [scope, id, repository_name])
    @jsii.member(jsii_name="addLifecycleRule")
    def add_lifecycle_rule(self, *, description: typing.Optional[str]=None, max_image_age: typing.Optional[aws_cdk.core.Duration]=None, max_image_count: typing.Optional[jsii.Number]=None, rule_priority: typing.Optional[jsii.Number]=None, tag_prefix_list: typing.Optional[typing.List[str]]=None, tag_status: typing.Optional["TagStatus"]=None) -> None:
        """Add a life cycle rule to the repository.

        Life cycle rules automatically expire images from the repository that match
        certain conditions.

        :param description: Describes the purpose of the rule. Default: No description
        :param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge.
        :param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays.
        :param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned
        :param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged
        :param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
        """
        rule = LifecycleRule(description=description, max_image_age=max_image_age, max_image_count=max_image_count, rule_priority=rule_priority, tag_prefix_list=tag_prefix_list, tag_status=tag_status)
        return jsii.invoke(self, "addLifecycleRule", [rule])
    @jsii.member(jsii_name="addToResourcePolicy")
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        return jsii.invoke(self, "addToResourcePolicy", [statement])
    @property
    @jsii.member(jsii_name="repositoryArn")
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        return jsii.get(self, "repositoryArn")
    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> str:
        """The name of the repository."""
        return jsii.get(self, "repositoryName")
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.RepositoryProps", jsii_struct_bases=[], name_mapping={'lifecycle_registry_id': 'lifecycleRegistryId', 'lifecycle_rules': 'lifecycleRules', 'removal_policy': 'removalPolicy', 'repository_name': 'repositoryName'})
class RepositoryProps():
    def __init__(self, *, lifecycle_registry_id: typing.Optional[str]=None, lifecycle_rules: typing.Optional[typing.List["LifecycleRule"]]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, repository_name: typing.Optional[str]=None):
        """Construction properties for a Repository.

        :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed.
        :param lifecycle_rules: Life cycle rules to apply to this registry. Default: No life cycle rules
        :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain
        :param repository_name: Name for this repository. Default: Automatically generated name.
        """
        # Keep only explicitly supplied values; omitted keys mean
        # "use the default" on the JavaScript side.
        supplied = {
            'lifecycle_registry_id': lifecycle_registry_id,
            'lifecycle_rules': lifecycle_rules,
            'removal_policy': removal_policy,
            'repository_name': repository_name,
        }
        self._values = {name: value for name, value in supplied.items() if value is not None}
    @property
    def lifecycle_registry_id(self) -> typing.Optional[str]:
        """The AWS account ID associated with the registry that contains the repository.

        :default: The default registry is assumed.
        :see: https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_PutLifecyclePolicy.html
        """
        return self._values.get('lifecycle_registry_id')
    @property
    def lifecycle_rules(self) -> typing.Optional[typing.List["LifecycleRule"]]:
        """Life cycle rules to apply to this registry.

        :default: No life cycle rules
        """
        return self._values.get('lifecycle_rules')
    @property
    def removal_policy(self) -> typing.Optional[aws_cdk.core.RemovalPolicy]:
        """Determine what happens to the repository when the resource/stack is deleted.

        :default: RemovalPolicy.Retain
        """
        return self._values.get('removal_policy')
    @property
    def repository_name(self) -> typing.Optional[str]:
        """Name for this repository.

        :default: Automatically generated name.
        """
        return self._values.get('repository_name')
    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        fields = [f"{name}={value!r}" for name, value in self._values.items()]
        return f"RepositoryProps({', '.join(fields)})"
@jsii.enum(jsii_type="@aws-cdk/aws-ecr.TagStatus")
class TagStatus(enum.Enum):
    """Select images based on tags."""
    # Rule applies to all images.
    ANY = "ANY"
    """Rule applies to all images."""
    # Rule applies to tagged images.
    TAGGED = "TAGGED"
    """Rule applies to tagged images."""
    # Rule applies to untagged images.
    UNTAGGED = "UNTAGGED"
    """Rule applies to untagged images."""
# Public API surface of this generated module; publication.publish() makes
# the jsii assembly's members available to dependent packages.
__all__ = ["CfnRepository", "CfnRepositoryProps", "IRepository", "LifecycleRule", "OnCloudTrailImagePushedOptions", "OnImageScanCompletedOptions", "Repository", "RepositoryAttributes", "RepositoryBase", "RepositoryProps", "TagStatus", "__jsii_assembly__"]
publication.publish()
|
"""
## Amazon ECR Construct Library
<!--BEGIN STABILITY BANNER-->---

---
<!--END STABILITY BANNER-->
This package contains constructs for working with Amazon Elastic Container Registry.
### Repositories
Define a repository by creating a new instance of `Repository`. A repository
holds multiple versions of a single container image.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
repository = ecr.Repository(self, "Repository")
```
### Automatically clean up repositories
You can set life cycle rules to automatically clean up old images from your
repository. The first life cycle rule that matches an image will be applied
against that image. For example, the following deletes images older than
30 days, while keeping all images tagged with prod (note that the order
is important here):
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
repository.add_lifecycle_rule(tag_prefix_list=["prod"], max_image_count=9999)
repository.add_lifecycle_rule(max_image_age=cdk.Duration.days(30))
```
"""
import abc
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
from jsii.python import classproperty
import aws_cdk.aws_events
import aws_cdk.aws_iam
import aws_cdk.core
# Load the bundled jsii assembly for this package so the Python classes below
# can bind to their JavaScript implementations.
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-ecr", "1.18.0", __name__, "aws-ecr@1.18.0.jsii.tgz")
@jsii.implements(aws_cdk.core.IInspectable)
class CfnRepository(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-ecr.CfnRepository"):
    """A CloudFormation ``AWS::ECR::Repository``.

    NOTE(review): jsii-generated binding code. All state lives in the jsii
    kernel; every accessor below delegates via ``jsii.get``/``jsii.set``/
    ``jsii.invoke``. Do not hand-edit method names or ``jsii_name`` values —
    they must match the loaded assembly exactly.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html
    cloudformationResource:
    :cloudformationResource:: AWS::ECR::Repository
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, lifecycle_policy: typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]=None, repository_name: typing.Optional[str]=None, repository_policy_text: typing.Any=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None) -> None:
        """Create a new ``AWS::ECR::Repository``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``.
        :param repository_name: ``AWS::ECR::Repository.RepositoryName``.
        :param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``.
        :param tags: ``AWS::ECR::Repository.Tags``.
        """
        # Bundle the keyword arguments into the typed props struct and hand
        # construction off to the jsii kernel.
        props = CfnRepositoryProps(lifecycle_policy=lifecycle_policy, repository_name=repository_name, repository_policy_text=repository_policy_text, tags=tags)
        jsii.create(CfnRepository, self, [scope, id, props])
    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        """Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "inspect", [inspector])
    @jsii.member(jsii_name="renderProperties")
    def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
        """Render this resource's properties for CloudFormation synthesis.

        :param props: -
        """
        return jsii.invoke(self, "renderProperties", [props])
    @classproperty
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> str:
        """The CloudFormation resource type name for this resource class."""
        return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
    @property
    @jsii.member(jsii_name="attrArn")
    def attr_arn(self) -> str:
        """The ``Arn`` attribute of the deployed repository.

        cloudformationAttribute:
        :cloudformationAttribute:: Arn
        """
        return jsii.get(self, "attrArn")
    @property
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
        # Raw property bag consumed during template synthesis.
        return jsii.get(self, "cfnProperties")
    @property
    @jsii.member(jsii_name="tags")
    def tags(self) -> aws_cdk.core.TagManager:
        """``AWS::ECR::Repository.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags
        """
        return jsii.get(self, "tags")
    @property
    @jsii.member(jsii_name="repositoryPolicyText")
    def repository_policy_text(self) -> typing.Any:
        """``AWS::ECR::Repository.RepositoryPolicyText``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext
        """
        return jsii.get(self, "repositoryPolicyText")
    @repository_policy_text.setter
    def repository_policy_text(self, value: typing.Any):
        return jsii.set(self, "repositoryPolicyText", value)
    @property
    @jsii.member(jsii_name="lifecyclePolicy")
    def lifecycle_policy(self) -> typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``AWS::ECR::Repository.LifecyclePolicy``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy
        """
        return jsii.get(self, "lifecyclePolicy")
    @lifecycle_policy.setter
    def lifecycle_policy(self, value: typing.Optional[typing.Union[typing.Optional["LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]):
        return jsii.set(self, "lifecyclePolicy", value)
    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> typing.Optional[str]:
        """``AWS::ECR::Repository.RepositoryName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname
        """
        return jsii.get(self, "repositoryName")
    @repository_name.setter
    def repository_name(self, value: typing.Optional[str]):
        return jsii.set(self, "repositoryName", value)
    @jsii.data_type(jsii_type="@aws-cdk/aws-ecr.CfnRepository.LifecyclePolicyProperty", jsii_struct_bases=[], name_mapping={'lifecycle_policy_text': 'lifecyclePolicyText', 'registry_id': 'registryId'})
    class LifecyclePolicyProperty():
        # Value struct: stores only the keys that were explicitly provided,
        # so equality and repr reflect exactly the caller-supplied fields.
        def __init__(self, *, lifecycle_policy_text: typing.Optional[str]=None, registry_id: typing.Optional[str]=None):
            """
            :param lifecycle_policy_text: ``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``.
            :param registry_id: ``CfnRepository.LifecyclePolicyProperty.RegistryId``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html
            """
            self._values = {
            }
            if lifecycle_policy_text is not None: self._values["lifecycle_policy_text"] = lifecycle_policy_text
            if registry_id is not None: self._values["registry_id"] = registry_id
        @property
        def lifecycle_policy_text(self) -> typing.Optional[str]:
            """``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-lifecyclepolicytext
            """
            return self._values.get('lifecycle_policy_text')
        @property
        def registry_id(self) -> typing.Optional[str]:
            """``CfnRepository.LifecyclePolicyProperty.RegistryId``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-registryid
            """
            return self._values.get('registry_id')
        def __eq__(self, rhs) -> bool:
            # Structs compare by stored values; type must match exactly.
            return isinstance(rhs, self.__class__) and rhs._values == self._values
        def __ne__(self, rhs) -> bool:
            return not (rhs == self)
        def __repr__(self) -> str:
            return 'LifecyclePolicyProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.CfnRepositoryProps", jsii_struct_bases=[], name_mapping={'lifecycle_policy': 'lifecyclePolicy', 'repository_name': 'repositoryName', 'repository_policy_text': 'repositoryPolicyText', 'tags': 'tags'})
class CfnRepositoryProps():
def __init__(self, *, lifecycle_policy: typing.Optional[typing.Union[typing.Optional["CfnRepository.LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]=None, repository_name: typing.Optional[str]=None, repository_policy_text: typing.Any=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None):
"""Properties for defining a ``AWS::ECR::Repository``.
:param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``.
:param repository_name: ``AWS::ECR::Repository.RepositoryName``.
:param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``.
:param tags: ``AWS::ECR::Repository.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html
"""
self._values = {
}
if lifecycle_policy is not None: self._values["lifecycle_policy"] = lifecycle_policy
if repository_name is not None: self._values["repository_name"] = repository_name
if repository_policy_text is not None: self._values["repository_policy_text"] = repository_policy_text
if tags is not None: self._values["tags"] = tags
@property
def lifecycle_policy(self) -> typing.Optional[typing.Union[typing.Optional["CfnRepository.LifecyclePolicyProperty"], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::ECR::Repository.LifecyclePolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy
"""
return self._values.get('lifecycle_policy')
@property
def repository_name(self) -> typing.Optional[str]:
"""``AWS::ECR::Repository.RepositoryName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname
"""
return self._values.get('repository_name')
@property
def repository_policy_text(self) -> typing.Any:
"""``AWS::ECR::Repository.RepositoryPolicyText``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext
"""
return self._values.get('repository_policy_text')
@property
def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]:
"""``AWS::ECR::Repository.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags
"""
return self._values.get('tags')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnRepositoryProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.interface(jsii_type="@aws-cdk/aws-ecr.IRepository")
class IRepository(aws_cdk.core.IResource, jsii.compat.Protocol):
"""Represents an ECR repository."""
@staticmethod
def __jsii_proxy_class__():
return _IRepositoryProxy
@property
@jsii.member(jsii_name="repositoryArn")
def repository_arn(self) -> str:
"""The ARN of the repository.
attribute:
:attribute:: true
"""
...
@property
@jsii.member(jsii_name="repositoryName")
def repository_name(self) -> str:
"""The name of the repository.
attribute:
:attribute:: true
"""
...
@property
@jsii.member(jsii_name="repositoryUri")
def repository_uri(self) -> str:
"""The URI of this repository (represents the latest image):.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY
attribute:
:attribute:: true
"""
...
@jsii.member(jsii_name="addToResourcePolicy")
def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Add a policy statement to the repository's resource policy.
:param statement: -
"""
...
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Grant the given principal identity permissions to perform the actions on this repository.
:param grantee: -
:param actions: -
"""
...
@jsii.member(jsii_name="grantPull")
def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull images in this repository.
:param grantee: -
"""
...
@jsii.member(jsii_name="grantPullPush")
def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to pull and push images to this repository.
:param grantee: -
"""
...
@jsii.member(jsii_name="onCloudTrailEvent")
def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Define a CloudWatch event that triggers when something happens to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onCloudTrailImagePushed")
def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.
Requires that there exists at least one CloudTrail Trail in your account
that captures the event. This method will not create the Trail.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onEvent")
def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines a CloudWatch event rule which triggers for repository events.
Use
``rule.addEventPattern(pattern)`` to specify a filter.
:param id: -
:param options: -
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="onImageScanCompleted")
def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
"""Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed.
:param id: The id of the rule.
:param options: Options for adding the rule.
:param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
"""
...
@jsii.member(jsii_name="repositoryUriForTag")
def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
"""Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``.
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]
:param tag: Image tag to use (tools usually default to "latest" if omitted).
"""
...
class _IRepositoryProxy(jsii.proxy_for(aws_cdk.core.IResource)):
    """Represents an ECR repository.

    NOTE(review): jsii proxy for ``IRepository`` — instantiated by the kernel
    for foreign objects implementing the interface. Every member forwards to
    the kernel via ``jsii.get``/``jsii.invoke``; keyword-only options are
    packed into the matching options struct before the call.
    """
    __jsii_type__ = "@aws-cdk/aws-ecr.IRepository"
    @property
    @jsii.member(jsii_name="repositoryArn")
    def repository_arn(self) -> str:
        """The ARN of the repository.

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "repositoryArn")
    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> str:
        """The name of the repository.

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "repositoryName")
    @property
    @jsii.member(jsii_name="repositoryUri")
    def repository_uri(self) -> str:
        """The URI of this repository (represents the latest image):.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "repositoryUri")
    @jsii.member(jsii_name="addToResourcePolicy")
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        return jsii.invoke(self, "addToResourcePolicy", [statement])
    @jsii.member(jsii_name="grant")
    def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
        """Grant the given principal identity permissions to perform the actions on this repository.

        :param grantee: -
        :param actions: -
        """
        return jsii.invoke(self, "grant", [grantee, *actions])
    @jsii.member(jsii_name="grantPull")
    def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to pull images in this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPull", [grantee])
    @jsii.member(jsii_name="grantPullPush")
    def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to pull and push images to this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPullPush", [grantee])
    @jsii.member(jsii_name="onCloudTrailEvent")
    def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Define a CloudWatch event that triggers when something happens to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        # Pack keyword-only options into the struct the jsii kernel expects.
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailEvent", [id, options])
    @jsii.member(jsii_name="onCloudTrailImagePushed")
    def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnCloudTrailImagePushedOptions(image_tag=image_tag, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailImagePushed", [id, options])
    @jsii.member(jsii_name="onEvent")
    def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines a CloudWatch event rule which triggers for repository events.

        Use
        ``rule.addEventPattern(pattern)`` to specify a filter.

        :param id: -
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onEvent", [id, options])
    @jsii.member(jsii_name="onImageScanCompleted")
    def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed.

        :param id: The id of the rule.
        :param image_tags: Only watch changes to the image tags specified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnImageScanCompletedOptions(image_tags=image_tags, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onImageScanCompleted", [id, options])
    @jsii.member(jsii_name="repositoryUriForTag")
    def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
        """Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]

        :param tag: Image tag to use (tools usually default to "latest" if omitted).
        """
        return jsii.invoke(self, "repositoryUriForTag", [tag])
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.LifecycleRule", jsii_struct_bases=[], name_mapping={'description': 'description', 'max_image_age': 'maxImageAge', 'max_image_count': 'maxImageCount', 'rule_priority': 'rulePriority', 'tag_prefix_list': 'tagPrefixList', 'tag_status': 'tagStatus'})
class LifecycleRule():
def __init__(self, *, description: typing.Optional[str]=None, max_image_age: typing.Optional[aws_cdk.core.Duration]=None, max_image_count: typing.Optional[jsii.Number]=None, rule_priority: typing.Optional[jsii.Number]=None, tag_prefix_list: typing.Optional[typing.List[str]]=None, tag_status: typing.Optional["TagStatus"]=None):
"""An ECR life cycle rule.
:param description: Describes the purpose of the rule. Default: No description
:param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge.
:param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays.
:param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned
:param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged
:param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
"""
self._values = {
}
if description is not None: self._values["description"] = description
if max_image_age is not None: self._values["max_image_age"] = max_image_age
if max_image_count is not None: self._values["max_image_count"] = max_image_count
if rule_priority is not None: self._values["rule_priority"] = rule_priority
if tag_prefix_list is not None: self._values["tag_prefix_list"] = tag_prefix_list
if tag_status is not None: self._values["tag_status"] = tag_status
@property
def description(self) -> typing.Optional[str]:
"""Describes the purpose of the rule.
default
:default: No description
"""
return self._values.get('description')
@property
def max_image_age(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The maximum age of images to retain. The value must represent a number of days.
Specify exactly one of maxImageCount and maxImageAge.
"""
return self._values.get('max_image_age')
@property
def max_image_count(self) -> typing.Optional[jsii.Number]:
"""The maximum number of images to retain.
Specify exactly one of maxImageCount and maxImageAgeDays.
"""
return self._values.get('max_image_count')
@property
def rule_priority(self) -> typing.Optional[jsii.Number]:
"""Controls the order in which rules are evaluated (low to high).
All rules must have a unique priority, where lower numbers have
higher precedence. The first rule that matches is applied to an image.
There can only be one rule with a tagStatus of Any, and it must have
the highest rulePriority.
All rules without a specified priority will have incrementing priorities
automatically assigned to them, higher than any rules that DO have priorities.
default
:default: Automatically assigned
"""
return self._values.get('rule_priority')
@property
def tag_prefix_list(self) -> typing.Optional[typing.List[str]]:
"""Select images that have ALL the given prefixes in their tag.
Only if tagStatus == TagStatus.Tagged
"""
return self._values.get('tag_prefix_list')
@property
def tag_status(self) -> typing.Optional["TagStatus"]:
"""Select images based on tags.
Only one rule is allowed to select untagged images, and it must
have the highest rulePriority.
default
:default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
"""
return self._values.get('tag_status')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LifecycleRule(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.OnCloudTrailImagePushedOptions", jsii_struct_bases=[aws_cdk.aws_events.OnEventOptions], name_mapping={'description': 'description', 'event_pattern': 'eventPattern', 'rule_name': 'ruleName', 'target': 'target', 'image_tag': 'imageTag'})
class OnCloudTrailImagePushedOptions(aws_cdk.aws_events.OnEventOptions):
def __init__(self, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None, image_tag: typing.Optional[str]=None):
"""Options for the onCloudTrailImagePushed method.
:param description: A description of the rule's purpose. Default: - No description
:param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
:param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
:param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
"""
if isinstance(event_pattern, dict): event_pattern = aws_cdk.aws_events.EventPattern(**event_pattern)
self._values = {
}
if description is not None: self._values["description"] = description
if event_pattern is not None: self._values["event_pattern"] = event_pattern
if rule_name is not None: self._values["rule_name"] = rule_name
if target is not None: self._values["target"] = target
if image_tag is not None: self._values["image_tag"] = image_tag
    @property
    def description(self) -> typing.Optional[str]:
        """A description of the rule's purpose.

        default
        :default: - No description
        """
        # Optional constructor argument: keys absent from _values read back as None.
        return self._values.get('description')

    @property
    def event_pattern(self) -> typing.Optional[aws_cdk.aws_events.EventPattern]:
        """Additional restrictions for the event to route to the specified target.

        The method that generates the rule probably imposes some type of event
        filtering. The filtering implied by what you pass here is added
        on top of that filtering.

        default
        :default: - No additional filtering based on an event pattern.

        see
        :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html
        """
        return self._values.get('event_pattern')

    @property
    def rule_name(self) -> typing.Optional[str]:
        """A name for the rule.

        default
        :default: AWS CloudFormation generates a unique physical ID.
        """
        return self._values.get('rule_name')

    @property
    def target(self) -> typing.Optional[aws_cdk.aws_events.IRuleTarget]:
        """The target to register for the event.

        default
        :default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        return self._values.get('target')

    @property
    def image_tag(self) -> typing.Optional[str]:
        """Only watch changes to this image tag.

        default
        :default: - Watch changes to all tags
        """
        return self._values.get('image_tag')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete class and identical captured option values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        # Generated code defines __ne__ explicitly as the negation of __eq__.
        return not (rhs == self)

    def __repr__(self) -> str:
        # Render as a constructor-like string listing only the options that were set.
        return 'OnCloudTrailImagePushedOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.OnImageScanCompletedOptions", jsii_struct_bases=[aws_cdk.aws_events.OnEventOptions], name_mapping={'description': 'description', 'event_pattern': 'eventPattern', 'rule_name': 'ruleName', 'target': 'target', 'image_tags': 'imageTags'})
class OnImageScanCompletedOptions(aws_cdk.aws_events.OnEventOptions):
    def __init__(self, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None, image_tags: typing.Optional[typing.List[str]]=None):
        """Options for the OnImageScanCompleted method.

        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        :param image_tags: Only watch changes to the image tags specified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
        """
        # Accept a plain dict for event_pattern and normalise it into the typed struct.
        if isinstance(event_pattern, dict): event_pattern = aws_cdk.aws_events.EventPattern(**event_pattern)
        self._values = {
        }
        # Only record options the caller actually supplied; absent keys read back as None.
        if description is not None: self._values["description"] = description
        if event_pattern is not None: self._values["event_pattern"] = event_pattern
        if rule_name is not None: self._values["rule_name"] = rule_name
        if target is not None: self._values["target"] = target
        if image_tags is not None: self._values["image_tags"] = image_tags

    @property
    def description(self) -> typing.Optional[str]:
        """A description of the rule's purpose.

        default
        :default: - No description
        """
        return self._values.get('description')

    @property
    def event_pattern(self) -> typing.Optional[aws_cdk.aws_events.EventPattern]:
        """Additional restrictions for the event to route to the specified target.

        The method that generates the rule probably imposes some type of event
        filtering. The filtering implied by what you pass here is added
        on top of that filtering.

        default
        :default: - No additional filtering based on an event pattern.

        see
        :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html
        """
        return self._values.get('event_pattern')

    @property
    def rule_name(self) -> typing.Optional[str]:
        """A name for the rule.

        default
        :default: AWS CloudFormation generates a unique physical ID.
        """
        return self._values.get('rule_name')

    @property
    def target(self) -> typing.Optional[aws_cdk.aws_events.IRuleTarget]:
        """The target to register for the event.

        default
        :default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        return self._values.get('target')

    @property
    def image_tags(self) -> typing.Optional[typing.List[str]]:
        """Only watch changes to the image tags specified. Leave it undefined to watch the full repository.

        default
        :default: - Watch the changes to the repository with all image tags
        """
        return self._values.get('image_tags')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete class and identical captured option values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'OnImageScanCompletedOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.RepositoryAttributes", jsii_struct_bases=[], name_mapping={'repository_arn': 'repositoryArn', 'repository_name': 'repositoryName'})
class RepositoryAttributes():
    def __init__(self, *, repository_arn: str, repository_name: str):
        """
        :param repository_arn:
        :param repository_name:
        """
        # Both attributes are required, so they are captured unconditionally.
        self._values = dict(
            repository_arn=repository_arn,
            repository_name=repository_name,
        )

    @property
    def repository_arn(self) -> str:
        # Required key; always present after __init__.
        return self._values.get('repository_arn')

    @property
    def repository_name(self) -> str:
        return self._values.get('repository_name')

    def __eq__(self, rhs) -> bool:
        # Only compare against instances of the same concrete class.
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, value) for key, value in self._values.items())
        return 'RepositoryAttributes({})'.format(rendered)
@jsii.implements(IRepository)
class RepositoryBase(aws_cdk.core.Resource, metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-ecr.RepositoryBase"):
    """Base class for ECR repository.

    Reused between imported repositories and owned repositories.
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells the jsii runtime which concrete class to instantiate for this abstract type.
        return _RepositoryBaseProxy

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, physical_name: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param physical_name: The value passed in by users to the physical name prop of the resource. - ``undefined`` implies that a physical name will be allocated by CloudFormation during deployment. - a concrete value implies a specific physical name - ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical will only be generated by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation. Default: - The physical name will be allocated by CloudFormation at deployment time
        """
        props = aws_cdk.core.ResourceProps(physical_name=physical_name)
        # Construction is delegated to the jsii runtime.
        jsii.create(RepositoryBase, self, [scope, id, props])

    @jsii.member(jsii_name="addToResourcePolicy")
    @abc.abstractmethod
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        # Abstract: implemented by Repository (owned) and by the imported-repository proxy.
        ...

    @jsii.member(jsii_name="grant")
    def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
        """Grant the given principal identity permissions to perform the actions on this repository.

        :param grantee: -
        :param actions: -
        """
        return jsii.invoke(self, "grant", [grantee, *actions])

    @jsii.member(jsii_name="grantPull")
    def grant_pull(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to use the images in this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPull", [grantee])

    @jsii.member(jsii_name="grantPullPush")
    def grant_pull_push(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
        """Grant the given identity permissions to pull and push images to this repository.

        :param grantee: -
        """
        return jsii.invoke(self, "grantPullPush", [grantee])

    @jsii.member(jsii_name="onCloudTrailEvent")
    def on_cloud_trail_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Define a CloudWatch event that triggers when something happens to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param options: Options for adding the rule.
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        # Bundle the keyword options into the struct the jsii method expects.
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailEvent", [id, options])

    @jsii.member(jsii_name="onCloudTrailImagePushed")
    def on_cloud_trail_image_pushed(self, id: str, *, image_tag: typing.Optional[str]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository.

        Requires that there exists at least one CloudTrail Trail in your account
        that captures the event. This method will not create the Trail.

        :param id: The id of the rule.
        :param options: Options for adding the rule.
        :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnCloudTrailImagePushedOptions(image_tag=image_tag, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onCloudTrailImagePushed", [id, options])

    @jsii.member(jsii_name="onEvent")
    def on_event(self, id: str, *, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines a CloudWatch event rule which triggers for repository events.

        Use ``rule.addEventPattern(pattern)`` to specify a filter.

        :param id: -
        :param options: -
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = aws_cdk.aws_events.OnEventOptions(description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onEvent", [id, options])

    @jsii.member(jsii_name="onImageScanCompleted")
    def on_image_scan_completed(self, id: str, *, image_tags: typing.Optional[typing.List[str]]=None, description: typing.Optional[str]=None, event_pattern: typing.Optional[aws_cdk.aws_events.EventPattern]=None, rule_name: typing.Optional[str]=None, target: typing.Optional[aws_cdk.aws_events.IRuleTarget]=None) -> aws_cdk.aws_events.Rule:
        """Defines an AWS CloudWatch event rule that can trigger a target when an image scan is completed.

        :param id: The id of the rule.
        :param options: Options for adding the rule.
        :param image_tags: Only watch changes to the image tags specified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags
        :param description: A description of the rule's purpose. Default: - No description
        :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern.
        :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID.
        :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target.
        """
        options = OnImageScanCompletedOptions(image_tags=image_tags, description=description, event_pattern=event_pattern, rule_name=rule_name, target=target)
        return jsii.invoke(self, "onImageScanCompleted", [id, options])

    @jsii.member(jsii_name="repositoryUriForTag")
    def repository_uri_for_tag(self, tag: typing.Optional[str]=None) -> str:
        """Returns the URL of the repository. Can be used in ``docker push/pull``.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG]

        :param tag: Optional image tag.
        """
        return jsii.invoke(self, "repositoryUriForTag", [tag])

    @property
    @jsii.member(jsii_name="repositoryArn")
    @abc.abstractmethod
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        ...

    @property
    @jsii.member(jsii_name="repositoryName")
    @abc.abstractmethod
    def repository_name(self) -> str:
        """The name of the repository."""
        ...

    @property
    @jsii.member(jsii_name="repositoryUri")
    def repository_uri(self) -> str:
        """The URI of this repository (represents the latest image):.

        ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY
        """
        return jsii.get(self, "repositoryUri")
class _RepositoryBaseProxy(RepositoryBase, jsii.proxy_for(aws_cdk.core.Resource)):
    """Concrete jsii proxy for :class:`RepositoryBase`.

    Returned by ``RepositoryBase.__jsii_proxy_class__`` so the jsii runtime can
    instantiate the otherwise-abstract base class; each member simply forwards
    to the underlying jsii object.
    """
    @jsii.member(jsii_name="addToResourcePolicy")
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        return jsii.invoke(self, "addToResourcePolicy", [statement])

    @property
    @jsii.member(jsii_name="repositoryArn")
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        return jsii.get(self, "repositoryArn")

    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> str:
        """The name of the repository."""
        return jsii.get(self, "repositoryName")
class Repository(RepositoryBase, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-ecr.Repository"):
    """Define an ECR repository."""
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, lifecycle_registry_id: typing.Optional[str]=None, lifecycle_rules: typing.Optional[typing.List["LifecycleRule"]]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, repository_name: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed.
        :param lifecycle_rules: Life cycle rules to apply to this registry. Default: No life cycle rules
        :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain
        :param repository_name: Name for this repository. Default: Automatically generated name.
        """
        # Bundle keyword options into the props struct and hand construction to jsii.
        props = RepositoryProps(lifecycle_registry_id=lifecycle_registry_id, lifecycle_rules=lifecycle_rules, removal_policy=removal_policy, repository_name=repository_name)
        jsii.create(Repository, self, [scope, id, props])

    @jsii.member(jsii_name="arnForLocalRepository")
    @classmethod
    def arn_for_local_repository(cls, repository_name: str, scope: aws_cdk.core.IConstruct) -> str:
        """Returns an ECR ARN for a repository that resides in the same account/region as the current stack.

        :param repository_name: -
        :param scope: -
        """
        # Static invocation on the jsii class (no instance involved).
        return jsii.sinvoke(cls, "arnForLocalRepository", [repository_name, scope])

    @jsii.member(jsii_name="fromRepositoryArn")
    @classmethod
    def from_repository_arn(cls, scope: aws_cdk.core.Construct, id: str, repository_arn: str) -> "IRepository":
        """Import an existing repository by its ARN.

        :param scope: -
        :param id: -
        :param repository_arn: -
        """
        return jsii.sinvoke(cls, "fromRepositoryArn", [scope, id, repository_arn])

    @jsii.member(jsii_name="fromRepositoryAttributes")
    @classmethod
    def from_repository_attributes(cls, scope: aws_cdk.core.Construct, id: str, *, repository_arn: str, repository_name: str) -> "IRepository":
        """Import a repository.

        :param scope: -
        :param id: -
        :param attrs: -
        :param repository_arn:
        :param repository_name:
        """
        attrs = RepositoryAttributes(repository_arn=repository_arn, repository_name=repository_name)
        return jsii.sinvoke(cls, "fromRepositoryAttributes", [scope, id, attrs])

    @jsii.member(jsii_name="fromRepositoryName")
    @classmethod
    def from_repository_name(cls, scope: aws_cdk.core.Construct, id: str, repository_name: str) -> "IRepository":
        """Import an existing repository by its name.

        :param scope: -
        :param id: -
        :param repository_name: -
        """
        return jsii.sinvoke(cls, "fromRepositoryName", [scope, id, repository_name])

    @jsii.member(jsii_name="addLifecycleRule")
    def add_lifecycle_rule(self, *, description: typing.Optional[str]=None, max_image_age: typing.Optional[aws_cdk.core.Duration]=None, max_image_count: typing.Optional[jsii.Number]=None, rule_priority: typing.Optional[jsii.Number]=None, tag_prefix_list: typing.Optional[typing.List[str]]=None, tag_status: typing.Optional["TagStatus"]=None) -> None:
        """Add a life cycle rule to the repository.

        Life cycle rules automatically expire images from the repository that match
        certain conditions.

        :param rule: -
        :param description: Describes the purpose of the rule. Default: No description
        :param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge.
        :param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAge.
        :param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned
        :param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged
        :param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise
        """
        rule = LifecycleRule(description=description, max_image_age=max_image_age, max_image_count=max_image_count, rule_priority=rule_priority, tag_prefix_list=tag_prefix_list, tag_status=tag_status)
        return jsii.invoke(self, "addLifecycleRule", [rule])

    @jsii.member(jsii_name="addToResourcePolicy")
    def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Add a policy statement to the repository's resource policy.

        :param statement: -
        """
        return jsii.invoke(self, "addToResourcePolicy", [statement])

    @property
    @jsii.member(jsii_name="repositoryArn")
    def repository_arn(self) -> str:
        """The ARN of the repository."""
        return jsii.get(self, "repositoryArn")

    @property
    @jsii.member(jsii_name="repositoryName")
    def repository_name(self) -> str:
        """The name of the repository."""
        return jsii.get(self, "repositoryName")
@jsii.data_type(jsii_type="@aws-cdk/aws-ecr.RepositoryProps", jsii_struct_bases=[], name_mapping={'lifecycle_registry_id': 'lifecycleRegistryId', 'lifecycle_rules': 'lifecycleRules', 'removal_policy': 'removalPolicy', 'repository_name': 'repositoryName'})
class RepositoryProps():
    def __init__(self, *, lifecycle_registry_id: typing.Optional[str]=None, lifecycle_rules: typing.Optional[typing.List["LifecycleRule"]]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, repository_name: typing.Optional[str]=None):
        """
        :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed.
        :param lifecycle_rules: Life cycle rules to apply to this registry. Default: No life cycle rules
        :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain
        :param repository_name: Name for this repository. Default: Automatically generated name.
        """
        # Capture only the options the caller actually supplied; unset ones are omitted
        # so the accessors below read them back as None.
        supplied = {
            'lifecycle_registry_id': lifecycle_registry_id,
            'lifecycle_rules': lifecycle_rules,
            'removal_policy': removal_policy,
            'repository_name': repository_name,
        }
        self._values = {key: value for key, value in supplied.items() if value is not None}

    @property
    def lifecycle_registry_id(self) -> typing.Optional[str]:
        """The AWS account ID associated with the registry that contains the repository.

        default
        :default: The default registry is assumed.

        see
        :see: https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_PutLifecyclePolicy.html
        """
        return self._values.get('lifecycle_registry_id', None)

    @property
    def lifecycle_rules(self) -> typing.Optional[typing.List["LifecycleRule"]]:
        """Life cycle rules to apply to this registry.

        default
        :default: No life cycle rules
        """
        return self._values.get('lifecycle_rules', None)

    @property
    def removal_policy(self) -> typing.Optional[aws_cdk.core.RemovalPolicy]:
        """Determine what happens to the repository when the resource/stack is deleted.

        default
        :default: RemovalPolicy.Retain
        """
        return self._values.get('removal_policy', None)

    @property
    def repository_name(self) -> typing.Optional[str]:
        """Name for this repository.

        default
        :default: Automatically generated name.
        """
        return self._values.get('repository_name', None)

    def __eq__(self, rhs) -> bool:
        # Only compare against instances of the same concrete class.
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, value) for key, value in self._values.items())
        return 'RepositoryProps({})'.format(rendered)
@jsii.enum(jsii_type="@aws-cdk/aws-ecr.TagStatus")
class TagStatus(enum.Enum):
    """Select images based on tags."""
    # Member values are the literal strings exchanged with the jsii runtime;
    # do not rename them.
    ANY = "ANY"
    """Rule applies to all images."""
    TAGGED = "TAGGED"
    """Rule applies to tagged images."""
    UNTAGGED = "UNTAGGED"
    """Rule applies to untagged images."""
# Explicit public API surface of this generated module.
__all__ = ["CfnRepository", "CfnRepositoryProps", "IRepository", "LifecycleRule", "OnCloudTrailImagePushedOptions", "OnImageScanCompletedOptions", "Repository", "RepositoryAttributes", "RepositoryBase", "RepositoryProps", "TagStatus", "__jsii_assembly__"]

# jsii code-generation hook: registers/publishes the module's runtime metadata.
publication.publish()
|
en
| 0.79024
|
## Amazon ECR Construct Library <!--BEGIN STABILITY BANNER-->---  --- <!--END STABILITY BANNER--> This package contains constructs for working with Amazon Elastic Container Registry. ### Repositories Define a repository by creating a new instance of `Repository`. A repository holds multiple verions of a single container image. ```python # Example automatically generated. See https://github.com/aws/jsii/issues/826 repository = ecr.Repository(self, "Repository") ``` ### Automatically clean up repositories You can set life cycle rules to automatically clean up old images from your repository. The first life cycle rule that matches an image will be applied against that image. For example, the following deletes images older than 30 days, while keeping all images tagged with prod (note that the order is important here): ```python # Example automatically generated. See https://github.com/aws/jsii/issues/826 repository.add_lifecycle_rule(tag_prefix_list=["prod"], max_image_count=9999) repository.add_lifecycle_rule(max_image_age_days=cdk.Duration.days(30)) ``` A CloudFormation ``AWS::ECR::Repository``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html cloudformationResource: :cloudformationResource:: AWS::ECR::Repository Create a new ``AWS::ECR::Repository``. :param scope: - scope in which this resource is defined. :param id: - scoped id of the resource. :param props: - resource properties. :param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``. :param repository_name: ``AWS::ECR::Repository.RepositoryName``. :param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``. :param tags: ``AWS::ECR::Repository.Tags``. Examines the CloudFormation resource and discloses attributes. :param inspector: - tree inspector to collect and process attributes. stability :stability: experimental :param props: - The CloudFormation resource type name for this resource class. 
cloudformationAttribute: :cloudformationAttribute:: Arn ``AWS::ECR::Repository.Tags``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags ``AWS::ECR::Repository.RepositoryPolicyText``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext ``AWS::ECR::Repository.LifecyclePolicy``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy ``AWS::ECR::Repository.RepositoryName``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname :param lifecycle_policy_text: ``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``. :param registry_id: ``CfnRepository.LifecyclePolicyProperty.RegistryId``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html ``CfnRepository.LifecyclePolicyProperty.LifecyclePolicyText``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-lifecyclepolicytext ``CfnRepository.LifecyclePolicyProperty.RegistryId``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecr-repository-lifecyclepolicy.html#cfn-ecr-repository-lifecyclepolicy-registryid Properties for defining a ``AWS::ECR::Repository``. :param lifecycle_policy: ``AWS::ECR::Repository.LifecyclePolicy``. :param repository_name: ``AWS::ECR::Repository.RepositoryName``. :param repository_policy_text: ``AWS::ECR::Repository.RepositoryPolicyText``. :param tags: ``AWS::ECR::Repository.Tags``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html ``AWS::ECR::Repository.LifecyclePolicy``. 
see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-lifecyclepolicy ``AWS::ECR::Repository.RepositoryName``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositoryname ``AWS::ECR::Repository.RepositoryPolicyText``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-repositorypolicytext ``AWS::ECR::Repository.Tags``. see :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html#cfn-ecr-repository-tags Represents an ECR repository. The ARN of the repository. attribute: :attribute:: true The name of the repository. attribute: :attribute:: true The URI of this repository (represents the latest image):. ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY attribute: :attribute:: true Add a policy statement to the repository's resource policy. :param statement: - Grant the given principal identity permissions to perform the actions on this repository. :param grantee: - :param actions: - Grant the given identity permissions to pull images in this repository. :param grantee: - Grant the given identity permissions to pull and push images to this repository. :param grantee: - Define a CloudWatch event that triggers when something happens to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. 
Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines a CloudWatch event rule which triggers for repository events. Use ``rule.addEventPattern(pattern)`` to specify a filter. :param id: - :param options: - :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. 
:param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed. :param id: The id of the rule. :param options: Options for adding the rule. :param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``. ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG] :param tag: Image tag to use (tools usually default to "latest" if omitted). Represents an ECR repository. The ARN of the repository. attribute: :attribute:: true The name of the repository. attribute: :attribute:: true The URI of this repository (represents the latest image):. ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY attribute: :attribute:: true Add a policy statement to the repository's resource policy. :param statement: - Grant the given principal identity permissions to perform the actions on this repository. :param grantee: - :param actions: - Grant the given identity permissions to pull images in this repository. :param grantee: - Grant the given identity permissions to pull and push images to this repository. 
:param grantee: - Define a CloudWatch event that triggers when something happens to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. 
Defines a CloudWatch event rule which triggers for repository events. Use ``rule.addEventPattern(pattern)`` to specify a filter. :param id: - :param options: - :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines an AWS CloudWatch event rule that can trigger a target when the image scan is completed. :param id: The id of the rule. :param options: Options for adding the rule. :param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Returns the URI of the repository for a certain tag. Can be used in ``docker push/pull``. 
ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG] :param tag: Image tag to use (tools usually default to "latest" if omitted). An ECR life cycle rule. :param description: Describes the purpose of the rule. Default: No description :param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge. :param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays. :param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned :param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged :param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise Describes the purpose of the rule. default :default: No description The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge. The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays. Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. 
All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. default :default: Automatically assigned Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. default :default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise Options for the onCloudTrailImagePushed method. :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags A description of the rule's purpose. default :default: - No description Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. default :default: - No additional filtering based on an event pattern. see :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html A name for the rule. default :default: AWS CloudFormation generates a unique physical ID. The target to register for the event. default :default: - No target is added to the rule. Use ``addTarget()`` to add a target. 
Only watch changes to this image tag. default :default: - Watch changes to all tags Options for the OnImageScanCompleted method. :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. :param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags A description of the rule's purpose. default :default: - No description Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. default :default: - No additional filtering based on an event pattern. see :see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CloudWatchEventsandEventPatterns.html A name for the rule. default :default: AWS CloudFormation generates a unique physical ID. The target to register for the event. default :default: - No target is added to the rule. Use ``addTarget()`` to add a target. Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. default :default: - Watch the changes to the repository with all image tags :param repository_arn: :param repository_name: Base class for ECR repository. Reused between imported repositories and owned repositories. 
:param scope: - :param id: - :param props: - :param physical_name: The value passed in by users to the physical name prop of the resource. - ``undefined`` implies that a physical name will be allocated by CloudFormation during deployment. - a concrete value implies a specific physical name - ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical will only be generated by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation. Default: - The physical name will be allocated by CloudFormation at deployment time Add a policy statement to the repository's resource policy. :param statement: - Grant the given principal identity permissions to perform the actions on this repository. :param grantee: - :param actions: - Grant the given identity permissions to use the images in this repository. :param grantee: - Grant the given identity permissions to pull and push images to this repository. :param grantee: - Define a CloudWatch event that triggers when something happens to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. 
Defines an AWS CloudWatch event rule that can trigger a target when an image is pushed to this repository. Requires that there exists at least one CloudTrail Trail in your account that captures the event. This method will not create the Trail. :param id: The id of the rule. :param options: Options for adding the rule. :param image_tag: Only watch changes to this image tag. Default: - Watch changes to all tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines a CloudWatch event rule which triggers for repository events. Use ``rule.addEventPattern(pattern)`` to specify a filter. :param id: - :param options: - :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Defines an AWS CloudWatch event rule that can trigger a target when an image scan is completed. :param id: The id of the rule. 
:param options: Options for adding the rule. :param image_tags: Only watch changes to the image tags spedified. Leave it undefined to watch the full repository. Default: - Watch the changes to the repository with all image tags :param description: A description of the rule's purpose. Default: - No description :param event_pattern: Additional restrictions for the event to route to the specified target. The method that generates the rule probably imposes some type of event filtering. The filtering implied by what you pass here is added on top of that filtering. Default: - No additional filtering based on an event pattern. :param rule_name: A name for the rule. Default: AWS CloudFormation generates a unique physical ID. :param target: The target to register for the event. Default: - No target is added to the rule. Use ``addTarget()`` to add a target. Returns the URL of the repository. Can be used in ``docker push/pull``. ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY[:TAG] :param tag: Optional image tag. The ARN of the repository. The name of the repository. The URI of this repository (represents the latest image):. ACCOUNT.dkr.ecr.REGION.amazonaws.com/REPOSITORY Add a policy statement to the repository's resource policy. :param statement: - The ARN of the repository. The name of the repository. Define an ECR repository. :param scope: - :param id: - :param props: - :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed. :param lifecycle_rules: Life cycle rules to apply to this registry. Default: No life cycle rules :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain :param repository_name: Name for this repository. Default: Automatically generated name. Returns an ECR ARN for a repository that resides in the same account/region as the current stack. 
:param repository_name: - :param scope: - :param scope: - :param id: - :param repository_arn: - Import a repository. :param scope: - :param id: - :param attrs: - :param repository_arn: :param repository_name: :param scope: - :param id: - :param repository_name: - Add a life cycle rule to the repository. Life cycle rules automatically expire images from the repository that match certain conditions. :param rule: - :param description: Describes the purpose of the rule. Default: No description :param max_image_age: The maximum age of images to retain. The value must represent a number of days. Specify exactly one of maxImageCount and maxImageAge. :param max_image_count: The maximum number of images to retain. Specify exactly one of maxImageCount and maxImageAgeDays. :param rule_priority: Controls the order in which rules are evaluated (low to high). All rules must have a unique priority, where lower numbers have higher precedence. The first rule that matches is applied to an image. There can only be one rule with a tagStatus of Any, and it must have the highest rulePriority. All rules without a specified priority will have incrementing priorities automatically assigned to them, higher than any rules that DO have priorities. Default: Automatically assigned :param tag_prefix_list: Select images that have ALL the given prefixes in their tag. Only if tagStatus == TagStatus.Tagged :param tag_status: Select images based on tags. Only one rule is allowed to select untagged images, and it must have the highest rulePriority. Default: TagStatus.Tagged if tagPrefixList is given, TagStatus.Any otherwise Add a policy statement to the repository's resource policy. :param statement: - The ARN of the repository. The name of the repository. :param lifecycle_registry_id: The AWS account ID associated with the registry that contains the repository. Default: The default registry is assumed. :param lifecycle_rules: Life cycle rules to apply to this registry. 
Default: No life cycle rules :param removal_policy: Determine what happens to the repository when the resource/stack is deleted. Default: RemovalPolicy.Retain :param repository_name: Name for this repository. Default: Automatically generated name. The AWS account ID associated with the registry that contains the repository. default :default: The default registry is assumed. see :see: https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_PutLifecyclePolicy.html Life cycle rules to apply to this registry. default :default: No life cycle rules Determine what happens to the repository when the resource/stack is deleted. default :default: RemovalPolicy.Retain Name for this repository. default :default: Automatically generated name. Select images based on tags. Rule applies to all images. Rule applies to tagged images. Rule applies to untagged images.
| 2.002849
| 2
|
wastetowealth/views.py
|
BuildForSDG/waste-to-wealth
| 0
|
6628819
|
<reponame>BuildForSDG/waste-to-wealth
# # from django.shortcuts import render
# # Create your views here.
# from django.contrib.auth.hashers import make_password
# from django.contrib.auth import login, authenticate
# from django.contrib.auth.models import User, Group
# from rest_framework import status, viewsets
# from .serializers import UserSerializer
# from rest_framework.views import APIView
# from rest_framework.viewsets import ModelViewSet
# from rest_framework.response import Response
# from rest_framework.authtoken.views import ObtainAuthToken
# from rest_framework.authtoken.models import Token
# from rest_framework.authentication import TokenAuthentication, SessionAuthentication, BasicAuthentication
# class UserViewSet(viewsets.ModelViewSet):
# """
# API endpoint that allows users to be viewed or edited
# """
# queryset = User.objects.all().order_by('-date_joined')
# serializer_class = UserSerializer
# # class GroupViewSet(viewsets.ModelViewSet):
# # """
# # API endpoint that allows groups to be viewed or edited
# # """
# # queryset = Group.objects.all()
# # serializer_class = GroupSerializer
# class Logout(APIView):
# def get(self, request, format=None):
# request.user.auth_token.delete()
# return Response(status=status.HTTP_200_OK)
# class LoginView(ObtainAuthToken):
# def post(self, request, *args, **kwargs):
# serializer = self.serializer_class(data=request.data,context={'request': request})
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data['user']
# token, created = Token.objects.get_or_create(user=user)
# return Response({
# 'token': token.key,
# 'user_id': user.pk,
# 'email': user.email,
# 'username':user.username,
# })
from rest_framework import viewsets
from rest_framework.exceptions import NotFound
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from allauth.account.models import EmailConfirmation, EmailConfirmationHMAC
from django.http import HttpResponseRedirect
from .models import CustomUser
class ConfirmEmailView(APIView):
    """Confirm a user's e-mail address via the key from the confirmation link.

    On success the browser is redirected to ``/login``; when the key is
    invalid or expired it is redirected to ``/login/failure`` (a React
    Router route renders the corresponding screen).
    """

    # The confirmation link is followed by an anonymous user, so no
    # authentication is required.
    permission_classes = [AllowAny]

    def get(self, *args, **kwargs):
        """Resolve the confirmation for ``kwargs['key']`` and confirm it."""
        confirmation = self.get_object()
        if confirmation is None:
            # Bug fix: the old code returned an HttpResponseRedirect from
            # get_object() on a missing/expired key and then called
            # .confirm() on that response object, raising AttributeError.
            # The failure redirect is now handled here instead.
            # A React Router Route will handle the failure scenario.
            return HttpResponseRedirect('/login/failure')
        self.object = confirmation
        confirmation.confirm(self.request)
        # A React Router Route will handle the success scenario.
        return HttpResponseRedirect('/login')

    def get_object(self, queryset=None):
        """Return the EmailConfirmation for the URL key, or None if not found.

        Tries the stateless HMAC form of the key first and falls back to a
        database lookup against the still-valid confirmations.
        """
        key = self.kwargs['key']
        email_confirmation = EmailConfirmationHMAC.from_key(key)
        if not email_confirmation:
            if queryset is None:
                queryset = self.get_queryset()
            try:
                email_confirmation = queryset.get(key=key.lower())
            except EmailConfirmation.DoesNotExist:
                return None
        return email_confirmation

    def get_queryset(self):
        """Return all still-valid confirmations with the owning user prefetched."""
        qs = EmailConfirmation.objects.all_valid()
        qs = qs.select_related("email_address__user")
        return qs
|
# # from django.shortcuts import render
# # Create your views here.
# from django.contrib.auth.hashers import make_password
# from django.contrib.auth import login, authenticate
# from django.contrib.auth.models import User, Group
# from rest_framework import status, viewsets
# from .serializers import UserSerializer
# from rest_framework.views import APIView
# from rest_framework.viewsets import ModelViewSet
# from rest_framework.response import Response
# from rest_framework.authtoken.views import ObtainAuthToken
# from rest_framework.authtoken.models import Token
# from rest_framework.authentication import TokenAuthentication, SessionAuthentication, BasicAuthentication
# class UserViewSet(viewsets.ModelViewSet):
# """
# API endpoint that allows users to be viewed or edited
# """
# queryset = User.objects.all().order_by('-date_joined')
# serializer_class = UserSerializer
# # class GroupViewSet(viewsets.ModelViewSet):
# # """
# # API endpoint that allows groups to be viewed or edited
# # """
# # queryset = Group.objects.all()
# # serializer_class = GroupSerializer
# class Logout(APIView):
# def get(self, request, format=None):
# request.user.auth_token.delete()
# return Response(status=status.HTTP_200_OK)
# class LoginView(ObtainAuthToken):
# def post(self, request, *args, **kwargs):
# serializer = self.serializer_class(data=request.data,context={'request': request})
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data['user']
# token, created = Token.objects.get_or_create(user=user)
# return Response({
# 'token': token.key,
# 'user_id': user.pk,
# 'email': user.email,
# 'username':user.username,
# })
from rest_framework import viewsets
from rest_framework.exceptions import NotFound
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from allauth.account.models import EmailConfirmation, EmailConfirmationHMAC
from django.http import HttpResponseRedirect
from .models import CustomUser
class ConfirmEmailView(APIView):
permission_classes = [AllowAny]
def get(self, *args, **kwargs):
self.object = confirmation = self.get_object()
confirmation.confirm(self.request)
# A React Router Route will handle the failure scenario
return HttpResponseRedirect('/login')
def get_object(self, queryset=None):
key = self.kwargs['key']
email_confirmation = EmailConfirmationHMAC.from_key(key)
if not email_confirmation:
if queryset is None:
queryset = self.get_queryset()
try:
email_confirmation = queryset.get(key=key.lower())
except EmailConfirmation.DoesNotExist:
# A React Router Route will handle the failure scenario
return HttpResponseRedirect('/login/failure')
return email_confirmation
def get_queryset(self):
qs = EmailConfirmation.objects.all_valid()
qs = qs.select_related("email_address__user")
return qs
|
en
| 0.50537
|
# # from django.shortcuts import render # # Create your views here. # from django.contrib.auth.hashers import make_password # from django.contrib.auth import login, authenticate # from django.contrib.auth.models import User, Group # from rest_framework import status, viewsets # from .serializers import UserSerializer # from rest_framework.views import APIView # from rest_framework.viewsets import ModelViewSet # from rest_framework.response import Response # from rest_framework.authtoken.views import ObtainAuthToken # from rest_framework.authtoken.models import Token # from rest_framework.authentication import TokenAuthentication, SessionAuthentication, BasicAuthentication # class UserViewSet(viewsets.ModelViewSet): # """ # API endpoint that allows users to be viewed or edited # """ # queryset = User.objects.all().order_by('-date_joined') # serializer_class = UserSerializer # # class GroupViewSet(viewsets.ModelViewSet): # # """ # # API endpoint that allows groups to be viewed or edited # # """ # # queryset = Group.objects.all() # # serializer_class = GroupSerializer # class Logout(APIView): # def get(self, request, format=None): # request.user.auth_token.delete() # return Response(status=status.HTTP_200_OK) # class LoginView(ObtainAuthToken): # def post(self, request, *args, **kwargs): # serializer = self.serializer_class(data=request.data,context={'request': request}) # serializer.is_valid(raise_exception=True) # user = serializer.validated_data['user'] # token, created = Token.objects.get_or_create(user=user) # return Response({ # 'token': token.key, # 'user_id': user.pk, # 'email': user.email, # 'username':user.username, # }) # A React Router Route will handle the failure scenario # A React Router Route will handle the failure scenario
| 2.141834
| 2
|
pysite/migrations/tables/code_jam_teams/v1.py
|
gatarelib/site
| 0
|
6628820
|
def run(db, table, table_obj):
    """
    Associate the ID of each team's code jam (team -> jam).

    For every team document that lacks a "jam" field, find the code jam
    whose "teams" list contains the team's ID, record that jam's number on
    the team document, and persist it.

    :param db: database wrapper exposing get_all / insert / sync
    :param table: name of the teams table being migrated
    :param table_obj: table object (unused; kept for the migration API)
    """
    for document in db.get_all(table):
        if "jam" in document:
            continue  # already migrated
        # Find the code jam containing this team.
        for jam in db.get_all("code_jams"):
            if document["id"] in jam["teams"]:
                document["jam"] = jam["number"]
                db.insert(table, document, conflict="update", durability="soft")
                break  # a team belongs to one jam; stop scanning
    db.sync(table)
|
def run(db, table, table_obj):
"""
Associate the ID of each team's code jam (team -> jam)
"""
for document in db.get_all(table):
if "jam" not in document:
# find the code jam containing this team
for jam in db.get_all("code_jams"):
if document["id"] in jam["teams"]:
document["jam"] = jam["number"]
db.insert(table, document, conflict="update", durability="soft")
db.sync(table)
|
en
| 0.830315
|
Associate the ID of each team's code jam (team -> jam) # find the code jam containing this team
| 2.942111
| 3
|
validator/tests/test_message_validation/tests.py
|
andyyumao/sawtooth
| 4
|
6628821
|
<filename>validator/tests/test_message_validation/tests.py
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
import cbor
import hashlib
import random
import string
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, \
Transaction
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block
from sawtooth_validator.gossip import signature_verifier as verifier
from sawtooth_validator.gossip import structure_verifier
class TestMessageValidation(unittest.TestCase):
    """Tests for gossip signature/structure validation of transactions,
    batches and blocks built with a fresh secp256k1 signer.
    """

    def setUp(self):
        # Fresh signer per test; its public key doubles as the signer and
        # batcher identity in the generated protobuf messages.
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

    @property
    def public_key(self):
        """Hex-encoded public key of the per-test signer."""
        return self.signer.get_public_key().as_hex()

    def broadcast(self, msg):
        pass

    def _create_transactions(self,
                             count,
                             matched_payload=True,
                             valid_signature=True,
                             valid_batcher=True):
        """Build *count* intkey transactions, optionally corrupted.

        matched_payload=False mutates the payload after its hash was taken,
        valid_signature=False forges the header signature, and
        valid_batcher=False sets a bogus batcher public key.
        """
        txn_list = []
        for i in range(count):
            payload = {'Verb': 'set',
                       'Name': 'name' + str(random.randint(0, 100)),
                       'Value': random.randint(0, 100)}
            intkey_prefix = \
                hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6]
            addr = intkey_prefix + \
                hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest()
            payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest()
            header = TransactionHeader(
                signer_public_key=self.public_key,
                family_name='intkey',
                family_version='1.0',
                inputs=[addr],
                outputs=[addr],
                dependencies=[],
                payload_sha512=payload_encode)
            if valid_batcher:
                header.batcher_public_key = self.public_key
            else:
                header.batcher_public_key = "bad_batcher"
            header_bytes = header.SerializeToString()
            if valid_signature:
                signature = self.signer.sign(header_bytes)
            else:
                signature = "bad_signature"
            if not matched_payload:
                # Mutate after payload_sha512 was computed so the hash no
                # longer matches the serialized payload.
                payload['Name'] = 'unmatched_payload'
            transaction = Transaction(
                header=header_bytes,
                payload=cbor.dumps(payload),
                header_signature=signature)
            txn_list.append(transaction)
        return txn_list

    def _generate_id(self):
        """Return a random 128-hex-char identifier."""
        return hashlib.sha512(''.join(
            [random.choice(string.ascii_letters)
             for _ in range(0, 1024)]).encode()).hexdigest()

    def _create_batches(self, batch_count, txn_count,
                        valid_batch=True, valid_txn=True,
                        valid_structure=True, valid_batcher=True):
        """Build *batch_count* batches of *txn_count* transactions each.

        valid_batch=False forges the batch signature, valid_txn=False makes
        the transactions' signatures invalid, valid_structure=False drops a
        transaction id from the batch header, and valid_batcher=False gives
        the transactions a bogus batcher key.
        """
        batch_list = []
        for i in range(batch_count):
            # BUGFIX: these flags were previously passed positionally, so
            # valid_txn landed on matched_payload and valid_batcher on
            # valid_signature.  Pass them by keyword instead.
            txn_list = self._create_transactions(
                txn_count,
                valid_signature=valid_txn,
                valid_batcher=valid_batcher)
            txn_sig_list = [txn.header_signature for txn in txn_list]
            if not valid_structure:
                txn_sig_list.pop()
            batch_header = BatchHeader(signer_public_key=self.public_key)
            batch_header.transaction_ids.extend(txn_sig_list)
            header_bytes = batch_header.SerializeToString()
            if valid_batch:
                signature = self.signer.sign(header_bytes)
            else:
                signature = "bad_signature"
            batch = Batch(header=header_bytes,
                          transactions=txn_list,
                          header_signature=signature)
            batch_list.append(batch)
        return batch_list

    def _create_blocks(self, block_count, batch_count,
                       valid_block=True, valid_batch=True):
        """Build *block_count* blocks of *batch_count* two-txn batches.

        valid_block=False forges the block signature; valid_batch=False
        forges the signatures of the contained batches.
        """
        block_list = []
        for i in range(block_count):
            batch_list = self._create_batches(
                batch_count, 2, valid_batch=valid_batch)
            batch_ids = [batch.header_signature for batch in batch_list]
            block_header = BlockHeader(signer_public_key=self.public_key,
                                       batch_ids=batch_ids)
            header_bytes = block_header.SerializeToString()
            if valid_block:
                signature = self.signer.sign(header_bytes)
            else:
                signature = "bad_signature"
            block = Block(header=header_bytes,
                          batches=batch_list,
                          header_signature=signature)
            block_list.append(block)
        return block_list

    def test_valid_transaction(self):
        txn_list = self._create_transactions(1)
        txn = txn_list[0]
        valid = verifier.is_valid_transaction(txn)
        self.assertTrue(valid)

    def test_invalid_transaction(self):
        # A forged header signature must be rejected.
        txn_list = self._create_transactions(1, valid_signature=False)
        txn = txn_list[0]
        valid = verifier.is_valid_transaction(txn)
        self.assertFalse(valid)

    def test_unmatched_payload_transaction(self):
        # A payload that no longer matches its header hash must be rejected.
        txn_list = self._create_transactions(1, matched_payload=False)
        txn = txn_list[0]
        valid = verifier.is_valid_transaction(txn)
        self.assertFalse(valid)

    def test_valid_batch(self):
        batch_list = self._create_batches(1, 10)
        batch = batch_list[0]
        valid = verifier.is_valid_batch(batch)
        self.assertTrue(valid)

    def test_invalid_batch(self):
        # Forged batch signature.
        batch_list = self._create_batches(1, 1, valid_batch=False)
        batch = batch_list[0]
        valid = verifier.is_valid_batch(batch)
        self.assertFalse(valid)
        # A transaction with a forged signature inside the batch.
        batch_list = self._create_batches(1, 1, valid_txn=False)
        batch = batch_list[0]
        valid = verifier.is_valid_batch(batch)
        self.assertFalse(valid)
        # A transaction with a bad batcher public key.
        batch_list = self._create_batches(1, 1, valid_batcher=False)
        batch = batch_list[0]
        valid = verifier.is_valid_batch(batch)
        self.assertFalse(valid)

    def test_invalid_batch_structure(self):
        # Batch header lists one fewer txn id than the batch carries.
        batch_list = self._create_batches(1, 2, valid_structure=False)
        batch = batch_list[0]
        valid = structure_verifier.is_valid_batch(batch)
        self.assertFalse(valid)

    def test_valid_block(self):
        block_list = self._create_blocks(1, 1)
        block = block_list[0]
        valid = verifier.is_valid_block(block)
        self.assertTrue(valid)

    def test_invalid_block(self):
        block_list = self._create_blocks(1, 1, valid_batch=False)
        block = block_list[0]
        valid = verifier.is_valid_block(block)
        self.assertFalse(valid)
        block_list = self._create_blocks(1, 1, valid_block=False)
        block = block_list[0]
        valid = verifier.is_valid_block(block)
        self.assertFalse(valid)
|
<filename>validator/tests/test_message_validation/tests.py
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
import cbor
import hashlib
import random
import string
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, \
Transaction
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block
from sawtooth_validator.gossip import signature_verifier as verifier
from sawtooth_validator.gossip import structure_verifier
class TestMessageValidation(unittest.TestCase):
def setUp(self):
context = create_context('secp256k1')
private_key = context.new_random_private_key()
crypto_factory = CryptoFactory(context)
self.signer = crypto_factory.new_signer(private_key)
@property
def public_key(self):
return self.signer.get_public_key().as_hex()
def broadcast(self, msg):
pass
def _create_transactions(self,
count,
matched_payload=True,
valid_signature=True,
valid_batcher=True):
txn_list = []
for i in range(count):
payload = {'Verb': 'set',
'Name': 'name' + str(random.randint(0, 100)),
'Value': random.randint(0, 100)}
intkey_prefix = \
hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6]
addr = intkey_prefix + \
hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest()
payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest()
header = TransactionHeader(
signer_public_key=self.public_key,
family_name='intkey',
family_version='1.0',
inputs=[addr],
outputs=[addr],
dependencies=[],
payload_sha512=payload_encode)
if valid_batcher:
header.batcher_public_key = self.public_key
else:
header.batcher_public_key = "bad_batcher"
header_bytes = header.SerializeToString()
if valid_signature:
signature = self.signer.sign(header_bytes)
else:
signature = "bad_signature"
if not matched_payload:
payload['Name'] = 'unmatched_payload'
transaction = Transaction(
header=header_bytes,
payload=cbor.dumps(payload),
header_signature=signature)
txn_list.append(transaction)
return txn_list
def _generate_id(self):
return hashlib.sha512(''.join(
[random.choice(string.ascii_letters)
for _ in range(0, 1024)]).encode()).hexdigest()
def _create_batches(self, batch_count, txn_count,
valid_batch=True, valid_txn=True,
valid_structure=True, valid_batcher=True):
batch_list = []
for i in range(batch_count):
txn_list = self._create_transactions(txn_count, valid_txn,
valid_batcher)
txn_sig_list = [txn.header_signature for txn in txn_list]
if not valid_structure:
txn_sig_list.pop()
batch_header = BatchHeader(signer_public_key=self.public_key)
batch_header.transaction_ids.extend(txn_sig_list)
header_bytes = batch_header.SerializeToString()
if valid_batch:
signature = self.signer.sign(header_bytes)
else:
signature = "bad_signature"
batch = Batch(header=header_bytes,
transactions=txn_list,
header_signature=signature)
batch_list.append(batch)
return batch_list
def _create_blocks(self, block_count, batch_count,
valid_block=True, valid_batch=True):
block_list = []
for i in range(block_count):
batch_list = self._create_batches(
batch_count, 2, valid_batch=valid_batch)
batch_ids = [batch.header_signature for batch in batch_list]
block_header = BlockHeader(signer_public_key=self.public_key,
batch_ids=batch_ids)
header_bytes = block_header.SerializeToString()
if valid_block:
signature = self.signer.sign(header_bytes)
else:
signature = "bad_signature"
block = Block(header=header_bytes,
batches=batch_list,
header_signature=signature)
block_list.append(block)
return block_list
def test_valid_transaction(self):
txn_list = self._create_transactions(1)
txn = txn_list[0]
valid = verifier.is_valid_transaction(txn)
self.assertTrue(valid)
def test_invalid_transaction(self):
# add invalid flag to _create transaction
txn_list = self._create_transactions(1, valid_signature=False)
txn = txn_list[0]
valid = verifier.is_valid_transaction(txn)
self.assertFalse(valid)
def test_unmatched_payload_transaction(self):
# add invalid flag to _create transaction
txn_list = self._create_transactions(1, matched_payload=False)
txn = txn_list[0]
valid = verifier.is_valid_transaction(txn)
self.assertFalse(valid)
def test_valid_batch(self):
batch_list = self._create_batches(1, 10)
batch = batch_list[0]
valid = verifier.is_valid_batch(batch)
self.assertTrue(valid)
def test_invalid_batch(self):
# add invalid flag to create_batches
batch_list = self._create_batches(1, 1, valid_batch=False)
batch = batch_list[0]
valid = verifier.is_valid_batch(batch)
self.assertFalse(valid)
# create an invalid txn in the batch
batch_list = self._create_batches(1, 1, valid_txn=False)
batch = batch_list[0]
valid = verifier.is_valid_batch(batch)
self.assertFalse(valid)
# create an invalid txn with bad batcher
batch_list = self._create_batches(1, 1, valid_batcher=False)
batch = batch_list[0]
valid = verifier.is_valid_batch(batch)
self.assertFalse(valid)
def test_invalid_batch_structure(self):
batch_list = self._create_batches(1, 2, valid_structure=False)
batch = batch_list[0]
valid = structure_verifier.is_valid_batch(batch)
self.assertFalse(valid)
def test_valid_block(self):
block_list = self._create_blocks(1, 1)
block = block_list[0]
valid = verifier.is_valid_block(block)
self.assertTrue(valid)
def test_invalid_block(self):
block_list = self._create_blocks(1, 1, valid_batch=False)
block = block_list[0]
valid = verifier.is_valid_block(block)
self.assertFalse(valid)
block_list = self._create_blocks(1, 1, valid_block=False)
block = block_list[0]
valid = verifier.is_valid_block(block)
self.assertFalse(valid)
|
en
| 0.688079
|
# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ # add invalid flag to _create transaction # add invalid flag to _create transaction # add invalid flag to create_batches # create an invalid txn in the batch # create an invalid txn with bad batcher
| 1.884885
| 2
|
aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/CreateClusterRequest.py
|
yndu13/aliyun-openapi-python-sdk
| 0
|
6628822
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
	"""Request object for the Alibaba Cloud MSE ``CreateCluster`` RPC
	(API version 2019-05-31).

	Auto-generated SDK boilerplate: each supported query parameter is
	exposed as a getter/setter pair that reads from / writes to the
	request's query-parameter map.  The trailing comment on each method
	notes the wire type the API expects for that parameter.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'mse', '2019-05-31', 'CreateCluster','mse')
		self.set_method('POST')

		# Attach region-specific endpoint routing tables when this request
		# class supports them.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_ClusterSpecification(self): # String
		return self.get_query_params().get('ClusterSpecification')

	def set_ClusterSpecification(self, ClusterSpecification):  # String
		self.add_query_param('ClusterSpecification', ClusterSpecification)
	def get_PubSlbSpecification(self): # String
		return self.get_query_params().get('PubSlbSpecification')

	def set_PubSlbSpecification(self, PubSlbSpecification):  # String
		self.add_query_param('PubSlbSpecification', PubSlbSpecification)
	def get_PrivateSlbSpecification(self): # String
		return self.get_query_params().get('PrivateSlbSpecification')

	def set_PrivateSlbSpecification(self, PrivateSlbSpecification):  # String
		self.add_query_param('PrivateSlbSpecification', PrivateSlbSpecification)
	def get_InstanceCount(self): # Integer
		return self.get_query_params().get('InstanceCount')

	def set_InstanceCount(self, InstanceCount):  # Integer
		self.add_query_param('InstanceCount', InstanceCount)
	def get_RequestPars(self): # String
		return self.get_query_params().get('RequestPars')

	def set_RequestPars(self, RequestPars):  # String
		self.add_query_param('RequestPars', RequestPars)
	def get_ConnectionType(self): # String
		return self.get_query_params().get('ConnectionType')

	def set_ConnectionType(self, ConnectionType):  # String
		self.add_query_param('ConnectionType', ConnectionType)
	def get_ClusterVersion(self): # String
		return self.get_query_params().get('ClusterVersion')

	def set_ClusterVersion(self, ClusterVersion):  # String
		self.add_query_param('ClusterVersion', ClusterVersion)
	def get_DiskCapacity(self): # Integer
		return self.get_query_params().get('DiskCapacity')

	def set_DiskCapacity(self, DiskCapacity):  # Integer
		self.add_query_param('DiskCapacity', DiskCapacity)
	def get_DiskType(self): # String
		return self.get_query_params().get('DiskType')

	def set_DiskType(self, DiskType):  # String
		self.add_query_param('DiskType', DiskType)
	def get_VSwitchId(self): # String
		return self.get_query_params().get('VSwitchId')

	def set_VSwitchId(self, VSwitchId):  # String
		self.add_query_param('VSwitchId', VSwitchId)
	def get_ClusterType(self): # String
		return self.get_query_params().get('ClusterType')

	def set_ClusterType(self, ClusterType):  # String
		self.add_query_param('ClusterType', ClusterType)
	def get_PubNetworkFlow(self): # String
		return self.get_query_params().get('PubNetworkFlow')

	def set_PubNetworkFlow(self, PubNetworkFlow):  # String
		self.add_query_param('PubNetworkFlow', PubNetworkFlow)
	def get_VpcId(self): # String
		return self.get_query_params().get('VpcId')

	def set_VpcId(self, VpcId):  # String
		self.add_query_param('VpcId', VpcId)
	def get_NetType(self): # String
		return self.get_query_params().get('NetType')

	def set_NetType(self, NetType):  # String
		self.add_query_param('NetType', NetType)
	def get_MseVersion(self): # String
		return self.get_query_params().get('MseVersion')

	def set_MseVersion(self, MseVersion):  # String
		self.add_query_param('MseVersion', MseVersion)
	def get_AcceptLanguage(self): # String
		return self.get_query_params().get('AcceptLanguage')

	def set_AcceptLanguage(self, AcceptLanguage):  # String
		self.add_query_param('AcceptLanguage', AcceptLanguage)
	def get_Region(self): # String
		return self.get_query_params().get('Region')

	def set_Region(self, Region):  # String
		self.add_query_param('Region', Region)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'CreateCluster','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterSpecification(self): # String
return self.get_query_params().get('ClusterSpecification')
def set_ClusterSpecification(self, ClusterSpecification): # String
self.add_query_param('ClusterSpecification', ClusterSpecification)
def get_PubSlbSpecification(self): # String
return self.get_query_params().get('PubSlbSpecification')
def set_PubSlbSpecification(self, PubSlbSpecification): # String
self.add_query_param('PubSlbSpecification', PubSlbSpecification)
def get_PrivateSlbSpecification(self): # String
return self.get_query_params().get('PrivateSlbSpecification')
def set_PrivateSlbSpecification(self, PrivateSlbSpecification): # String
self.add_query_param('PrivateSlbSpecification', PrivateSlbSpecification)
def get_InstanceCount(self): # Integer
return self.get_query_params().get('InstanceCount')
def set_InstanceCount(self, InstanceCount): # Integer
self.add_query_param('InstanceCount', InstanceCount)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_ConnectionType(self): # String
return self.get_query_params().get('ConnectionType')
def set_ConnectionType(self, ConnectionType): # String
self.add_query_param('ConnectionType', ConnectionType)
def get_ClusterVersion(self): # String
return self.get_query_params().get('ClusterVersion')
def set_ClusterVersion(self, ClusterVersion): # String
self.add_query_param('ClusterVersion', ClusterVersion)
def get_DiskCapacity(self): # Integer
return self.get_query_params().get('DiskCapacity')
def set_DiskCapacity(self, DiskCapacity): # Integer
self.add_query_param('DiskCapacity', DiskCapacity)
def get_DiskType(self): # String
return self.get_query_params().get('DiskType')
def set_DiskType(self, DiskType): # String
self.add_query_param('DiskType', DiskType)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_ClusterType(self): # String
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # String
self.add_query_param('ClusterType', ClusterType)
def get_PubNetworkFlow(self): # String
return self.get_query_params().get('PubNetworkFlow')
def set_PubNetworkFlow(self, PubNetworkFlow): # String
self.add_query_param('PubNetworkFlow', PubNetworkFlow)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_NetType(self): # String
return self.get_query_params().get('NetType')
def set_NetType(self, NetType): # String
self.add_query_param('NetType', NetType)
def get_MseVersion(self): # String
return self.get_query_params().get('MseVersion')
def set_MseVersion(self, MseVersion): # String
self.add_query_param('MseVersion', MseVersion)
def get_AcceptLanguage(self): # String
return self.get_query_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_query_param('AcceptLanguage', AcceptLanguage)
def get_Region(self): # String
return self.get_query_params().get('Region')
def set_Region(self, Region): # String
self.add_query_param('Region', Region)
|
en
| 0.79981
|
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # String # String # String # String # String # String # Integer # Integer # String # String # String # String # String # String # Integer # Integer # String # String # String # String # String # String # String # String # String # String # String # String # String # String # String # String # String # String
| 1.649088
| 2
|
day03.py
|
suntingting521/suntingting1170426002
| 0
|
6628823
|
import requests

# Base URL for Baidu web search; the search term goes in the 'wd' parameter.
url = 'http://www.baidu.com/s?'


def baidu(wds):
    """Search Baidu for each term in *wds* and save each result page.

    Pages are written to res1.txt, res2.txt, ... in iteration order.
    """
    for count, wd in enumerate(wds, start=1):
        res = requests.get(url, params={'wd': wd})
        path = 'res%d.txt' % count
        with open(path, 'w', encoding='utf8') as f:
            f.write(res.text)


def download_images(list_url):
    """Download every .jpg URL listed (one per line) at *list_url*.

    Each image is saved under the last segment of its URL path.
    BUGFIX: the original tested ``'jpg' in url`` (the list URL) instead of
    each line, passed a list from ``split('/')`` to ``open``, and was
    preceded by dead top-level code referencing undefined names (``res1``)
    that raised NameError before anything ran.
    """
    response = requests.get(list_url)
    image_urls = [line.strip() for line in response.text.split('\n')
                  if 'jpg' in line]
    for img_url in image_urls:
        content = requests.get(img_url).content
        name = img_url.split('/')[-1]  # file name = last path segment
        with open(name, 'wb') as f:
            f.write(content)


if __name__ == "__main__":
    wds = ('Joker', '美女', '丑女')
    baidu(wds)
    download_images(
        'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02127808')
|
import requests
url = 'http://www.baidu.com/s?'
def baidu(wds):
count = 1
for wd in wds:
res = requests.get(url,params={'wd':wd})
path = 'res%d.txt'%count
with open(path,'w',encoding='utf8') as f:
f.write(res.text)
count +=1
if __name__ == "__main__":
wds =('Joker','美女','丑女')
baidu(wds)
if 'http' in res1 or 'https' in res1:
res4 = res1.spilt('(')
for i in res4:
if 'http' in res4 or 'https' in res4:
res5 = res4.spilt('):')
print(res5)
import requests
URLS = []
url = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02127808'
response = requests.get(url)
HTML = response.text
lines = HTML.split('\n')
for line in lines:
if 'jpg' in url:
URLS.append(url)
for url in URLS:
response = requests.get(url)
content = response.content
name = url.split('/')
with open(name,'wb') as f:
f.write(content)
|
none
| 1
| 3.001575
| 3
|
|
src/kafka_utils/base_consumer.py
|
Evcom-Lab/kafka-utils
| 0
|
6628824
|
from multiprocessing import Process
from confluent_kafka import Consumer
from abc import ABC, abstractmethod, abstractproperty
import sys
import ast
import logging
from functools import wraps
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('consumer')
def multiprocess(fn):
    """Decorator: run *fn* in a freshly started child process.

    The wrapper starts the process immediately and returns the
    ``Process`` handle so the caller can join or terminate it.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        proc = Process(target=fn, args=args, kwargs=kwargs)
        proc.start()
        return proc
    return wrapper
class BaseConsumer(ABC):
    """Abstract Kafka consumer that polls one topic in a child process.

    Subclasses must provide ``topic``, ``group_id`` and ``on_data``;
    ``listen()`` (wrapped by ``@multiprocess``) starts a process that polls
    the topic until ``shutdown()`` flips ``running`` off.
    """

    @abstractproperty
    def topic(self):
        """Kafka topic to subscribe to."""

    @abstractproperty
    def group_id(self):
        """Kafka consumer group id."""

    @abstractmethod
    def on_data(self, data):
        """Handle one decoded message."""

    def __init__(self):
        self.config = {
            'bootstrap.servers': 'localhost:9093',
            'group.id': self.group_id,
            'auto.offset.reset': 'smallest',
        }
        self.running = True  # poll-loop flag; cleared by shutdown()

    @multiprocess
    def listen(self):
        """Poll the topic forever, dispatching each message to on_data."""
        logger.info("Starting consumer... {}".format(self.__class__.__name__))
        consumer = Consumer(self.config)
        try:
            consumer.subscribe([self.topic])
            while self.running:
                msg = consumer.poll(1.0)
                if msg is None:
                    continue
                if msg.error():
                    logger.error("Consumer error: {}".format(msg.error()))
                    continue
                # Decode once and reuse (the original decoded twice).
                value = msg.value().decode('utf-8')
                logger.info('Received message: {}; Group id: {}'.format(value, self.group_id))
                self.on_data(self.parse_data(value))
        except KeyboardInterrupt:
            logger.info("Exiting...")
            sys.exit(1)
        finally:
            # Single close point.  The original also closed inside the try
            # body, so the finally clause closed the consumer a second time.
            consumer.close()

    def parse_data(self, data):
        """Parse *data* as a Python literal; return the raw string on failure.

        BUGFIX: the original ended with ``finally: return data``, whose
        return overrides the ``try`` branch's return — the parsed value was
        always discarded and the raw string returned instead.
        """
        try:
            return ast.literal_eval(data)
        except Exception as e:
            logger.error("Error: {}".format(e))
            return data

    def shutdown(self):
        """Ask the listen loop to exit after its current poll."""
        self.running = False
|
from multiprocessing import Process
from confluent_kafka import Consumer
from abc import ABC, abstractmethod, abstractproperty
import sys
import ast
import logging
from functools import wraps
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('consumer')
def multiprocess(fn):
@wraps(fn)
def call(*args, **kwargs):
p = Process(target=fn, args=args, kwargs=kwargs)
p.start()
return p
return call
class BaseConsumer(ABC):
@abstractproperty
def topic(self):
pass
@abstractproperty
def group_id(self):
pass
@abstractmethod
def on_data(self, data):
pass
def __init__(self):
self.config = {
'bootstrap.servers': 'localhost:9093',
'group.id': self.group_id,
'auto.offset.reset': 'smallest',
}
self.running = True
@multiprocess
def listen(self):
logger.info("Starting consumer... {}".format(self.__class__.__name__))
consumer = Consumer(self.config)
try:
consumer.subscribe([self.topic])
while self.running:
msg = consumer.poll(1.0)
if msg is None:
continue
if msg.error():
logger.error("Consumer error: {}".format(msg.error()))
continue
logger.info('Received message: {}; Group id: {}'.format(msg.value().decode('utf-8'), self.group_id))
self.on_data(self.parse_data(msg.value().decode('utf-8')) )
consumer.close()
except KeyboardInterrupt:
logger.info("Exiting...")
sys.exit(1)
finally:
consumer.close()
def parse_data(self, data):
try:
return ast.literal_eval(data)
except Exception as e:
logger.error("Error: {}".format(e))
finally:
return data
def shutdown(self):
self.running = False
|
none
| 1
| 2.753721
| 3
|
|
racovimge/racovimge.py
|
MohamedAliRashad/racovimge
| 0
|
6628825
|
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import base64
import jinja2
import os.path
import pathlib
import random as rand
import shutil
import subprocess
import tempfile
import textwrap
import os
###############################################################################
# Helper Functins
###############################################################################
def to_rgb(color):
color = color.lstrip('#')
r, g, b = map(lambda x: int(x, 16), [color[:2], color[2:4], color[4:]])
return 'rgb({},{},{})'.format(r, g, b)
def copy_fonts(*fonts):
"""
Copy the fonts to the home directory.
Necessary in order to use the fonts durring the png conversion.
"""
root = pathlib.Path(os.path.expanduser('~')) / '.fonts/racovimge'
if not root.exists():
root.mkdir(parents=True)
for font in fonts:
new_path = root / font.split('/')[-1]
if not new_path.exists():
shutil.copy(font, str(new_path))
def to_png(image):
fd, path = tempfile.mkstemp(suffix='.svg')
with open(path, 'w') as file:
file.write(image)
outpath = path.replace('.svg', '.png')
subprocess.call(['cairosvg', path, '-o', outpath])
with open(outpath, 'rb') as file:
data = file.read()
pathlib.Path(path).unlink()
pathlib.Path(outpath).unlink()
os.close(fd)
os.remove(path)
return data
def wrap(text, width):
if not isinstance(text, str):
return text
return textwrap.wrap(
text, break_long_words=False, break_on_hyphens=False, width=width)
###############################################################################
# Jinja2 setup
###############################################################################
env = jinja2.Environment(loader=jinja2.PackageLoader('racovimge'))
env.filters['wrap'] = wrap
env.filters['rgb'] = to_rgb
###############################################################################
# Templates and Color Schemes
###############################################################################
ROOT = pathlib.Path(__file__).parent
templates = [i.stem for i in (ROOT / 'templates').glob('*.svg')]
with (ROOT / 'colors.txt').open() as file:
color_schemes = [i.split() for i in file.read().split('\n')]
fonts = ROOT / 'fonts'
fonts = [i for i in fonts.glob('*.*') if i.suffix in ('.ttf', '.otf')]
fonts = [str(i.resolve()) for i in fonts]
###############################################################################
# Covers
###############################################################################
def random(
title, author, *,
templates=templates, schemes=color_schemes, fonts=fonts,
font_size=120, font_size_author=70):
template = rand.choice(templates)
colors = rand.choice(schemes)
font = rand.choice(fonts)
return cover(
title, author, template=template, colors=colors, font=font,
font_size=font_size, font_size_author=font_size_author)
def cover(
title, author, *, template, colors, font,
font_size=120, font_size_author=70):
authors = [author] if isinstance(author, str) else author
authors = authors[:3] if authors else []
clr1, clr2, clr3, clr4, clr5 = colors
font_mimetypes = dict(
otf='font/opentype',
ttf='application/x-font-ttf')
font = pathlib.Path(font)
with font.open('rb') as file:
font_data = file.read()
font_data = base64.b64encode(font_data).decode('utf-8')
font_name = font.stem
font_type = font_mimetypes[font.suffix.lstrip('.')]
image = env.get_template(template + '.svg').render(
title=title, authors=authors,
font=font_name, font_type=font_type, font_data=font_data,
color1=clr1, color2=clr2, color3=clr3, color4=clr4, color5=clr5,
font_size=font_size, font_size_author=font_size_author)
return image
def png_random(*args, **kwargs):
copy_fonts(*kwargs.get('fonts', fonts))
return to_png(random(*args, **kwargs))
def png_cover(*args, **kwargs):
copy_fonts(kwargs['font'])
return to_png(cover(*args, **kwargs))
|
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import base64
import jinja2
import os.path
import pathlib
import random as rand
import shutil
import subprocess
import tempfile
import textwrap
import os
###############################################################################
# Helper Functins
###############################################################################
def to_rgb(color):
color = color.lstrip('#')
r, g, b = map(lambda x: int(x, 16), [color[:2], color[2:4], color[4:]])
return 'rgb({},{},{})'.format(r, g, b)
def copy_fonts(*fonts):
"""
Copy the fonts to the home directory.
Necessary in order to use the fonts durring the png conversion.
"""
root = pathlib.Path(os.path.expanduser('~')) / '.fonts/racovimge'
if not root.exists():
root.mkdir(parents=True)
for font in fonts:
new_path = root / font.split('/')[-1]
if not new_path.exists():
shutil.copy(font, str(new_path))
def to_png(image):
fd, path = tempfile.mkstemp(suffix='.svg')
with open(path, 'w') as file:
file.write(image)
outpath = path.replace('.svg', '.png')
subprocess.call(['cairosvg', path, '-o', outpath])
with open(outpath, 'rb') as file:
data = file.read()
pathlib.Path(path).unlink()
pathlib.Path(outpath).unlink()
os.close(fd)
os.remove(path)
return data
def wrap(text, width):
if not isinstance(text, str):
return text
return textwrap.wrap(
text, break_long_words=False, break_on_hyphens=False, width=width)
###############################################################################
# Jinja2 setup
###############################################################################
env = jinja2.Environment(loader=jinja2.PackageLoader('racovimge'))
env.filters['wrap'] = wrap
env.filters['rgb'] = to_rgb
###############################################################################
# Templates and Color Schemes
###############################################################################
ROOT = pathlib.Path(__file__).parent
templates = [i.stem for i in (ROOT / 'templates').glob('*.svg')]
with (ROOT / 'colors.txt').open() as file:
color_schemes = [i.split() for i in file.read().split('\n')]
fonts = ROOT / 'fonts'
fonts = [i for i in fonts.glob('*.*') if i.suffix in ('.ttf', '.otf')]
fonts = [str(i.resolve()) for i in fonts]
###############################################################################
# Covers
###############################################################################
def random(
title, author, *,
templates=templates, schemes=color_schemes, fonts=fonts,
font_size=120, font_size_author=70):
template = rand.choice(templates)
colors = rand.choice(schemes)
font = rand.choice(fonts)
return cover(
title, author, template=template, colors=colors, font=font,
font_size=font_size, font_size_author=font_size_author)
def cover(
title, author, *, template, colors, font,
font_size=120, font_size_author=70):
authors = [author] if isinstance(author, str) else author
authors = authors[:3] if authors else []
clr1, clr2, clr3, clr4, clr5 = colors
font_mimetypes = dict(
otf='font/opentype',
ttf='application/x-font-ttf')
font = pathlib.Path(font)
with font.open('rb') as file:
font_data = file.read()
font_data = base64.b64encode(font_data).decode('utf-8')
font_name = font.stem
font_type = font_mimetypes[font.suffix.lstrip('.')]
image = env.get_template(template + '.svg').render(
title=title, authors=authors,
font=font_name, font_type=font_type, font_data=font_data,
color1=clr1, color2=clr2, color3=clr3, color4=clr4, color5=clr5,
font_size=font_size, font_size_author=font_size_author)
return image
def png_random(*args, **kwargs):
copy_fonts(*kwargs.get('fonts', fonts))
return to_png(random(*args, **kwargs))
def png_cover(*args, **kwargs):
copy_fonts(kwargs['font'])
return to_png(cover(*args, **kwargs))
|
de
| 0.785952
|
#!/usr/bin/env python3 ############################################################################### # Module Imports ############################################################################### ############################################################################### # Helper Functins ############################################################################### Copy the fonts to the home directory. Necessary in order to use the fonts durring the png conversion. ############################################################################### # Jinja2 setup ############################################################################### ############################################################################### # Templates and Color Schemes ############################################################################### ############################################################################### # Covers ###############################################################################
| 2.488957
| 2
|
abc065/c.py
|
y-sira/atcoder
| 0
|
6628826
|
<reponame>y-sira/atcoder
import sys
import math
def main():
n, m = map(int, input().split())
diff = abs(n - m)
if diff > 1:
print(0)
return 0
if diff == 1:
print(math.factorial(n) * math.factorial(m) % (10 ** 9 + 7))
else:
print(math.factorial(n) * math.factorial(m) * 2 % (10 ** 9 + 7))
return 0
if __name__ == '__main__':
sys.exit(main())
|
import sys
import math
def main():
n, m = map(int, input().split())
diff = abs(n - m)
if diff > 1:
print(0)
return 0
if diff == 1:
print(math.factorial(n) * math.factorial(m) % (10 ** 9 + 7))
else:
print(math.factorial(n) * math.factorial(m) * 2 % (10 ** 9 + 7))
return 0
if __name__ == '__main__':
sys.exit(main())
|
none
| 1
| 3.364967
| 3
|
|
setup.py
|
PGM-Lab/bcause
| 0
|
6628827
|
import re
import sys
import setuptools
import os
if sys.version_info < (3, 6):
sys.exit('Python < 3.6 is not supported')
# get abs path from this folder name
here = os.path.dirname(os.path.abspath(__file__))
print(here)
# open __init__.py, where version is specified
with open(os.path.join(here, 'bcause', '__init__.py')) as f:
txt = f.read()
# try to read it from source code
try:
version = re.findall(r"^__version__ = '([^']+)'\r?$",
txt, re.M)[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
# get long description from file in docs folder
with open(os.path.join(here, 'docs/project_description.md')) as f:
long_description = f.read()
setuptools.setup(
name="bcause", # Replace with your own username
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Bayesian causal models",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/PGM-Lab/bcause",
packages=["bcause"],
#package_dir={'': 'bcause'},
include_package_data=True,
license='Apache License 2.0',
classifiers=['Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6',
#extras_require=dict(tests=['pytest'])
)
|
import re
import sys
import setuptools
import os
if sys.version_info < (3, 6):
sys.exit('Python < 3.6 is not supported')
# get abs path from this folder name
here = os.path.dirname(os.path.abspath(__file__))
print(here)
# open __init__.py, where version is specified
with open(os.path.join(here, 'bcause', '__init__.py')) as f:
txt = f.read()
# try to read it from source code
try:
version = re.findall(r"^__version__ = '([^']+)'\r?$",
txt, re.M)[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
# get long description from file in docs folder
with open(os.path.join(here, 'docs/project_description.md')) as f:
long_description = f.read()
setuptools.setup(
name="bcause", # Replace with your own username
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Bayesian causal models",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/PGM-Lab/bcause",
packages=["bcause"],
#package_dir={'': 'bcause'},
include_package_data=True,
license='Apache License 2.0',
classifiers=['Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6',
#extras_require=dict(tests=['pytest'])
)
|
en
| 0.709804
|
# get abs path from this folder name # open __init__.py, where version is specified # try to read it from source code # get long description from file in docs folder # Replace with your own username #package_dir={'': 'bcause'}, #extras_require=dict(tests=['pytest'])
| 1.983793
| 2
|
tensorflow/test_utils.py
|
mixuala/fast-neural-style-pytorch
| 1
|
6628828
|
<reponame>mixuala/fast-neural-style-pytorch
import tensorflow as tf
import numpy as np
import PIL.Image
from fast_neural_style_pytorch.tensorflow import utils as fnstf_utils
from fast_neural_style_pytorch.tensorflow import vgg as tf_vgg
from fast_neural_style_pytorch.tensorflow import transformer as tf_transformer
class TransformerNetwork_VGG_ONES(tf.keras.Model):
"""TransformerNetwork_VGG that returns outputs as tf.ones()"""
def __init__(self, style_image):
super(TransformerNetwork_VGG_ONES, self).__init__()
TransformerNetwork = tf_transformer.TransformerNetwork()
TransformerNetwork.trainable = True
style_model = tf_vgg.get_layers("vgg19")
VGG = tf_vgg.vgg_layers19( style_model['content_layers'], style_model['style_layers'] )
target_style_gram = TransformerNetwork_VGG_ONES._get_target_style_gram_from_image(style_image, style_model)
nonzero = [tf.math.add(v,1.0) for v in target_style_gram]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
vGGfeatures = VGG_Features(VGG, target_style_gram=tuple(ones))
self.transformer = TransformerNetwork
self.vgg = vGGfeatures
def call(self, inputs):
x = inputs
x = self.transformer(x)
# output_shapes = [(BATCH_SIZZE, 16, 16, 512), (BATCH_SIZZE, 64, 64), (BATCH_SIZZE, 128, 128), (BATCH_SIZZE, 256, 256), (BATCH_SIZZE, 512, 512), (BATCH_SIZZE, 512, 512)]
features = self.vgg(x)
nonzero = [tf.math.add(v,1.0) for v in features]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
return tuple(ones)
@staticmethod
def _get_target_style_gram_from_image(style_image, style_model):
""""use when style_image.shape != (256,256,3)"""
VGG_Target = tf_vgg.vgg_layers19( style_model['content_layers'], style_model['style_layers'], input_shape=None )
if isinstance(style_image,str):
image_string = tf.io.read_file(style_image)
style_image = fnstf_utils.ImageRecordDatasetFactory.image2tensor(image_string, normalize=False)
target_style_gram = VGG_Features.get_style_gram(VGG_Target, style_image)
show([style_image], labels=["style_image, shape={}".format(style_image.shape)], w=128, domain=(0.,255.) )
return target_style_gram
# ### unit tests
class UNIT_TEST():
# static
TransformerNetwork_VGG_ONES = TransformerNetwork_VGG_ONES
@staticmethod
def inspect_vgg_features(features):
""" features: features = VGG(generated_batch)"""
assert isinstance(features, (tuple, dict)), "expecting tuple or dict got {}".format(type(features))
if isinstance(features, tuple):
for i, k in enumerate(features):
if tf.is_tensor(k): print(i," >", k.shape)
elif isinstance(k, (tuple, list)):
_ = [ print(i,j," >>", v.shape) for j,v in enumerate(k)]
else:
for k in features:
if tf.is_tensor(features[k]): print(k," >", features[k].shape)
elif isinstance(features[k], (tuple, list)):
_ = [ print(k," >>", v.shape) for v in features[k]]
@staticmethod
def inpsect_model_losses(y_true, y_pred):
""" check loss caclulations and loss weights"""
print("y_pred: ")
UNIT_TEST.inspect_vgg_features(y_pred)
print("y_true: ")
UNIT_TEST.inspect_vgg_features(y_true)
if isinstance(y_pred, (tuple, list)):
check1 = _content_loss_WEIGHTED(y_true[0], y_pred[0])
check2 = _style_loss_WEIGHTED(y_true[:1], y_pred[:1])
print("weighted losses", check1.numpy(), check2.numpy())
check3 = utils.get_SUM_mse_loss(y_true[0], y_pred[0])
check4 = utils.get_SUM_mse_loss(y_true[:1], y_pred[:1])
print( "weights: ", CONTENT_WEIGHT, STYLE_WEIGHT)
print("losses * weights", check3.numpy()*CONTENT_WEIGHT, check4.numpy()*STYLE_WEIGHT)
if isinstance(y_pred, dict):
check1 = _content_loss_WEIGHTED(y_true['content'], y_pred['content'])
check2 = _style_loss_WEIGHTED(y_true['style'], y_pred['style'])
print("weighted losses", check1.numpy(), check2.numpy())
check3 = utils.get_SUM_mse_loss(y_true['content'], y_pred['content'])
check4 = utils.get_SUM_mse_loss(y_true['style'], y_pred['style'])
print( "weights: ", CONTENT_WEIGHT, STYLE_WEIGHT)
print("losses * weights", check3.numpy()*CONTENT_WEIGHT, check4.numpy()*STYLE_WEIGHT)
assert check2==check4*STYLE_WEIGHT, "style losses failed"
assert check1==check3*CONTENT_WEIGHT, "content losses failed"
@staticmethod
def batch_generator_with_model_losses( transformer_vgg , BATCH_xy_Dataset255_with_TWOS):
print("BATCH_dataset")
# for x,y,weights in BATCH_xy_Dataset255_with_features.take(1):
for x,y,weights in BATCH_xy_Dataset255_with_TWOS.take(1):
print("FEATURE_WEIGHTS=", [tf.squeeze(v).numpy() for v in weights])
# show(x, domain=None, w=128)
# generated = TransformerNetwork(x)
# show(generated, domain=None, w=128)
features = transformer_vgg(x)
# print("features")
# UNIT_TEST_inspect_vgg_features(features)
# print("y_true")
# UNIT_TEST_inspect_vgg_features(y)
if isinstance(features, (tuple, list)):
print("\nFEATURE_WEIGHTS:",[v[0].numpy() for v in weights])
print()
check7 = utils.get_MEAN_mse_loss(y[0], features[0], weights[0])
check8 = utils.get_MEAN_mse_loss(y[1:], features[1:], weights[1:])
print("get_MEAN_mse_loss with FEATURE_WEIGHTS", check7.numpy(), check8.numpy())
print()
check5 = utils.get_SUM_mse_loss(y[0], features[0], weights[0])
check6 = utils.get_SUM_mse_loss(y[1:], features[1:], weights[1:])
print("get_SUM_mse_loss with FEATURE_WEIGHTS", check5.numpy(), check6.numpy())
print()
else:
assert False, "features is probably a dict"
@staticmethod
def BATCH_xyGenerator_y_true_as_TWOS_and_weights(tensor_ds_255,
VGGfeatures,
feature_weights=None
):
""" returns generator with weights(x_train, y_true, weights)
"""
if feature_weights is not None: print("dataset generator using FEATURE_WEIGHTS=", feature_weights)
def gen():
weights = tuple( [v] for v in feature_weights) if feature_weights else tuple( [1.] for v in range(6))
for x_train in tensor_ds_255.batch(BATCH_SIZE):
batch = x_train if len(x_train.shape)==4 else x_train[tf.newaxis,...]
# # # return as tuple( tensor, tuple) or tuple( tensor, ...)
y_true_features = VGGfeatures(batch)
if isinstance(y_true_features, (tuple, list)):
if len(y_true_features)==2:
# must FLATTEN to tuple( tensor, ...)
content, style = y_true_features
y_true_features = tuple([content] + list(style))
else:
pass # FLAT tuple(tensor x6) OK
# ones
nonzero = [tf.math.add(v,1.0) for v in y_true_features]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
twos = [ v*2. for v in ones ]
yield (x_train, tuple(twos), weights)
output_types= (
tf.float32,
(
tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32
),
(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32)
)
output_shapes = (
(None, 256,256,3),
(
(None, 16, 16, 512),
(None, 64, 64), (None, 128, 128), (None, 256, 256), (None, 512, 512), (None, 512, 512)
),
(
(1,), (1,),(1,),(1,),(1,),(1,)
)
)
return tf.data.Dataset.from_generator(
generator=gen,
output_types=output_types,
output_shapes=output_shapes,
)
@staticmethod
def check_mean_loss():
# [OK] get_MEAN_mse_loss() gives correct loss value of FEATURE_WEIGHTS
ONES = [1.,1.,1.,1.,1.,1.,]
SEQ = [1.,2.,3.,4.,5.,6.,]
WEIGHTS = SEQ
transformerNetwork_VGG_ONES = UNIT_TEST.TransformerNetwork_VGG_ONES(style_image)
BATCH_xy_Dataset255_with_TWOS = UNIT_TEST.BATCH_xyGenerator_y_true_as_TWOS_and_weights(
tensor_ds_255,
transformerNetwork_VGG_ONES.vgg,
feature_weights=WEIGHTS)
train_dataset = BATCH_xy_Dataset255_with_TWOS.take(BATCH_SIZE * NUM_BATCHES)
# force loss = 1.
for x,y,w in train_dataset.take(1):
y_pred = transformerNetwork_VGG_ONES(x)
loss = [ utils.get_MEAN_mse_loss(a,b, WEIGHTS[i]).numpy() for i,(a,b) in enumerate(zip(y,y_pred)) ]
# _ = [print( v[0,14:15,14, ...].numpy()) for v in y_pred]
print("loss=", loss )
assert loss==WEIGHTS
@staticmethod
def check_multiple_output_loss_handling():
SEQ = [1.,2.,3.,4.,5.,6.,]
FEATURE_WEIGHTS = SEQ
TransformerNetwork_VGG = UNIT_TEST.TransformerNetwork_VGG_ONES(style_image)
BATCH_xy_Dataset255_with_features = UNIT_TEST.BATCH_xyGenerator_y_true_as_TWOS_and_weights(
tensor_ds_255,
TransformerNetwork_VGG.vgg,
feature_weights=FEATURE_WEIGHTS
)
train_dataset = BATCH_xy_Dataset255_with_features.take(BATCH_SIZE * NUM_BATCHES)
# for x,y,w in BATCH_xy_Dataset255_with_features.take(1):
# print("check", [v[0].numpy() for v in w])
def get_MEAN_mse_loss_TEST(y_true, y_pred):
# CONFIRMED, TESTED OK
# generator returns (x,y,w)
# losses fed individually, without weights.
# loss = [loss(x,y)*w, for x,y,w in zip(y_true, y_pred, weights)]
assert not isinstance(y_pred, (tuple, list)), "expecting a tensor "
return utils.get_MEAN_mse_loss(y_true, y_pred)
TransformerNetwork_VGG.compile(
optimizer=optimizer,
loss=get_MEAN_mse_loss_TEST,
)
history = TransformerNetwork_VGG.fit(
x=train_dataset.repeat(NUM_EPOCHS),
epochs=NUM_EPOCHS,
steps_per_epoch=NUM_BATCHES,
callbacks=callbacks, # NOT working
)
|
import tensorflow as tf
import numpy as np
import PIL.Image
from fast_neural_style_pytorch.tensorflow import utils as fnstf_utils
from fast_neural_style_pytorch.tensorflow import vgg as tf_vgg
from fast_neural_style_pytorch.tensorflow import transformer as tf_transformer
class TransformerNetwork_VGG_ONES(tf.keras.Model):
"""TransformerNetwork_VGG that returns outputs as tf.ones()"""
def __init__(self, style_image):
super(TransformerNetwork_VGG_ONES, self).__init__()
TransformerNetwork = tf_transformer.TransformerNetwork()
TransformerNetwork.trainable = True
style_model = tf_vgg.get_layers("vgg19")
VGG = tf_vgg.vgg_layers19( style_model['content_layers'], style_model['style_layers'] )
target_style_gram = TransformerNetwork_VGG_ONES._get_target_style_gram_from_image(style_image, style_model)
nonzero = [tf.math.add(v,1.0) for v in target_style_gram]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
vGGfeatures = VGG_Features(VGG, target_style_gram=tuple(ones))
self.transformer = TransformerNetwork
self.vgg = vGGfeatures
def call(self, inputs):
x = inputs
x = self.transformer(x)
# output_shapes = [(BATCH_SIZZE, 16, 16, 512), (BATCH_SIZZE, 64, 64), (BATCH_SIZZE, 128, 128), (BATCH_SIZZE, 256, 256), (BATCH_SIZZE, 512, 512), (BATCH_SIZZE, 512, 512)]
features = self.vgg(x)
nonzero = [tf.math.add(v,1.0) for v in features]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
return tuple(ones)
@staticmethod
def _get_target_style_gram_from_image(style_image, style_model):
""""use when style_image.shape != (256,256,3)"""
VGG_Target = tf_vgg.vgg_layers19( style_model['content_layers'], style_model['style_layers'], input_shape=None )
if isinstance(style_image,str):
image_string = tf.io.read_file(style_image)
style_image = fnstf_utils.ImageRecordDatasetFactory.image2tensor(image_string, normalize=False)
target_style_gram = VGG_Features.get_style_gram(VGG_Target, style_image)
show([style_image], labels=["style_image, shape={}".format(style_image.shape)], w=128, domain=(0.,255.) )
return target_style_gram
# ### unit tests
class UNIT_TEST():
# static
TransformerNetwork_VGG_ONES = TransformerNetwork_VGG_ONES
@staticmethod
def inspect_vgg_features(features):
""" features: features = VGG(generated_batch)"""
assert isinstance(features, (tuple, dict)), "expecting tuple or dict got {}".format(type(features))
if isinstance(features, tuple):
for i, k in enumerate(features):
if tf.is_tensor(k): print(i," >", k.shape)
elif isinstance(k, (tuple, list)):
_ = [ print(i,j," >>", v.shape) for j,v in enumerate(k)]
else:
for k in features:
if tf.is_tensor(features[k]): print(k," >", features[k].shape)
elif isinstance(features[k], (tuple, list)):
_ = [ print(k," >>", v.shape) for v in features[k]]
@staticmethod
def inpsect_model_losses(y_true, y_pred):
""" check loss caclulations and loss weights"""
print("y_pred: ")
UNIT_TEST.inspect_vgg_features(y_pred)
print("y_true: ")
UNIT_TEST.inspect_vgg_features(y_true)
if isinstance(y_pred, (tuple, list)):
check1 = _content_loss_WEIGHTED(y_true[0], y_pred[0])
check2 = _style_loss_WEIGHTED(y_true[:1], y_pred[:1])
print("weighted losses", check1.numpy(), check2.numpy())
check3 = utils.get_SUM_mse_loss(y_true[0], y_pred[0])
check4 = utils.get_SUM_mse_loss(y_true[:1], y_pred[:1])
print( "weights: ", CONTENT_WEIGHT, STYLE_WEIGHT)
print("losses * weights", check3.numpy()*CONTENT_WEIGHT, check4.numpy()*STYLE_WEIGHT)
if isinstance(y_pred, dict):
check1 = _content_loss_WEIGHTED(y_true['content'], y_pred['content'])
check2 = _style_loss_WEIGHTED(y_true['style'], y_pred['style'])
print("weighted losses", check1.numpy(), check2.numpy())
check3 = utils.get_SUM_mse_loss(y_true['content'], y_pred['content'])
check4 = utils.get_SUM_mse_loss(y_true['style'], y_pred['style'])
print( "weights: ", CONTENT_WEIGHT, STYLE_WEIGHT)
print("losses * weights", check3.numpy()*CONTENT_WEIGHT, check4.numpy()*STYLE_WEIGHT)
assert check2==check4*STYLE_WEIGHT, "style losses failed"
assert check1==check3*CONTENT_WEIGHT, "content losses failed"
@staticmethod
def batch_generator_with_model_losses( transformer_vgg , BATCH_xy_Dataset255_with_TWOS):
print("BATCH_dataset")
# for x,y,weights in BATCH_xy_Dataset255_with_features.take(1):
for x,y,weights in BATCH_xy_Dataset255_with_TWOS.take(1):
print("FEATURE_WEIGHTS=", [tf.squeeze(v).numpy() for v in weights])
# show(x, domain=None, w=128)
# generated = TransformerNetwork(x)
# show(generated, domain=None, w=128)
features = transformer_vgg(x)
# print("features")
# UNIT_TEST_inspect_vgg_features(features)
# print("y_true")
# UNIT_TEST_inspect_vgg_features(y)
if isinstance(features, (tuple, list)):
print("\nFEATURE_WEIGHTS:",[v[0].numpy() for v in weights])
print()
check7 = utils.get_MEAN_mse_loss(y[0], features[0], weights[0])
check8 = utils.get_MEAN_mse_loss(y[1:], features[1:], weights[1:])
print("get_MEAN_mse_loss with FEATURE_WEIGHTS", check7.numpy(), check8.numpy())
print()
check5 = utils.get_SUM_mse_loss(y[0], features[0], weights[0])
check6 = utils.get_SUM_mse_loss(y[1:], features[1:], weights[1:])
print("get_SUM_mse_loss with FEATURE_WEIGHTS", check5.numpy(), check6.numpy())
print()
else:
assert False, "features is probably a dict"
@staticmethod
def BATCH_xyGenerator_y_true_as_TWOS_and_weights(tensor_ds_255,
VGGfeatures,
feature_weights=None
):
""" returns generator with weights(x_train, y_true, weights)
"""
if feature_weights is not None: print("dataset generator using FEATURE_WEIGHTS=", feature_weights)
def gen():
weights = tuple( [v] for v in feature_weights) if feature_weights else tuple( [1.] for v in range(6))
for x_train in tensor_ds_255.batch(BATCH_SIZE):
batch = x_train if len(x_train.shape)==4 else x_train[tf.newaxis,...]
# # # return as tuple( tensor, tuple) or tuple( tensor, ...)
y_true_features = VGGfeatures(batch)
if isinstance(y_true_features, (tuple, list)):
if len(y_true_features)==2:
# must FLATTEN to tuple( tensor, ...)
content, style = y_true_features
y_true_features = tuple([content] + list(style))
else:
pass # FLAT tuple(tensor x6) OK
# ones
nonzero = [tf.math.add(v,1.0) for v in y_true_features]
ones = [tf.math.divide_no_nan(v,v) for v in nonzero]
twos = [ v*2. for v in ones ]
yield (x_train, tuple(twos), weights)
output_types= (
tf.float32,
(
tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32
),
(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32)
)
output_shapes = (
(None, 256,256,3),
(
(None, 16, 16, 512),
(None, 64, 64), (None, 128, 128), (None, 256, 256), (None, 512, 512), (None, 512, 512)
),
(
(1,), (1,),(1,),(1,),(1,),(1,)
)
)
return tf.data.Dataset.from_generator(
generator=gen,
output_types=output_types,
output_shapes=output_shapes,
)
@staticmethod
def check_mean_loss():
# [OK] get_MEAN_mse_loss() gives correct loss value of FEATURE_WEIGHTS
ONES = [1.,1.,1.,1.,1.,1.,]
SEQ = [1.,2.,3.,4.,5.,6.,]
WEIGHTS = SEQ
transformerNetwork_VGG_ONES = UNIT_TEST.TransformerNetwork_VGG_ONES(style_image)
BATCH_xy_Dataset255_with_TWOS = UNIT_TEST.BATCH_xyGenerator_y_true_as_TWOS_and_weights(
tensor_ds_255,
transformerNetwork_VGG_ONES.vgg,
feature_weights=WEIGHTS)
train_dataset = BATCH_xy_Dataset255_with_TWOS.take(BATCH_SIZE * NUM_BATCHES)
# force loss = 1.
for x,y,w in train_dataset.take(1):
y_pred = transformerNetwork_VGG_ONES(x)
loss = [ utils.get_MEAN_mse_loss(a,b, WEIGHTS[i]).numpy() for i,(a,b) in enumerate(zip(y,y_pred)) ]
# _ = [print( v[0,14:15,14, ...].numpy()) for v in y_pred]
print("loss=", loss )
assert loss==WEIGHTS
@staticmethod
def check_multiple_output_loss_handling():
SEQ = [1.,2.,3.,4.,5.,6.,]
FEATURE_WEIGHTS = SEQ
TransformerNetwork_VGG = UNIT_TEST.TransformerNetwork_VGG_ONES(style_image)
BATCH_xy_Dataset255_with_features = UNIT_TEST.BATCH_xyGenerator_y_true_as_TWOS_and_weights(
tensor_ds_255,
TransformerNetwork_VGG.vgg,
feature_weights=FEATURE_WEIGHTS
)
train_dataset = BATCH_xy_Dataset255_with_features.take(BATCH_SIZE * NUM_BATCHES)
# for x,y,w in BATCH_xy_Dataset255_with_features.take(1):
# print("check", [v[0].numpy() for v in w])
def get_MEAN_mse_loss_TEST(y_true, y_pred):
# CONFIRMED, TESTED OK
# generator returns (x,y,w)
# losses fed individually, without weights.
# loss = [loss(x,y)*w, for x,y,w in zip(y_true, y_pred, weights)]
assert not isinstance(y_pred, (tuple, list)), "expecting a tensor "
return utils.get_MEAN_mse_loss(y_true, y_pred)
TransformerNetwork_VGG.compile(
optimizer=optimizer,
loss=get_MEAN_mse_loss_TEST,
)
history = TransformerNetwork_VGG.fit(
x=train_dataset.repeat(NUM_EPOCHS),
epochs=NUM_EPOCHS,
steps_per_epoch=NUM_BATCHES,
callbacks=callbacks, # NOT working
)
|
en
| 0.653347
|
TransformerNetwork_VGG that returns outputs as tf.ones() # output_shapes = [(BATCH_SIZZE, 16, 16, 512), (BATCH_SIZZE, 64, 64), (BATCH_SIZZE, 128, 128), (BATCH_SIZZE, 256, 256), (BATCH_SIZZE, 512, 512), (BATCH_SIZZE, 512, 512)] "use when style_image.shape != (256,256,3) # ### unit tests # static features: features = VGG(generated_batch) check loss caclulations and loss weights # for x,y,weights in BATCH_xy_Dataset255_with_features.take(1): # show(x, domain=None, w=128) # generated = TransformerNetwork(x) # show(generated, domain=None, w=128) # print("features") # UNIT_TEST_inspect_vgg_features(features) # print("y_true") # UNIT_TEST_inspect_vgg_features(y) returns generator with weights(x_train, y_true, weights) # # # return as tuple( tensor, tuple) or tuple( tensor, ...) # must FLATTEN to tuple( tensor, ...) # FLAT tuple(tensor x6) OK # ones # [OK] get_MEAN_mse_loss() gives correct loss value of FEATURE_WEIGHTS # force loss = 1. # _ = [print( v[0,14:15,14, ...].numpy()) for v in y_pred] # for x,y,w in BATCH_xy_Dataset255_with_features.take(1): # print("check", [v[0].numpy() for v in w]) # CONFIRMED, TESTED OK # generator returns (x,y,w) # losses fed individually, without weights. # loss = [loss(x,y)*w, for x,y,w in zip(y_true, y_pred, weights)] # NOT working
| 2.373272
| 2
|
manage.py
|
xflows/textflows
| 18
|
6628829
|
<reponame>xflows/textflows
#!/usr/bin/env python
# Copyright (C) <2014> <NAME>
import os, sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mothra.settings')
from django.core.management import execute_from_command_line
from django.conf import settings
if hasattr(settings, "LATINO_BIN_PATH"):
import clr
sys.path.append(settings.LATINO_BIN_PATH)
import LatinoInterfaces
execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
# Copyright (C) <2014> <NAME>
import os, sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mothra.settings')
from django.core.management import execute_from_command_line
from django.conf import settings
if hasattr(settings, "LATINO_BIN_PATH"):
import clr
sys.path.append(settings.LATINO_BIN_PATH)
import LatinoInterfaces
execute_from_command_line(sys.argv)
|
en
| 0.193721
|
#!/usr/bin/env python # Copyright (C) <2014> <NAME>
| 1.620585
| 2
|
config/traces.py
|
JRPan/gltracesim
| 11
|
6628830
|
class Trace:
def __init__(self, name, input = None, fast_forward = 0, sim_end = 1E6, trace=False):
self.name = name
self.input = input
self.fast_forward = fast_forward
self.sim_end = sim_end
self.trace = trace
traces = [
Trace(
name="telemetry_youtube",
input="youtube.gltrace",
fast_forward=0,
sim_end=177
),
Trace(
name="telemetry_wikipedia",
input="wikipedia.gltrace",
fast_forward=0,
sim_end=378
),
Trace(
name="telemetry_twitter",
input="twitter.gltrace",
fast_forward=0,
sim_end=286
),
Trace(
name="telemetry_techcrunch",
input="techcrunch.gltrace",
fast_forward=0,
sim_end=488
),
Trace(
name="telemetry_sports_yahoo",
input="sports_yahoo.gltrace",
fast_forward=0,
sim_end=398
),
Trace(
name="telemetry_reddit",
input="reddit.gltrace",
fast_forward=0,
sim_end=165
),
Trace(
name="telemetry_news_yahoo",
input="news_yahoo.gltrace",
fast_forward=0,
sim_end=229
),
Trace(
name="telemetry_google",
input="google.gltrace",
fast_forward=0,
sim_end=165
),
Trace(
name="telemetry_facebook",
input="facebook.gltrace",
fast_forward=0,
sim_end=237
),
Trace(
name="telemetry_ebay",
input="ebay.gltrace",
fast_forward=0,
sim_end=276
),
Trace(
name="telemetry_cnn",
input="cnn.gltrace",
fast_forward=0,
sim_end=455
),
Trace(
name="telemetry_booking",
input="booking.gltrace",
fast_forward=0,
sim_end=417
),
Trace(
name="telemetry_answers_yahoo",
input="answers_yahoo.gltrace",
fast_forward=0,
sim_end=195
),
Trace(
name="telemetry_amazon",
input="amazon.gltrace",
fast_forward=0,
sim_end=309
),
Trace(
name="ue4_tappy_chicken",
input="ue4-tappy-chicken.gltrace",
fast_forward=20,
sim_end=5000
),
Trace(
name="pts_tesseract_1.1.0_1920x1200",
input="pts-tesseract-1.1.0-1920x1200.gltrace",
fast_forward=410,
sim_end=906
),
Trace(
name="pts_unigine_heaven_1.6.2_1920x1200",
input="pts-unigine-heaven-1.6.2-1920x1200.gltrace",
fast_forward=110,
sim_end=161
),
Trace(
name="pts_unigine_valley_1.1.4_1920x1200",
input="pts-unigine-valley-1.1.4-1920x1200.gltrace",
fast_forward=110,
sim_end=163
),
Trace(
name="pts_openarena_1.5.3_1920x1200",
input="pts-openarena-1.5.3-1920x1200.gltrace",
fast_forward=45,
sim_end=1148
),
Trace(
name="pts_xonotic_1.4.0_1920x1200_LD",
input="pts-xonotic-1.4.0-1920x1200-Low.gltrace",
fast_forward=65,
sim_end=3475
),
Trace(
name="pts_xonotic_1.4.0_1920x1200_UD",
input="pts-xonotic-1.4.0-1920x1200-Ultimate.gltrace",
fast_forward=65,
sim_end=1116
),
Trace(
name="gfxbench_manhattan_1920x1200",
input="gfxbench-manhattan-1920x1200.gltrace",
fast_forward=0,
sim_end=1079
),
Trace(
name="gfxbench_trex_1920x1200",
input="gfxbench-trex-1920x1200.gltrace",
fast_forward=0,
sim_end=1507
),
]
|
class Trace:
def __init__(self, name, input = None, fast_forward = 0, sim_end = 1E6, trace=False):
self.name = name
self.input = input
self.fast_forward = fast_forward
self.sim_end = sim_end
self.trace = trace
traces = [
Trace(
name="telemetry_youtube",
input="youtube.gltrace",
fast_forward=0,
sim_end=177
),
Trace(
name="telemetry_wikipedia",
input="wikipedia.gltrace",
fast_forward=0,
sim_end=378
),
Trace(
name="telemetry_twitter",
input="twitter.gltrace",
fast_forward=0,
sim_end=286
),
Trace(
name="telemetry_techcrunch",
input="techcrunch.gltrace",
fast_forward=0,
sim_end=488
),
Trace(
name="telemetry_sports_yahoo",
input="sports_yahoo.gltrace",
fast_forward=0,
sim_end=398
),
Trace(
name="telemetry_reddit",
input="reddit.gltrace",
fast_forward=0,
sim_end=165
),
Trace(
name="telemetry_news_yahoo",
input="news_yahoo.gltrace",
fast_forward=0,
sim_end=229
),
Trace(
name="telemetry_google",
input="google.gltrace",
fast_forward=0,
sim_end=165
),
Trace(
name="telemetry_facebook",
input="facebook.gltrace",
fast_forward=0,
sim_end=237
),
Trace(
name="telemetry_ebay",
input="ebay.gltrace",
fast_forward=0,
sim_end=276
),
Trace(
name="telemetry_cnn",
input="cnn.gltrace",
fast_forward=0,
sim_end=455
),
Trace(
name="telemetry_booking",
input="booking.gltrace",
fast_forward=0,
sim_end=417
),
Trace(
name="telemetry_answers_yahoo",
input="answers_yahoo.gltrace",
fast_forward=0,
sim_end=195
),
Trace(
name="telemetry_amazon",
input="amazon.gltrace",
fast_forward=0,
sim_end=309
),
Trace(
name="ue4_tappy_chicken",
input="ue4-tappy-chicken.gltrace",
fast_forward=20,
sim_end=5000
),
Trace(
name="pts_tesseract_1.1.0_1920x1200",
input="pts-tesseract-1.1.0-1920x1200.gltrace",
fast_forward=410,
sim_end=906
),
Trace(
name="pts_unigine_heaven_1.6.2_1920x1200",
input="pts-unigine-heaven-1.6.2-1920x1200.gltrace",
fast_forward=110,
sim_end=161
),
Trace(
name="pts_unigine_valley_1.1.4_1920x1200",
input="pts-unigine-valley-1.1.4-1920x1200.gltrace",
fast_forward=110,
sim_end=163
),
Trace(
name="pts_openarena_1.5.3_1920x1200",
input="pts-openarena-1.5.3-1920x1200.gltrace",
fast_forward=45,
sim_end=1148
),
Trace(
name="pts_xonotic_1.4.0_1920x1200_LD",
input="pts-xonotic-1.4.0-1920x1200-Low.gltrace",
fast_forward=65,
sim_end=3475
),
Trace(
name="pts_xonotic_1.4.0_1920x1200_UD",
input="pts-xonotic-1.4.0-1920x1200-Ultimate.gltrace",
fast_forward=65,
sim_end=1116
),
Trace(
name="gfxbench_manhattan_1920x1200",
input="gfxbench-manhattan-1920x1200.gltrace",
fast_forward=0,
sim_end=1079
),
Trace(
name="gfxbench_trex_1920x1200",
input="gfxbench-trex-1920x1200.gltrace",
fast_forward=0,
sim_end=1507
),
]
|
none
| 1
| 2.443939
| 2
|
|
mapper/getCLIPscores.py
|
Dokhyam/StyleCLIP
| 0
|
6628831
|
import os
import torch
from models.stylegan2.model import Generator
directions_path = '/disk1/dokhyam/StyleCLIP/directions/'
image_latents = torch.load('/disk1/dokhyam/StyleCLIP/mapper/latent_data/train_faces.pt')
directions_list = os.listdir(directions_path)
image_ind = 0
StyleGANGenerator = Generator(1024,512,8)
for d in directions_list:
input_batch = image_latents[i,:]
input_cuda = input_batch.cuda().float()
I_1 = StyleGANGenerator(input_cuda)
|
import os
import torch
from models.stylegan2.model import Generator
directions_path = '/disk1/dokhyam/StyleCLIP/directions/'
image_latents = torch.load('/disk1/dokhyam/StyleCLIP/mapper/latent_data/train_faces.pt')
directions_list = os.listdir(directions_path)
image_ind = 0
StyleGANGenerator = Generator(1024,512,8)
for d in directions_list:
input_batch = image_latents[i,:]
input_cuda = input_batch.cuda().float()
I_1 = StyleGANGenerator(input_cuda)
|
none
| 1
| 2.051907
| 2
|
|
src/pymcprotocol/type4e.py
|
tyaro/pymcprotocol
| 0
|
6628832
|
"""This file implements mcprotocol 4E type communication.
"""
import re
import socket
from . import mcprotocolerror
from . import mcprotocolconst as const
from .type3e import Type3E
class Type4E(Type3E):
"""mcprotocol 4E communication class.
Type 4e is almost same to Type 3E. Difference is only subheader.
So, Changed self.subhear and self._make_senddata()
Arributes:
subheader(int): Subheader for mc protocol
subheaderserial(int): Subheader serial for mc protocol to identify client
"""
subheader = 0x5400
subheaderserial = 0X0000
def set_subheaderserial(self, subheaderserial):
"""Change subheader serial
Args:
subheaderserial(int): Subheader serial to change
"""
if(0 <= subheaderserial <= 65535):
self.subheaderserial = subheaderserial
else:
raise ValueError("subheaderserial must be 0 <= subheaderserial <= 65535")
return None
def _get_answerdata_index(self):
"""Get answer data index from return data byte.
4e type's data index is defferent from 3e type's.
"""
if self.commtype == const.COMMTYPE_BINARY:
return 15
else:
return 30
def _get_answerstatus_index(self):
"""Get command status index from return data byte.
"""
if self.commtype == const.COMMTYPE_BINARY:
return 13
else:
return 26
def _make_senddata(self, requestdata):
"""Makes send mc protorocl data.
Args:
requestdata(bytes): mc protocol request data.
data must be converted according to self.commtype
Returns:
mc_data(bytes): send mc protorocl data
"""
mc_data = bytes()
# subheader is big endian
if self.commtype == const.COMMTYPE_BINARY:
mc_data += self.subheader.to_bytes(2, "big")
else:
mc_data += format(self.subheader, "x").ljust(4, "0").upper().encode()
mc_data += self._encode_value(self.subheaderserial, "short")
mc_data += self._encode_value(0, "short")
mc_data += self._encode_value(self.network, "byte")
mc_data += self._encode_value(self.pc, "byte")
mc_data += self._encode_value(self.dest_moduleio, "short")
mc_data += self._encode_value(self.dest_modulesta, "byte")
#add self.timer size
mc_data += self._encode_value(self._wordsize + len(requestdata), "short")
mc_data += self._encode_value(self.timer, "short")
mc_data += requestdata
return mc_data
|
"""This file implements mcprotocol 4E type communication.
"""
import re
import socket
from . import mcprotocolerror
from . import mcprotocolconst as const
from .type3e import Type3E
class Type4E(Type3E):
"""mcprotocol 4E communication class.
Type 4e is almost same to Type 3E. Difference is only subheader.
So, Changed self.subhear and self._make_senddata()
Arributes:
subheader(int): Subheader for mc protocol
subheaderserial(int): Subheader serial for mc protocol to identify client
"""
subheader = 0x5400
subheaderserial = 0X0000
def set_subheaderserial(self, subheaderserial):
"""Change subheader serial
Args:
subheaderserial(int): Subheader serial to change
"""
if(0 <= subheaderserial <= 65535):
self.subheaderserial = subheaderserial
else:
raise ValueError("subheaderserial must be 0 <= subheaderserial <= 65535")
return None
def _get_answerdata_index(self):
"""Get answer data index from return data byte.
4e type's data index is defferent from 3e type's.
"""
if self.commtype == const.COMMTYPE_BINARY:
return 15
else:
return 30
def _get_answerstatus_index(self):
"""Get command status index from return data byte.
"""
if self.commtype == const.COMMTYPE_BINARY:
return 13
else:
return 26
def _make_senddata(self, requestdata):
"""Makes send mc protorocl data.
Args:
requestdata(bytes): mc protocol request data.
data must be converted according to self.commtype
Returns:
mc_data(bytes): send mc protorocl data
"""
mc_data = bytes()
# subheader is big endian
if self.commtype == const.COMMTYPE_BINARY:
mc_data += self.subheader.to_bytes(2, "big")
else:
mc_data += format(self.subheader, "x").ljust(4, "0").upper().encode()
mc_data += self._encode_value(self.subheaderserial, "short")
mc_data += self._encode_value(0, "short")
mc_data += self._encode_value(self.network, "byte")
mc_data += self._encode_value(self.pc, "byte")
mc_data += self._encode_value(self.dest_moduleio, "short")
mc_data += self._encode_value(self.dest_modulesta, "byte")
#add self.timer size
mc_data += self._encode_value(self._wordsize + len(requestdata), "short")
mc_data += self._encode_value(self.timer, "short")
mc_data += requestdata
return mc_data
|
en
| 0.479408
|
This file implements mcprotocol 4E type communication. mcprotocol 4E communication class. Type 4e is almost same to Type 3E. Difference is only subheader. So, Changed self.subhear and self._make_senddata() Arributes: subheader(int): Subheader for mc protocol subheaderserial(int): Subheader serial for mc protocol to identify client Change subheader serial Args: subheaderserial(int): Subheader serial to change Get answer data index from return data byte. 4e type's data index is defferent from 3e type's. Get command status index from return data byte. Makes send mc protorocl data. Args: requestdata(bytes): mc protocol request data. data must be converted according to self.commtype Returns: mc_data(bytes): send mc protorocl data # subheader is big endian #add self.timer size
| 2.904643
| 3
|
UNET/nets.py
|
zz00zws/magic_learning
| 1
|
6628833
|
import torch.nn as nn
import torch
import cfg
device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class conv(nn.Module):
def __init__(self,x,y,z,p=0,s=1,b=True):
super().__init__()
self.block1=nn.Sequential(
nn.Conv2d(x,y,z,padding=p,stride=s,bias=b),
nn.BatchNorm2d(y),
nn.LeakyReLU(negative_slope=0.3, inplace=True)
)
def forward(self,y):
return self.block1(y)
class conv2(nn.Module):
def __init__(self,x,y,b=True):
super().__init__()
self.block1=nn.Sequential(
conv(x,y,3,1,1,b),
conv(y,y,3,1,1,b)
)
self.dout=conv(y,y,3,1,2,b)
self.cout=conv(y,y,3,1,1,b)
def forward(self,y):
y=self.block1(y)
return self.dout(y),self.cout(y)
class up(nn.Module):
def __init__(self,x,y,p=0,s=1,b=True):
super().__init__()
self.block1=nn.Sequential(
conv(x,2*y,3,1,1,b),
conv(2*y,2*y,3,1,1,b),
nn.ConvTranspose2d(2*y,y,3,2,1,1)
)
def forward(self,y):
return self.block1(y)
class unet(nn.Module):
def __init__(self):
super().__init__()
self.d1=conv2(1,64)
self.d2=conv2(64,128)
self.d3=conv2(128,256)
self.d4=conv2(256,512)
self.dd=nn.Sequential(
conv(512,1024,3,1,1),
conv(1024,1024,3,1,1),
nn.ConvTranspose2d(1024,512,3,2,1,1)
)
self.u4=up(1024,256)
self.u3=up(512,128)
self.u2=up(256,64)
self.u1=nn.Sequential(
conv(128,64,3,1,1),
conv(64,64,3,1,1),
conv(64,cfg.class_num,3,1,1),
nn.Sigmoid()
)
def forward(self,y):
d1,c1=self.d1(y)
d2,c2=self.d2(d1)
d3,c3=self.d3(d2)
d4,c4=self.d4(d3)
dd=self.dd(d4)
u4=torch.cat((c4,dd),1)
u4=self.u4(u4)
u3=torch.cat((c3,u4),1)
u3=self.u3(u3)
u2=torch.cat((c2,u3),1)
u2=self.u2(u2)
u1=torch.cat((c1,u2),1)
u1=self.u1(u1)
u1=u1.permute(0,2,3,1)
return u1
if __name__ == '__main__':
net=unet().to(device)
z=torch.randn(1,1,512,512).to(device)
z=net(z)
# net2=fc()
# z=torch.randn(5,256)
# b=net2(z)
print(z.size())
|
import torch.nn as nn
import torch
import cfg
device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class conv(nn.Module):
def __init__(self,x,y,z,p=0,s=1,b=True):
super().__init__()
self.block1=nn.Sequential(
nn.Conv2d(x,y,z,padding=p,stride=s,bias=b),
nn.BatchNorm2d(y),
nn.LeakyReLU(negative_slope=0.3, inplace=True)
)
def forward(self,y):
return self.block1(y)
class conv2(nn.Module):
def __init__(self,x,y,b=True):
super().__init__()
self.block1=nn.Sequential(
conv(x,y,3,1,1,b),
conv(y,y,3,1,1,b)
)
self.dout=conv(y,y,3,1,2,b)
self.cout=conv(y,y,3,1,1,b)
def forward(self,y):
y=self.block1(y)
return self.dout(y),self.cout(y)
class up(nn.Module):
def __init__(self,x,y,p=0,s=1,b=True):
super().__init__()
self.block1=nn.Sequential(
conv(x,2*y,3,1,1,b),
conv(2*y,2*y,3,1,1,b),
nn.ConvTranspose2d(2*y,y,3,2,1,1)
)
def forward(self,y):
return self.block1(y)
class unet(nn.Module):
def __init__(self):
super().__init__()
self.d1=conv2(1,64)
self.d2=conv2(64,128)
self.d3=conv2(128,256)
self.d4=conv2(256,512)
self.dd=nn.Sequential(
conv(512,1024,3,1,1),
conv(1024,1024,3,1,1),
nn.ConvTranspose2d(1024,512,3,2,1,1)
)
self.u4=up(1024,256)
self.u3=up(512,128)
self.u2=up(256,64)
self.u1=nn.Sequential(
conv(128,64,3,1,1),
conv(64,64,3,1,1),
conv(64,cfg.class_num,3,1,1),
nn.Sigmoid()
)
def forward(self,y):
d1,c1=self.d1(y)
d2,c2=self.d2(d1)
d3,c3=self.d3(d2)
d4,c4=self.d4(d3)
dd=self.dd(d4)
u4=torch.cat((c4,dd),1)
u4=self.u4(u4)
u3=torch.cat((c3,u4),1)
u3=self.u3(u3)
u2=torch.cat((c2,u3),1)
u2=self.u2(u2)
u1=torch.cat((c1,u2),1)
u1=self.u1(u1)
u1=u1.permute(0,2,3,1)
return u1
if __name__ == '__main__':
net=unet().to(device)
z=torch.randn(1,1,512,512).to(device)
z=net(z)
# net2=fc()
# z=torch.randn(5,256)
# b=net2(z)
print(z.size())
|
en
| 0.311998
|
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # net2=fc() # z=torch.randn(5,256) # b=net2(z)
| 2.750445
| 3
|
corehq/apps/domain/forms.py
|
bglar/commcare-hq
| 1
|
6628834
|
<reponame>bglar/commcare-hq<gh_stars>1-10
import copy
import logging
from urlparse import urlparse, parse_qs
import dateutil
import re
import io
from PIL import Image
import uuid
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
from corehq import privileges
from corehq.apps.accounting.exceptions import SubscriptionRenewalError
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.sms.phonenumbers_helper import parse_phone_number
from corehq.feature_previews import CALLCENTER
import settings
from django import forms
from crispy_forms.bootstrap import FormActions, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from django.core.urlresolvers import reverse
from django.forms.fields import (ChoiceField, CharField, BooleanField,
ImageField, DecimalField, IntegerField)
from django.forms.widgets import Select
from django.utils.encoding import smart_str
from django.contrib.auth.forms import PasswordResetForm
from django.utils.safestring import mark_safe
from django_countries.countries import COUNTRIES
from corehq.apps.accounting.models import BillingContactInfo, BillingAccountAdmin, SubscriptionAdjustmentMethod, Subscription, SoftwarePlanEdition
from corehq.apps.app_manager.models import Application, FormBase, ApplicationBase, get_apps_in_domain
from corehq.apps.domain.models import (LOGO_ATTACHMENT, LICENSES, DATA_DICT,
AREA_CHOICES, SUB_AREA_CHOICES, Domain)
from corehq.apps.reminders.models import CaseReminderHandler
from corehq.apps.users.models import WebUser, CommCareUser
from corehq.apps.groups.models import Group
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.timezones.fields import TimeZoneField
from dimagi.utils.timezones.forms import TimeZoneChoiceField
from django.template.loader import render_to_string
from django.utils.translation import ugettext_noop, ugettext as _
from corehq.apps.style.forms.widgets import BootstrapCheckboxInput, BootstrapDisabledInput
# used to resize uploaded custom logos, aspect ratio is preserved
LOGO_SIZE = (211, 32)
logger = logging.getLogger(__name__)
def tf_choices(true_txt, false_txt):
return (('false', false_txt), ('true', true_txt))
class SnapshotSettingsMixin(forms.Form):
project_type = CharField(label=ugettext_noop("Project Category"), required=False,
help_text=ugettext_noop("e.g. MCH, HIV, etc."))
class ProjectSettingsForm(forms.Form):
"""
Form for updating a user's project settings
"""
global_timezone = forms.CharField(
initial="UTC",
label="Project Timezone",
widget=BootstrapDisabledInput(attrs={'class': 'input-xlarge'}))
override_global_tz = forms.BooleanField(
initial=False,
required=False,
label="",
widget=BootstrapCheckboxInput(
attrs={'data-bind': 'checked: override_tz, event: {change: updateForm}'},
inline_label=ugettext_noop("Override project's timezone setting just for me.")))
user_timezone = TimeZoneChoiceField(
label="My Timezone",
initial=global_timezone.initial,
widget=forms.Select(attrs={'class': 'input-xlarge', 'bindparent': 'visible: override_tz',
'data-bind': 'event: {change: updateForm}'}))
def clean_user_timezone(self):
data = self.cleaned_data['user_timezone']
timezone_field = TimeZoneField()
timezone_field.run_validators(data)
return smart_str(data)
def save(self, user, domain):
try:
timezone = self.cleaned_data['global_timezone']
override = self.cleaned_data['override_global_tz']
if override:
timezone = self.cleaned_data['user_timezone']
dm = user.get_domain_membership(domain)
dm.timezone = timezone
dm.override_global_tz = override
user.save()
return True
except Exception:
return False
class SnapshotApplicationForm(forms.Form):
publish = BooleanField(label=ugettext_noop("Publish?"), required=False)
name = CharField(label=ugettext_noop("Name"), required=True)
description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea,
help_text=ugettext_noop("A detailed technical description of the application"))
deployment_date = CharField(label=ugettext_noop("Deployment date"), required=False)
phone_model = CharField(label=ugettext_noop("Phone model"), required=False)
user_type = CharField(label=ugettext_noop("User type"), required=False,
help_text=ugettext_noop("e.g. CHW, ASHA, RA, etc"))
attribution_notes = CharField(label=ugettext_noop("Attribution notes"), required=False,
help_text=ugettext_noop("Enter any special instructions to users here. This will be shown just before users copy your project."), widget=forms.Textarea)
def __init__(self, *args, **kwargs):
super(SnapshotApplicationForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = [
'publish',
'name',
'description',
'deployment_date',
'phone_model',
'user_type',
'attribution_notes'
]
class SnapshotFixtureForm(forms.Form):
publish = BooleanField(label=ugettext_noop("Publish?"), required=False)
description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea,
help_text=ugettext_noop("A detailed technical description of the table"))
def __init__(self, *args, **kwargs):
super(SnapshotFixtureForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = [
'publish',
'description',
]
class SnapshotSettingsForm(SnapshotSettingsMixin):
title = CharField(label=ugettext_noop("Title"), required=True, max_length=100)
project_type = CharField(label=ugettext_noop("Project Category"), required=True,
help_text=ugettext_noop("e.g. MCH, HIV, etc."))
license = ChoiceField(label=ugettext_noop("License"), required=True, choices=LICENSES.items(),
widget=Select(attrs={'class': 'input-xxlarge'}))
description = CharField(label=ugettext_noop("Long Description"), required=False, widget=forms.Textarea,
help_text=ugettext_noop("A high-level overview of your project as a whole"))
short_description = CharField(label=ugettext_noop("Short Description"), required=False,
widget=forms.Textarea(attrs={'maxlength': 200}),
help_text=ugettext_noop("A brief description of your project (max. 200 characters)"))
share_multimedia = BooleanField(label=ugettext_noop("Share all multimedia?"), required=False,
help_text=ugettext_noop("This will allow any user to see and use all multimedia in this project"))
share_reminders = BooleanField(label=ugettext_noop("Share Reminders?"), required=False,
help_text=ugettext_noop("This will publish reminders along with this project"))
image = forms.ImageField(label=ugettext_noop("Exchange image"), required=False,
help_text=ugettext_noop("An optional image to show other users your logo or what your app looks like"))
video = CharField(label=ugettext_noop("Youtube Video"), required=False,
help_text=ugettext_noop("An optional youtube clip to tell users about your app. Please copy and paste a URL to a youtube video"))
cda_confirmed = BooleanField(required=False, label=ugettext_noop("Content Distribution Agreement"))
def __init__(self, *args, **kw):
super(SnapshotSettingsForm, self).__init__(*args, **kw)
self.fields.keyOrder = [
'title',
'short_description',
'description',
'project_type',
'image',
'video',
'share_multimedia',
'share_reminders',
'license',
'cda_confirmed',]
self.fields['license'].help_text = \
render_to_string('domain/partials/license_explanations.html', {
'extra': _("All un-licensed multimedia files in "
"your project will be given this license")
})
self.fields['cda_confirmed'].help_text = \
render_to_string('domain/partials/cda_modal.html')
def clean_cda_confirmed(self):
data_cda = self.cleaned_data['cda_confirmed']
data_publish = self.data.get('publish_on_submit', "no") == "yes"
if data_publish and data_cda is False:
raise forms.ValidationError('You must agree to our Content Distribution Agreement to publish your project.')
return data_cda
def clean_video(self):
video = self.cleaned_data['video']
if not video:
return video
def video_id(value):
# http://stackoverflow.com/questions/4356538/how-can-i-extract-video-id-from-youtubes-link-in-python#answer-7936523
"""
Examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
"""
query = urlparse(value)
if query.hostname == 'youtu.be':
return query.path[1:]
if query.hostname in ('www.youtube.com', 'youtube.com'):
if query.path == '/watch':
p = parse_qs(query.query)
return p['v'][0]
if query.path[:7] == '/embed/':
return query.path.split('/')[2]
if query.path[:3] == '/v/':
return query.path.split('/')[2]
# fail?
return None
v_id = video_id(video)
if not v_id:
raise forms.ValidationError('This is not a correctly formatted youtube URL. Please use a different URL.')
return v_id
def clean(self):
cleaned_data = self.cleaned_data
sm = cleaned_data["share_multimedia"]
license = cleaned_data["license"]
app_ids = self._get_apps_to_publish()
if sm and license not in self.dom.most_restrictive_licenses(apps_to_check=app_ids):
license_choices = [LICENSES[l] for l in self.dom.most_restrictive_licenses(apps_to_check=app_ids)]
msg = render_to_string('domain/partials/restrictive_license.html', {'licenses': license_choices})
self._errors["license"] = self.error_class([msg])
del cleaned_data["license"]
sr = cleaned_data["share_reminders"]
if sr: # check that the forms referenced by the events in each reminders exist in the project
referenced_forms = CaseReminderHandler.get_referenced_forms(domain=self.dom.name)
if referenced_forms:
apps = [Application.get(app_id) for app_id in app_ids]
app_forms = [f.unique_id for forms in [app.get_forms() for app in apps] for f in forms]
nonexistent_forms = filter(lambda f: f not in app_forms, referenced_forms)
nonexistent_forms = [FormBase.get_form(f) for f in nonexistent_forms]
if nonexistent_forms:
msg = """
Your reminders reference forms that are not being published.
Make sure the following forms are being published: %s
""" % str([f.default_name() for f in nonexistent_forms]).strip('[]')
self._errors["share_reminders"] = self.error_class([msg])
return cleaned_data
def _get_apps_to_publish(self):
app_ids = []
for d, val in self.data.iteritems():
d = d.split('-')
if len(d) < 2:
continue
if d[1] == 'publish' and val == 'on':
app_ids.append(d[0])
return app_ids
########################################################################################################
class SubAreaMixin():
def clean_sub_area(self):
area = self.cleaned_data['area']
sub_area = self.cleaned_data['sub_area']
if sub_area:
if not area:
raise forms.ValidationError(_('You may not specify a sub area when the project has no specified area'))
else:
return None
sub_areas = []
for a in DATA_DICT["area"]:
if a["name"] == area:
sub_areas = a["sub_areas"]
if sub_area not in sub_areas:
raise forms.ValidationError(_('This is not a valid sub-area for the area %s') % area)
return sub_area
class DomainGlobalSettingsForm(forms.Form):
default_timezone = TimeZoneChoiceField(label=ugettext_noop("Default Timezone"), initial="UTC")
logo = ImageField(
label=_("Custom Logo"),
required=False,
help_text=_("Upload a custom image to display instead of the "
"CommCare HQ logo. It will be automatically resized to "
"a height of 32 pixels.")
)
delete_logo = BooleanField(
label=_("Delete Logo"),
required=False,
help_text=_("Delete your custom logo and use the standard one.")
)
call_center_enabled = BooleanField(
label=_("Call Center Application"),
required=False,
help_text=_("Call Center mode is a CommCareHQ module for managing "
"call center workflows. It is still under "
"active development. Do not enable for your domain unless "
"you're actively piloting it.")
)
call_center_case_owner = ChoiceField(
label=_("Call Center Case Owner"),
initial=None,
required=False,
help_text=_("Select the person who will be listed as the owner "
"of all cases created for call center users.")
)
call_center_case_type = CharField(
label=_("Call Center Case Type"),
required=False,
help_text=_("Enter the case type to be used for FLWs in call center apps")
)
secure_submissions = BooleanField(
label=_("Only accept secure submissions"),
required=False,
help_text=_("Turn this on to prevent others from impersonating your "
"mobile workers. To use, all of your deployed applications "
"must be using secure submissions."),
)
def __init__(self, *args, **kwargs):
domain = kwargs.pop('domain', None)
self.can_use_custom_logo = kwargs.pop('can_use_custom_logo', False)
super(DomainGlobalSettingsForm, self).__init__(*args, **kwargs)
if not self.can_use_custom_logo:
del self.fields['logo']
del self.fields['delete_logo']
if domain:
if not CALLCENTER.enabled(domain):
self.fields['call_center_enabled'].widget = forms.HiddenInput()
self.fields['call_center_case_owner'].widget = forms.HiddenInput()
self.fields['call_center_case_type'].widget = forms.HiddenInput()
else:
groups = Group.get_case_sharing_groups(domain)
users = CommCareUser.by_domain(domain)
call_center_user_choices = [
(user._id, user.raw_username + ' [user]') for user in users
]
call_center_group_choices = [
(group._id, group.name + ' [group]') for group in groups
]
self.fields["call_center_case_owner"].choices = \
[('', '')] + \
call_center_user_choices + \
call_center_group_choices
def clean_default_timezone(self):
data = self.cleaned_data['default_timezone']
timezone_field = TimeZoneField()
timezone_field.run_validators(data)
return smart_str(data)
def save(self, request, domain):
try:
if self.can_use_custom_logo:
logo = self.cleaned_data['logo']
if logo:
input_image = Image.open(io.BytesIO(logo.read()))
input_image.load()
input_image.thumbnail(LOGO_SIZE)
# had issues trying to use a BytesIO instead
tmpfilename = "/tmp/%s_%s" % (uuid.uuid4(), logo.name)
input_image.save(tmpfilename, 'PNG')
with open(tmpfilename) as tmpfile:
domain.put_attachment(tmpfile, name=LOGO_ATTACHMENT)
elif self.cleaned_data['delete_logo']:
domain.delete_attachment(LOGO_ATTACHMENT)
domain.call_center_config.enabled = self.cleaned_data.get('call_center_enabled', False)
if domain.call_center_config.enabled:
domain.internal.using_call_center = True
domain.call_center_config.case_owner_id = self.cleaned_data.get('call_center_case_owner', None)
domain.call_center_config.case_type = self.cleaned_data.get('call_center_case_type', None)
global_tz = self.cleaned_data['default_timezone']
if domain.default_timezone != global_tz:
domain.default_timezone = global_tz
users = WebUser.by_domain(domain.name)
users_to_save = []
for user in users:
dm = user.get_domain_membership(domain.name)
if not dm.override_global_tz and dm.timezone != global_tz:
dm.timezone = global_tz
users_to_save.append(user)
if users_to_save:
WebUser.bulk_save(users_to_save)
secure_submissions = self.cleaned_data.get(
'secure_submissions', False)
apps_to_save = []
if secure_submissions != domain.secure_submissions:
for app in get_apps_in_domain(domain.name):
if app.secure_submissions != secure_submissions:
app.secure_submissions = secure_submissions
apps_to_save.append(app)
domain.secure_submissions = secure_submissions
domain.save()
if apps_to_save:
ApplicationBase.bulk_save(apps_to_save)
return True
except Exception:
return False
class DomainMetadataForm(DomainGlobalSettingsForm, SnapshotSettingsMixin):
    """Superset of the global settings form with admin-facing metadata.

    Adds billing customer type, the test-project flag, CommConnect /
    survey-management toggles, SMS case self-registration settings,
    superuser access restriction and the CloudCare release policy.
    """

    customer_type = ChoiceField(
        label=_("Customer Type"),
        choices=(('basic', _('Basic')),
                 ('plus', _('Plus')),
                 ('full', _('Full')))
    )
    is_test = ChoiceField(
        label=_("Test Project"),
        choices=(('true', _('Test')),
                 ('false', _('Real')),
                 ('none', _('Not Sure')))
    )
    commconnect_enabled = BooleanField(
        label=_("CommConnect Enabled"),
        required=False,
        help_text=_("CommConnect is a CommCareHQ module for SMS messages, "
                    "reminders and data collection.")
    )
    survey_management_enabled = BooleanField(
        label=_("Survey Management Enabled"),
        required=False,
        help_text=_("Survey Management is a CommCareHQ module for SMS and "
                    "Call Center based surveys for large samples. It is "
                    "under active development. Do not enable for your domain "
                    "unless you're piloting it.")
    )
    sms_case_registration_enabled = BooleanField(
        label=_("Enable Case Registration Via SMS"),
        required=False
    )
    sms_case_registration_type = CharField(
        label=_("SMS Case Registration Type"),
        required=False
    )
    # Choices for the two fields below are filled in __init__ per-domain.
    sms_case_registration_owner_id = ChoiceField(
        label=_("SMS Case Registration Owner"),
        required=False,
        choices=[]
    )
    sms_case_registration_user_id = ChoiceField(
        label=_("SMS Case Registration Submitting User"),
        required=False,
        choices=[]
    )
    restrict_superusers = BooleanField(
        label=_("Restrict Superuser Access"),
        required=False,
        help_text=_("If access to a domain is restricted only users added " +
                    "to the domain and staff members will have access.")
    )
    secure_submissions = BooleanField(
        label=_("Only accept secure submissions"),
        required=False,
        help_text=_("Turn this on to prevent others from impersonating your "
                    "mobile workers. To use, all of your deployed applications "
                    "must be using secure submissions."),
    )
    cloudcare_releases = ChoiceField(
        label=_("CloudCare should use"),
        initial=None,
        required=False,
        choices=(
            ('stars', _('Latest starred version')),
            ('nostars', _('Highest numbered version (not recommended)')),
        ),
        help_text=_("Choose whether CloudCare should use the latest "
                    "starred build or highest numbered build in your "
                    "application.")
    )

    def __init__(self, *args, **kwargs):
        # ``user`` gates the superuser-restriction field (staff only);
        # ``domain`` is used to populate the SMS-registration choice lists.
        user = kwargs.pop('user', None)
        domain = kwargs.get('domain', None)
        super(DomainMetadataForm, self).__init__(*args, **kwargs)

        if not (user and user.is_staff):
            self.fields['restrict_superusers'].widget = forms.HiddenInput()

        project = Domain.get_by_name(domain)
        if project.cloudcare_releases == 'default' or not domain_has_privilege(domain, privileges.CLOUDCARE):
            # if the cloudcare_releases flag was just defaulted, don't bother showing
            # this setting at all
            self.fields['cloudcare_releases'].widget = forms.HiddenInput()

        if domain is not None:
            groups = Group.get_case_sharing_groups(domain)
            users = CommCareUser.by_domain(domain)

            domain_group_choices = [(group._id, group.name) for group in groups]
            domain_user_choices = [(user._id, user.raw_username) for user in users]
            domain_owner_choices = domain_group_choices + domain_user_choices

            self.fields["sms_case_registration_owner_id"].choices = domain_owner_choices
            self.fields["sms_case_registration_user_id"].choices = domain_user_choices

    def _validate_sms_registration_field(self, field_name, error_msg):
        # Shared helper: the field is only mandatory when SMS case
        # registration is switched on.
        value = self.cleaned_data.get(field_name)
        if value is not None:
            value = value.strip()
        if self.cleaned_data.get("sms_case_registration_enabled", False):
            if value is None or value == "":
                raise forms.ValidationError(error_msg)
        return value

    def clean_sms_case_registration_type(self):
        return self._validate_sms_registration_field("sms_case_registration_type", _("Please enter a default case type for cases that register themselves via sms."))

    def clean_sms_case_registration_owner_id(self):
        return self._validate_sms_registration_field("sms_case_registration_owner_id", _("Please enter a default owner for cases that register themselves via sms."))

    def clean_sms_case_registration_user_id(self):
        return self._validate_sms_registration_field("sms_case_registration_user_id", _("Please enter a default submitting user for cases that register themselves via sms."))

    def save(self, request, domain):
        """Save the parent (global) settings first, then the metadata.

        Returns True on success, False if either stage failed.
        """
        res = DomainGlobalSettingsForm.save(self, request, domain)

        if not res:
            return False
        try:
            domain.project_type = self.cleaned_data['project_type']
            domain.customer_type = self.cleaned_data['customer_type']
            domain.is_test = self.cleaned_data['is_test']
            domain.commconnect_enabled = self.cleaned_data.get(
                    'commconnect_enabled', False)
            domain.survey_management_enabled = self.cleaned_data.get('survey_management_enabled', False)
            domain.sms_case_registration_enabled = self.cleaned_data.get('sms_case_registration_enabled', False)
            domain.sms_case_registration_type = self.cleaned_data.get('sms_case_registration_type')
            domain.sms_case_registration_owner_id = self.cleaned_data.get('sms_case_registration_owner_id')
            domain.sms_case_registration_user_id = self.cleaned_data.get('sms_case_registration_user_id')
            domain.restrict_superusers = self.cleaned_data.get('restrict_superusers', False)
            cloudcare_releases = self.cleaned_data.get('cloudcare_releases')
            if cloudcare_releases and domain.cloudcare_releases != 'default':
                # you're never allowed to change from default
                domain.cloudcare_releases = cloudcare_releases
            domain.save()
            return True
        except Exception, e:
            logging.exception("couldn't save project settings - error is %s" % e)
            return False
class DomainDeploymentForm(forms.Form):
    """Form describing where and when a project was deployed."""

    city = CharField(label=ugettext_noop("City"), required=False)
    countries = forms.MultipleChoiceField(label=ugettext_noop("Countries"),
                                          choices=COUNTRIES)
    region = CharField(label=ugettext_noop("Region"), required=False,
                       help_text=ugettext_noop("e.g. US, LAC, SA, Sub-Saharan Africa, Southeast Asia, etc."))
    deployment_date = CharField(label=ugettext_noop("Deployment date"), required=False)
    description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea)
    public = ChoiceField(label=ugettext_noop("Make Public?"), choices=tf_choices('Yes', 'No'), required=False)

    def save(self, domain):
        """Copy the cleaned data onto ``domain``; True on success.

        NOTE(review): ``dateutil.parser.parse`` raises on an empty
        ``deployment_date`` (the field is not required), which makes this
        return False -- confirm whether that is intended.
        """
        try:
            domain.update_deployment(city=self.cleaned_data['city'],
                countries=self.cleaned_data['countries'],
                region=self.cleaned_data['region'],
                date=dateutil.parser.parse(self.cleaned_data['deployment_date']),
                description=self.cleaned_data['description'],
                public=(self.cleaned_data['public'] == 'true'))
            return True
        except Exception:
            return False
def tuple_of_copies(a_list, blank=True):
    """Turn a list of values into ``(value, value)`` choice pairs.

    When ``blank`` is true, a leading ``('', '---')`` placeholder entry is
    included so the rendered select has an "unset" option.
    """
    pairs = [(entry, entry) for entry in a_list]
    if blank:
        return tuple([('', '---')] + pairs)
    return tuple(pairs)
class DomainInternalForm(forms.Form, SubAreaMixin):
    """Dimagi-internal project metadata (Salesforce IDs, plan, sector, etc.).

    Everything here is written to ``domain.internal`` via
    ``Domain.update_internal`` in :meth:`save`.
    """

    sf_contract_id = CharField(label=ugettext_noop("Salesforce Contract ID"), required=False)
    sf_account_id = CharField(label=ugettext_noop("Salesforce Account ID"), required=False)
    commcare_edition = ChoiceField(label=ugettext_noop("CommCare Plan"), initial="community", required=False,
                                   choices=tuple([(p, p) for p in
                                                  ["community", "standard", "pro", "advanced", "enterprise"]]))
    services = ChoiceField(label=ugettext_noop("Services"), required=False,
                           choices=tuple_of_copies(["basic", "plus", "full", "custom"]))
    initiative = forms.MultipleChoiceField(label=ugettext_noop("Initiative"), widget=forms.CheckboxSelectMultiple(),
                                           choices=tuple_of_copies(DATA_DICT["initiatives"], blank=False), required=False)
    workshop_region = CharField(label=ugettext_noop("Workshop Region"), required=False,
                                help_text=ugettext_noop("e.g. US, LAC, SA, Sub-Saharan Africa, Southeast Asia, etc."))
    project_state = ChoiceField(label=ugettext_noop("Project State"), required=False,
                                choices=tuple_of_copies(["POC", "transition", "at-scale"]))
    self_started = ChoiceField(label=ugettext_noop("Self Started?"), choices=tf_choices('Yes', 'No'), required=False)
    area = ChoiceField(label=ugettext_noop("Sector"), required=False, choices=tuple_of_copies(AREA_CHOICES))
    sub_area = ChoiceField(label=ugettext_noop("Sub-Sector"), required=False, choices=tuple_of_copies(SUB_AREA_CHOICES))
    using_adm = ChoiceField(label=ugettext_noop("Using ADM?"), choices=tf_choices('Yes', 'No'), required=False)
    using_call_center = ChoiceField(label=ugettext_noop("Using Call Center?"), choices=tf_choices('Yes', 'No'), required=False)
    organization_name = CharField(label=ugettext_noop("Organization Name"), required=False)
    notes = CharField(label=ugettext_noop("Notes"), required=False, widget=forms.Textarea)
    platform = forms.MultipleChoiceField(label=ugettext_noop("Platform"), widget=forms.CheckboxSelectMultiple(),
                                         choices=tuple_of_copies(["java", "android", "cloudcare"], blank=False), required=False)
    phone_model = CharField(label=ugettext_noop("Phone Model"), required=False)
    project_manager = CharField(label=ugettext_noop("Project Manager's Email"), required=False)
    goal_time_period = IntegerField(label=ugettext_noop("Goal time period (in days)"), required=False)
    goal_followup_rate = DecimalField(label=ugettext_noop("Goal followup rate (percentage in decimal format. e.g. 70% is .7)"), required=False)
    commtrack_domain = ChoiceField(label=ugettext_noop("CommTrack domain?"),
                                   choices=tf_choices('Yes', 'No'), required=False)

    def __init__(self, can_edit_eula, *args, **kwargs):
        # EULA-related fields only exist for users with the eula-edit
        # permission; save() mirrors this gate.
        super(DomainInternalForm, self).__init__(*args, **kwargs)
        self.can_edit_eula = can_edit_eula
        if self.can_edit_eula:
            self.fields['custom_eula'] = ChoiceField(
                label=ugettext_noop("Custom Eula?"),
                choices=tf_choices('Yes', 'No'),
                required=False,
                help_text='Set to "yes" if this project has a customized EULA as per their contract.'
            )
            self.fields['can_use_data'] = ChoiceField(
                label=ugettext_noop("Can use project data?"),
                choices=tf_choices('Yes', 'No'),
                required=False,
                help_text='Set to "no" if this project opts out of data usage. Defaults to "yes".'
            )

    def save(self, domain):
        """Write all internal metadata to ``domain`` via update_internal.

        Boolean-ish ChoiceFields submit 'true'/'false' strings and are
        converted to real booleans here.
        """
        kwargs = {"workshop_region": self.cleaned_data["workshop_region"]} if self.cleaned_data["workshop_region"] else {}
        if self.can_edit_eula:
            kwargs['custom_eula'] = self.cleaned_data['custom_eula'] == 'true'
            kwargs['can_use_data'] = self.cleaned_data['can_use_data'] == 'true'

        domain.update_internal(sf_contract_id=self.cleaned_data['sf_contract_id'],
            sf_account_id=self.cleaned_data['sf_account_id'],
            commcare_edition=self.cleaned_data['commcare_edition'],
            services=self.cleaned_data['services'],
            initiative=self.cleaned_data['initiative'],
            project_state=self.cleaned_data['project_state'],
            self_started=self.cleaned_data['self_started'] == 'true',
            area=self.cleaned_data['area'],
            sub_area=self.cleaned_data['sub_area'],
            using_adm=self.cleaned_data['using_adm'] == 'true',
            using_call_center=self.cleaned_data['using_call_center'] == 'true',
            organization_name=self.cleaned_data['organization_name'],
            notes=self.cleaned_data['notes'],
            platform=self.cleaned_data['platform'],
            project_manager=self.cleaned_data['project_manager'],
            phone_model=self.cleaned_data['phone_model'],
            goal_time_period=self.cleaned_data['goal_time_period'],
            goal_followup_rate=self.cleaned_data['goal_followup_rate'],
            commtrack_domain=self.cleaned_data['commtrack_domain'] == 'true',
            **kwargs
        )
########################################################################################################
min_pwd = 4
max_pwd = 20
# \Z anchors the pattern at end-of-string so the *entire* password must be
# made of allowed characters.  Without it, .match() only checked a prefix,
# so e.g. "abcd!" passed validation despite the error message's claim.
pwd_pattern = re.compile(r"([-\w]){" + str(min_pwd) + ',' + str(max_pwd) + r'}\Z')


def clean_password(txt):
    """Validate a password string and return it unchanged.

    Requires length in [min_pwd, max_pwd] and only letters, digits,
    hyphens and underscores; raises forms.ValidationError otherwise.
    """
    if len(txt) < min_pwd:
        raise forms.ValidationError('Password is too short; must be at least %s characters' % min_pwd)
    if len(txt) > max_pwd:
        raise forms.ValidationError('Password is too long; must be less than %s characters' % max_pwd)
    if not pwd_pattern.match(txt):
        raise forms.ValidationError('Password may only contain letters, numbers, hyphens, and underscores')
    return txt
class HQPasswordResetForm(PasswordResetForm):
    """
    Modified from PasswordResetForm to filter only web users by default.

    This prevents duplicate emails with linked commcare user accounts to the same email.
    """
    def clean_email(self):
        UserModel = get_user_model()
        email = self.cleaned_data["email"]
        # Prefer an exact username match (web users sign in with their email
        # address as username) so linked mobile workers don't also get mail.
        matching_users = UserModel._default_manager.filter(username__iexact=email)
        if matching_users.count():
            self.users_cache = matching_users
        else:
            # revert to previous behavior to theoretically allow commcare users to create an account
            self.users_cache = UserModel._default_manager.filter(email__iexact=email)

        # below here is not modified from the superclass
        if not len(self.users_cache):
            raise forms.ValidationError(self.error_messages['unknown'])
        if not any(user.is_active for user in self.users_cache):
            # none of the filtered users are active
            raise forms.ValidationError(self.error_messages['unknown'])
        # fix: source contained a redaction artifact ("<PASSWORD>") here; the
        # comparison is against Django's unusable-password marker constant.
        if any((user.password == UNUSABLE_PASSWORD)
               for user in self.users_cache):
            raise forms.ValidationError(self.error_messages['unusable'])
        return email
class ConfidentialPasswordResetForm(HQPasswordResetForm):
    """Password-reset form that never reveals whether an account exists."""

    def clean_email(self):
        # The base class raises errors that leak information about the user;
        # swallow them and pretend success -- save() is safe for missing users.
        try:
            email = super(ConfidentialPasswordResetForm, self).clean_email()
        except forms.ValidationError:
            email = self.cleaned_data['email']
        return email
class EditBillingAccountInfoForm(forms.ModelForm):
    """ModelForm over BillingContactInfo plus billing-admin management.

    Base class for the subscription confirmation/renewal forms below,
    which extend its crispy layout.
    """

    billing_admins = forms.CharField(
        required=False,
        label=ugettext_noop("Other Billing Admins"),
        help_text=ugettext_noop(mark_safe(
            "<p>These are the Web Users that will be able to access and "
            "modify your account's subscription and billing information.</p> "
            "<p>Your logged in account is already a Billing Administrator."
            "</p>"
        )),
    )

    class Meta:
        model = BillingContactInfo
        fields = ['first_name', 'last_name', 'phone_number', 'emails', 'company_name', 'first_line',
                  'second_line', 'city', 'state_province_region', 'postal_code', 'country']

    def __init__(self, account, domain, creating_user, data=None, *args, **kwargs):
        """Bind the form to an account/domain and build the crispy layout.

        ``creating_user`` is the logged-in web user; they are always kept
        as a billing admin regardless of what is submitted.
        """
        self.account = account
        self.domain = domain
        self.creating_user = creating_user

        try:
            self.current_country = self.account.billingcontactinfo.country
        except Exception:
            # no contact info yet -- fall back to the initial data, if any
            initial = kwargs.get('initial')
            self.current_country = initial.get('country') if initial is not None else None

        try:
            kwargs['instance'] = self.account.billingcontactinfo
        except BillingContactInfo.DoesNotExist:
            pass

        super(EditBillingAccountInfoForm, self).__init__(data, *args, **kwargs)

        # Pre-fill with the other admins for this domain (excluding self).
        other_admins = self.account.billing_admins.filter(
            domain=self.domain).exclude(web_user=self.creating_user).all()
        self.fields['billing_admins'].initial = ','.join([o.web_user for o in other_admins])

        self.helper = FormHelper()
        self.helper.form_class = 'form form-horizontal'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, '')),
            ),
            FormActions(
                StrictButton(
                    _("Update Billing Information"),
                    type="submit",
                    css_class='btn btn-primary',
                ),
            ),
        )

    def clean_billing_admins(self):
        """Parse the comma-separated admin list into BillingAccountAdmin rows.

        The creating user is always appended so they can never lock
        themselves out of billing.
        """
        data = self.cleaned_data['billing_admins']
        all_admins = data.split(',')
        result = []
        for admin in all_admins:
            if admin and admin != u'':
                result.append(BillingAccountAdmin.objects.get_or_create(
                    web_user=admin,
                    domain=self.domain,
                )[0])
        result.append(BillingAccountAdmin.objects.get_or_create(
            web_user=self.creating_user,
            domain=self.domain,
        )[0])
        return result

    def clean_phone_number(self):
        """Normalize to +<country><national>; try US, then GB, then any."""
        data = self.cleaned_data['phone_number']
        parsed_number = None
        if data:
            for country in ["US", "GB", None]:
                parsed_number = parse_phone_number(data, country, failhard=False)
                if parsed_number is not None:
                    break
            if parsed_number is None:
                raise forms.ValidationError(_("It looks like this phone number is invalid. "
                                              "Did you forget the country code?"))
            return "+%s%s" % (parsed_number.country_code, parsed_number.national_number)

    def save(self, commit=True):
        """Save the contact info and reconcile this domain's billing admins.

        Admins for *other* domains are preserved; this domain's set is
        replaced wholesale with the submitted list.  Always returns True.
        """
        billing_contact_info = super(EditBillingAccountInfoForm, self).save(commit=False)
        billing_contact_info.account = self.account
        billing_contact_info.save()

        billing_admins = self.cleaned_data['billing_admins']
        other_domain_admins = copy.copy(self.account.billing_admins.exclude(
            domain=self.domain).all())
        self.account.billing_admins.clear()
        for other_admin in other_domain_admins:
            self.account.billing_admins.add(other_admin)
        for admin in billing_admins:
            self.account.billing_admins.add(admin)
        self.account.save()
        return True
class ConfirmNewSubscriptionForm(EditBillingAccountInfoForm):
    """Billing-info form that also subscribes the domain to a new plan."""

    plan_edition = forms.CharField(
        widget=forms.HiddenInput,
    )

    def __init__(self, account, domain, creating_user, plan_version, current_subscription, data=None, *args, **kwargs):
        self.plan_version = plan_version
        self.current_subscription = current_subscription
        super(ConfirmNewSubscriptionForm, self).__init__(account, domain, creating_user, data=data, *args, **kwargs)

        self.fields['plan_edition'].initial = self.plan_version.plan.edition

        # local import to avoid a circular import with the views module
        from corehq.apps.domain.views import DomainSubscriptionView
        self.helper.layout = crispy.Layout(
            'plan_edition',
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, ''))
            ),
            FormActions(
                crispy.HTML('<a href="%(url)s" style="margin-right:5px;" class="btn">%(title)s</a>' % {
                    'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
                    'title': _("Cancel"),
                }),
                StrictButton(
                    _("Subscribe to Plan"),
                    type="submit",
                    css_class='btn btn-success disable-on-submit-no-spinner add-spinner-on-click',
                ),
            ),
        )

    def save(self, commit=True):
        """Save billing info, then switch/cancel/create the subscription.

        Choosing the COMMUNITY edition cancels the current subscription;
        any other edition changes the existing plan or creates a new
        subscription.  Returns True on success, False on failure (logged).
        """
        account_save_success = super(ConfirmNewSubscriptionForm, self).save(commit=False)
        if not account_save_success:
            return False

        try:
            if self.current_subscription is not None:
                if self.plan_version.plan.edition == SoftwarePlanEdition.COMMUNITY:
                    self.current_subscription.cancel_subscription(adjustment_method=SubscriptionAdjustmentMethod.USER,
                                                                  web_user=self.creating_user)
                else:
                    subscription = self.current_subscription.change_plan(
                        self.plan_version, web_user=self.creating_user, adjustment_method=SubscriptionAdjustmentMethod.USER
                    )
                    subscription.is_active = True
                    if subscription.plan_version.plan.edition == SoftwarePlanEdition.ENTERPRISE:
                        subscription.do_not_invoice = True
                    subscription.save()
            else:
                subscription = Subscription.new_domain_subscription(
                    self.account, self.domain, self.plan_version,
                    web_user=self.creating_user,
                    adjustment_method=SubscriptionAdjustmentMethod.USER)
                subscription.is_active = True
                if subscription.plan_version.plan.edition == SoftwarePlanEdition.ENTERPRISE:
                    # this point can only be reached if the initiating user was a superuser
                    subscription.do_not_invoice = True
                subscription.save()
            return True
        except Exception:
            logger.exception("There was an error subscribing the domain '%s' to plan '%s'. "
                             "Go quickly!" % (self.domain, self.plan_version.plan.name))
        return False
class ConfirmSubscriptionRenewalForm(EditBillingAccountInfoForm):
    """Billing-info form that renews the domain's current subscription."""

    plan_edition = forms.CharField(
        widget=forms.HiddenInput,
    )
    confirm_legal = forms.BooleanField(
        required=True,
    )

    def __init__(self, account, domain, creating_user, current_subscription,
                 renewed_version, data=None, *args, **kwargs):
        self.current_subscription = current_subscription
        super(ConfirmSubscriptionRenewalForm, self).__init__(
            account, domain, creating_user, data=data, *args, **kwargs
        )

        self.fields['plan_edition'].initial = renewed_version.plan.edition
        self.fields['confirm_legal'].label = mark_safe(ugettext_noop(
            'I have read and agree to the <a href="%(pa_url)s" '
            'target="_blank">Software Product Agreement</a>.'
        ) % {
            'pa_url': reverse("product_agreement"),
        })

        # local import to avoid a circular import with the views module
        from corehq.apps.domain.views import DomainSubscriptionView
        self.helper.layout = crispy.Layout(
            'plan_edition',
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, ''))
            ),
            crispy.Fieldset(
                _("Re-Confirm Product Agreement"),
                'confirm_legal',
            ),
            FormActions(
                crispy.HTML('<a href="%(url)s" style="margin-right:5px;" class="btn">%(title)s</a>' % {
                    'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
                    'title': _("Cancel"),
                }),
                StrictButton(
                    _("Renew Plan"),
                    type="submit",
                    css_class='btn btn-success',
                ),
            ),
        )

    def save(self, commit=True):
        """Save billing info, then renew the current subscription.

        NOTE(review): when renew_subscription raises
        SubscriptionRenewalError the error is logged but this still
        returns True -- confirm whether callers should see a failure.
        """
        account_save_success = super(ConfirmSubscriptionRenewalForm, self).save(commit=False)
        if not account_save_success:
            return False

        try:
            self.current_subscription.renew_subscription(
                web_user=self.creating_user,
                adjustment_method=SubscriptionAdjustmentMethod.USER,
            )
        except SubscriptionRenewalError as e:
            logger.error("[BILLING] Subscription for %(domain)s failed to "
                         "renew due to: %(error)s." % {
                             'domain': self.domain,
                             'error': e,
                         })
        return True
class ProBonoForm(forms.Form):
    """Application form for a pro-bono CommCare subscription.

    Submissions are emailed to the billing team rather than stored.
    """

    contact_email = forms.EmailField(label=_("Contact email"))
    organization = forms.CharField(label=_("Organization"))
    project_overview = forms.CharField(widget=forms.Textarea, label="Project overview")
    pay_only_features_needed = forms.CharField(widget=forms.Textarea, label="Pay only features needed")
    duration_of_project = forms.CharField(help_text=_(
        "We grant pro-bono subscriptions to match the duration of your "
        "project, up to a maximum of 12 months at a time (at which point "
        "you need to reapply)."
    ))
    domain = forms.CharField(label=_("Project Space"))
    dimagi_contact = forms.CharField(
        help_text=_("If you have already been in touch with someone from "
                    "Dimagi, please list their name."),
        required=False)
    num_expected_users = forms.CharField(label=_("Number of expected users"))

    def __init__(self, use_domain_field, *args, **kwargs):
        # ``use_domain_field`` is False when the form is shown inside a
        # project (the domain is already known); the field is then hidden
        # and optional.
        super(ProBonoForm, self).__init__(*args, **kwargs)
        if not use_domain_field:
            self.fields['domain'].required = False
        self.helper = FormHelper()
        self.helper.form_class = 'form form-horizontal'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _('Pro-Bono Application'),
                'contact_email',
                'organization',
                crispy.Div(
                    'domain',
                    style=('' if use_domain_field else 'display:none'),
                ),
                'project_overview',
                'pay_only_features_needed',
                'duration_of_project',
                'num_expected_users',
                'dimagi_contact',
            ),
            FormActions(
                crispy.ButtonHolder(
                    crispy.Submit('submit_pro_bono', _('Submit Pro-Bono Application'))
                )
            ),
        )

    def process_submission(self, domain=None):
        """Email the application to settings.BILLING_EMAIL.

        Failures are logged (with the applicant's contact email) but not
        re-raised, so the user always sees a success page.
        """
        try:
            params = {
                'pro_bono_form': self,
                'domain': domain,
            }
            html_content = render_to_string("domain/email/pro_bono_application.html", params)
            text_content = render_to_string("domain/email/pro_bono_application.txt", params)
            recipient = settings.BILLING_EMAIL
            subject = "[Pro-Bono Application]"
            if domain is not None:
                subject = "%s %s" % (subject, domain)
            send_HTML_email(subject, recipient, html_content, text_content=text_content,
                            email_from=settings.DEFAULT_FROM_EMAIL)
        except Exception:
            logging.error("Couldn't send pro-bono application email. "
                          "Contact: %s" % self.cleaned_data['contact_email']
            )
# ---------------------------------------------------------------------------
import copy
import logging
from urlparse import urlparse, parse_qs
import dateutil
import re
import io
from PIL import Image
import uuid
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
from corehq import privileges
from corehq.apps.accounting.exceptions import SubscriptionRenewalError
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.sms.phonenumbers_helper import parse_phone_number
from corehq.feature_previews import CALLCENTER
import settings
from django import forms
from crispy_forms.bootstrap import FormActions, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from django.core.urlresolvers import reverse
from django.forms.fields import (ChoiceField, CharField, BooleanField,
ImageField, DecimalField, IntegerField)
from django.forms.widgets import Select
from django.utils.encoding import smart_str
from django.contrib.auth.forms import PasswordResetForm
from django.utils.safestring import mark_safe
from django_countries.countries import COUNTRIES
from corehq.apps.accounting.models import BillingContactInfo, BillingAccountAdmin, SubscriptionAdjustmentMethod, Subscription, SoftwarePlanEdition
from corehq.apps.app_manager.models import Application, FormBase, ApplicationBase, get_apps_in_domain
from corehq.apps.domain.models import (LOGO_ATTACHMENT, LICENSES, DATA_DICT,
AREA_CHOICES, SUB_AREA_CHOICES, Domain)
from corehq.apps.reminders.models import CaseReminderHandler
from corehq.apps.users.models import WebUser, CommCareUser
from corehq.apps.groups.models import Group
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.timezones.fields import TimeZoneField
from dimagi.utils.timezones.forms import TimeZoneChoiceField
from django.template.loader import render_to_string
from django.utils.translation import ugettext_noop, ugettext as _
from corehq.apps.style.forms.widgets import BootstrapCheckboxInput, BootstrapDisabledInput
# used to resize uploaded custom logos, aspect ratio is preserved
LOGO_SIZE = (211, 32)
logger = logging.getLogger(__name__)
def tf_choices(true_txt, false_txt):
    """Build ('false', label)/('true', label) choice pairs for a boolean select."""
    choices = [('false', false_txt), ('true', true_txt)]
    return tuple(choices)
class SnapshotSettingsMixin(forms.Form):
    """Mixin contributing the shared ``project_type`` field to snapshot forms."""
    project_type = CharField(label=ugettext_noop("Project Category"), required=False,
                             help_text=ugettext_noop("e.g. MCH, HIV, etc."))
class ProjectSettingsForm(forms.Form):
    """
    Form for updating a user's project settings
    """
    global_timezone = forms.CharField(
        initial="UTC",
        label="Project Timezone",
        widget=BootstrapDisabledInput(attrs={'class': 'input-xlarge'}))
    override_global_tz = forms.BooleanField(
        initial=False,
        required=False,
        label="",
        widget=BootstrapCheckboxInput(
            attrs={'data-bind': 'checked: override_tz, event: {change: updateForm}'},
            inline_label=ugettext_noop("Override project's timezone setting just for me.")))
    user_timezone = TimeZoneChoiceField(
        label="My Timezone",
        initial=global_timezone.initial,
        widget=forms.Select(attrs={'class': 'input-xlarge', 'bindparent': 'visible: override_tz',
                                   'data-bind': 'event: {change: updateForm}'}))

    def clean_user_timezone(self):
        # Validate the submitted tz name with the shared field validators
        # and coerce it to a plain str.
        data = self.cleaned_data['user_timezone']
        timezone_field = TimeZoneField()
        timezone_field.run_validators(data)
        return smart_str(data)

    def save(self, user, domain):
        """Store the chosen timezone on the user's domain membership.

        Returns True on success, False on any error.
        """
        try:
            timezone = self.cleaned_data['global_timezone']
            override = self.cleaned_data['override_global_tz']
            if override:
                timezone = self.cleaned_data['user_timezone']
            dm = user.get_domain_membership(domain)
            dm.timezone = timezone
            dm.override_global_tz = override
            user.save()
            return True
        except Exception:
            return False
class SnapshotApplicationForm(forms.Form):
    """Per-application metadata for publishing a project snapshot."""

    publish = BooleanField(label=ugettext_noop("Publish?"), required=False)
    name = CharField(label=ugettext_noop("Name"), required=True)
    description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea,
                            help_text=ugettext_noop("A detailed technical description of the application"))
    deployment_date = CharField(label=ugettext_noop("Deployment date"), required=False)
    phone_model = CharField(label=ugettext_noop("Phone model"), required=False)
    user_type = CharField(label=ugettext_noop("User type"), required=False,
                          help_text=ugettext_noop("e.g. CHW, ASHA, RA, etc"))
    attribution_notes = CharField(label=ugettext_noop("Attribution notes"), required=False,
                                  help_text=ugettext_noop("Enter any special instructions to users here. This will be shown just before users copy your project."), widget=forms.Textarea)

    def __init__(self, *args, **kwargs):
        super(SnapshotApplicationForm, self).__init__(*args, **kwargs)
        # Fix the field rendering order explicitly.
        self.fields.keyOrder = [
            'publish',
            'name',
            'description',
            'deployment_date',
            'phone_model',
            'user_type',
            'attribution_notes'
        ]
class SnapshotFixtureForm(forms.Form):
    """Per-fixture (lookup table) metadata for publishing a snapshot."""

    publish = BooleanField(label=ugettext_noop("Publish?"), required=False)
    description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea,
                            help_text=ugettext_noop("A detailed technical description of the table"))

    def __init__(self, *args, **kwargs):
        super(SnapshotFixtureForm, self).__init__(*args, **kwargs)
        # Fix the field rendering order explicitly.
        self.fields.keyOrder = [
            'publish',
            'description',
        ]
class SnapshotSettingsForm(SnapshotSettingsMixin):
    """Metadata entered when publishing a project snapshot to Exchange:
    title, category, license, descriptions, media and sharing options."""
    title = CharField(label=ugettext_noop("Title"), required=True, max_length=100)
    project_type = CharField(label=ugettext_noop("Project Category"), required=True,
        help_text=ugettext_noop("e.g. MCH, HIV, etc."))
    license = ChoiceField(label=ugettext_noop("License"), required=True, choices=LICENSES.items(),
        widget=Select(attrs={'class': 'input-xxlarge'}))
    description = CharField(label=ugettext_noop("Long Description"), required=False, widget=forms.Textarea,
        help_text=ugettext_noop("A high-level overview of your project as a whole"))
    short_description = CharField(label=ugettext_noop("Short Description"), required=False,
        widget=forms.Textarea(attrs={'maxlength': 200}),
        help_text=ugettext_noop("A brief description of your project (max. 200 characters)"))
    share_multimedia = BooleanField(label=ugettext_noop("Share all multimedia?"), required=False,
        help_text=ugettext_noop("This will allow any user to see and use all multimedia in this project"))
    share_reminders = BooleanField(label=ugettext_noop("Share Reminders?"), required=False,
        help_text=ugettext_noop("This will publish reminders along with this project"))
    image = forms.ImageField(label=ugettext_noop("Exchange image"), required=False,
        help_text=ugettext_noop("An optional image to show other users your logo or what your app looks like"))
    video = CharField(label=ugettext_noop("Youtube Video"), required=False,
        help_text=ugettext_noop("An optional youtube clip to tell users about your app. Please copy and paste a URL to a youtube video"))
    cda_confirmed = BooleanField(required=False, label=ugettext_noop("Content Distribution Agreement"))

    def __init__(self, *args, **kw):
        """Set field ordering and render rich help text for license and CDA."""
        super(SnapshotSettingsForm, self).__init__(*args, **kw)
        self.fields.keyOrder = [
            'title',
            'short_description',
            'description',
            'project_type',
            'image',
            'video',
            'share_multimedia',
            'share_reminders',
            'license',
            'cda_confirmed',]
        self.fields['license'].help_text = \
            render_to_string('domain/partials/license_explanations.html', {
                'extra': _("All un-licensed multimedia files in "
                           "your project will be given this license")
            })
        self.fields['cda_confirmed'].help_text = \
            render_to_string('domain/partials/cda_modal.html')

    def clean_cda_confirmed(self):
        """Require CDA agreement whenever the user chose to publish.

        Note: reads the raw POST (self.data) for publish_on_submit since that
        checkbox is not a field on this form.
        """
        data_cda = self.cleaned_data['cda_confirmed']
        data_publish = self.data.get('publish_on_submit', "no") == "yes"
        if data_publish and data_cda is False:
            raise forms.ValidationError('You must agree to our Content Distribution Agreement to publish your project.')
        return data_cda

    def clean_video(self):
        """Normalize a youtube URL down to just its video id (or reject it)."""
        video = self.cleaned_data['video']
        if not video:
            return video

        def video_id(value):
            # http://stackoverflow.com/questions/4356538/how-can-i-extract-video-id-from-youtubes-link-in-python#answer-7936523
            """
            Examples:
            - http://youtu.be/SA2iWivDJiE
            - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
            - http://www.youtube.com/embed/SA2iWivDJiE
            - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
            """
            query = urlparse(value)
            if query.hostname == 'youtu.be':
                return query.path[1:]
            if query.hostname in ('www.youtube.com', 'youtube.com'):
                if query.path == '/watch':
                    p = parse_qs(query.query)
                    return p['v'][0]
                if query.path[:7] == '/embed/':
                    return query.path.split('/')[2]
                if query.path[:3] == '/v/':
                    return query.path.split('/')[2]
            # fail?
            return None

        v_id = video_id(video)
        if not v_id:
            raise forms.ValidationError('This is not a correctly formatted youtube URL. Please use a different URL.')
        return v_id

    def clean(self):
        """Cross-field validation: license restrictiveness vs. shared media,
        and that shared reminders only reference published forms."""
        cleaned_data = self.cleaned_data
        sm = cleaned_data["share_multimedia"]
        license = cleaned_data["license"]
        app_ids = self._get_apps_to_publish()

        # Sharing multimedia forces the chosen license to be at least as
        # restrictive as any license already attached to the published apps.
        if sm and license not in self.dom.most_restrictive_licenses(apps_to_check=app_ids):
            license_choices = [LICENSES[l] for l in self.dom.most_restrictive_licenses(apps_to_check=app_ids)]
            msg = render_to_string('domain/partials/restrictive_license.html', {'licenses': license_choices})
            self._errors["license"] = self.error_class([msg])
            del cleaned_data["license"]

        sr = cleaned_data["share_reminders"]
        if sr:  # check that the forms referenced by the events in each reminders exist in the project
            referenced_forms = CaseReminderHandler.get_referenced_forms(domain=self.dom.name)
            if referenced_forms:
                apps = [Application.get(app_id) for app_id in app_ids]
                app_forms = [f.unique_id for forms in [app.get_forms() for app in apps] for f in forms]
                nonexistent_forms = filter(lambda f: f not in app_forms, referenced_forms)
                nonexistent_forms = [FormBase.get_form(f) for f in nonexistent_forms]
                if nonexistent_forms:
                    msg = """
                        Your reminders reference forms that are not being published.
                        Make sure the following forms are being published: %s
                    """ % str([f.default_name() for f in nonexistent_forms]).strip('[]')
                    self._errors["share_reminders"] = self.error_class([msg])

        return cleaned_data

    def _get_apps_to_publish(self):
        """Collect app ids whose '<app_id>-publish' checkbox was checked,
        scraping the raw POST data since those inputs are not form fields."""
        app_ids = []
        for d, val in self.data.iteritems():
            d = d.split('-')
            if len(d) < 2:
                continue
            if d[1] == 'publish' and val == 'on':
                app_ids.append(d[0])
        return app_ids
########################################################################################################
class SubAreaMixin():
    """Mixin adding validation that 'sub_area' belongs to the chosen 'area'."""

    def clean_sub_area(self):
        """Return the validated sub-area, or None when none was selected."""
        chosen_area = self.cleaned_data['area']
        chosen_sub = self.cleaned_data['sub_area']
        # Nothing selected: nothing to validate.
        if not chosen_sub:
            return None
        if not chosen_area:
            raise forms.ValidationError(_('You may not specify a sub area when the project has no specified area'))
        # Look up the allowed sub-areas for the selected area in the data
        # dictionary (last matching entry wins, as before).
        valid_subs = []
        for entry in DATA_DICT["area"]:
            if entry["name"] == chosen_area:
                valid_subs = entry["sub_areas"]
        if chosen_sub not in valid_subs:
            raise forms.ValidationError(_('This is not a valid sub-area for the area %s') % chosen_area)
        return chosen_sub
class DomainGlobalSettingsForm(forms.Form):
    """Project-wide settings editable by domain admins: default timezone,
    custom logo, call center configuration and secure submissions."""
    default_timezone = TimeZoneChoiceField(label=ugettext_noop("Default Timezone"), initial="UTC")
    logo = ImageField(
        label=_("Custom Logo"),
        required=False,
        help_text=_("Upload a custom image to display instead of the "
                    "CommCare HQ logo.  It will be automatically resized to "
                    "a height of 32 pixels.")
    )
    delete_logo = BooleanField(
        label=_("Delete Logo"),
        required=False,
        help_text=_("Delete your custom logo and use the standard one.")
    )
    call_center_enabled = BooleanField(
        label=_("Call Center Application"),
        required=False,
        help_text=_("Call Center mode is a CommCareHQ module for managing "
                    "call center workflows. It is still under "
                    "active development. Do not enable for your domain unless "
                    "you're actively piloting it.")
    )
    call_center_case_owner = ChoiceField(
        label=_("Call Center Case Owner"),
        initial=None,
        required=False,
        help_text=_("Select the person who will be listed as the owner "
                    "of all cases created for call center users.")
    )
    call_center_case_type = CharField(
        label=_("Call Center Case Type"),
        required=False,
        help_text=_("Enter the case type to be used for FLWs in call center apps")
    )
    secure_submissions = BooleanField(
        label=_("Only accept secure submissions"),
        required=False,
        help_text=_("Turn this on to prevent others from impersonating your "
                    "mobile workers. To use, all of your deployed applications "
                    "must be using secure submissions."),
    )

    def __init__(self, *args, **kwargs):
        """Hide logo fields unless allowed, and hide or populate the call
        center fields depending on the domain's feature flag."""
        domain = kwargs.pop('domain', None)
        self.can_use_custom_logo = kwargs.pop('can_use_custom_logo', False)
        super(DomainGlobalSettingsForm, self).__init__(*args, **kwargs)
        if not self.can_use_custom_logo:
            del self.fields['logo']
            del self.fields['delete_logo']

        if domain:
            if not CALLCENTER.enabled(domain):
                self.fields['call_center_enabled'].widget = forms.HiddenInput()
                self.fields['call_center_case_owner'].widget = forms.HiddenInput()
                self.fields['call_center_case_type'].widget = forms.HiddenInput()
            else:
                # Case owner may be either a mobile worker or a case-sharing group.
                groups = Group.get_case_sharing_groups(domain)
                users = CommCareUser.by_domain(domain)

                call_center_user_choices = [
                    (user._id, user.raw_username + ' [user]') for user in users
                ]
                call_center_group_choices = [
                    (group._id, group.name + ' [group]') for group in groups
                ]

                self.fields["call_center_case_owner"].choices = \
                    [('', '')] + \
                    call_center_user_choices + \
                    call_center_group_choices

    def clean_default_timezone(self):
        """Validate the timezone name and return it as a plain string."""
        data = self.cleaned_data['default_timezone']
        timezone_field = TimeZoneField()
        timezone_field.run_validators(data)
        return smart_str(data)

    def save(self, request, domain):
        """Apply the cleaned settings to ``domain``.

        Returns True on success, False on any error (callers rely on the bool).
        """
        try:
            if self.can_use_custom_logo:
                logo = self.cleaned_data['logo']
                if logo:
                    input_image = Image.open(io.BytesIO(logo.read()))
                    input_image.load()
                    input_image.thumbnail(LOGO_SIZE)
                    # had issues trying to use a BytesIO instead
                    tmpfilename = "/tmp/%s_%s" % (uuid.uuid4(), logo.name)
                    input_image.save(tmpfilename, 'PNG')

                    # Bug fix: the PNG must be re-read in *binary* mode; text
                    # mode can corrupt the attachment where newline translation
                    # applies.
                    with open(tmpfilename, 'rb') as tmpfile:
                        domain.put_attachment(tmpfile, name=LOGO_ATTACHMENT)
                elif self.cleaned_data['delete_logo']:
                    domain.delete_attachment(LOGO_ATTACHMENT)

            domain.call_center_config.enabled = self.cleaned_data.get('call_center_enabled', False)
            if domain.call_center_config.enabled:
                domain.internal.using_call_center = True
                domain.call_center_config.case_owner_id = self.cleaned_data.get('call_center_case_owner', None)
                domain.call_center_config.case_type = self.cleaned_data.get('call_center_case_type', None)

            global_tz = self.cleaned_data['default_timezone']
            if domain.default_timezone != global_tz:
                domain.default_timezone = global_tz
                # Propagate the new default to every web user who has not
                # overridden the project timezone.
                users = WebUser.by_domain(domain.name)
                users_to_save = []
                for user in users:
                    dm = user.get_domain_membership(domain.name)
                    if not dm.override_global_tz and dm.timezone != global_tz:
                        dm.timezone = global_tz
                        users_to_save.append(user)
                if users_to_save:
                    WebUser.bulk_save(users_to_save)

            secure_submissions = self.cleaned_data.get(
                'secure_submissions', False)
            apps_to_save = []
            if secure_submissions != domain.secure_submissions:
                # Keep all apps in the domain in sync with the new setting.
                for app in get_apps_in_domain(domain.name):
                    if app.secure_submissions != secure_submissions:
                        app.secure_submissions = secure_submissions
                        apps_to_save.append(app)
            domain.secure_submissions = secure_submissions
            domain.save()
            if apps_to_save:
                ApplicationBase.bulk_save(apps_to_save)
            return True
        except Exception:
            return False
class DomainMetadataForm(DomainGlobalSettingsForm, SnapshotSettingsMixin):
    """Admin-facing superset of the global settings form: customer type,
    SMS case registration, superuser restriction, CloudCare build choice."""
    customer_type = ChoiceField(
        label=_("Customer Type"),
        choices=(('basic', _('Basic')),
                 ('plus', _('Plus')),
                 ('full', _('Full')))
    )
    is_test = ChoiceField(
        label=_("Test Project"),
        choices=(('true', _('Test')),
                 ('false', _('Real')),
                 ('none', _('Not Sure')))
    )
    commconnect_enabled = BooleanField(
        label=_("CommConnect Enabled"),
        required=False,
        help_text=_("CommConnect is a CommCareHQ module for SMS messages, "
                    "reminders and data collection.")
    )
    survey_management_enabled = BooleanField(
        label=_("Survey Management Enabled"),
        required=False,
        help_text=_("Survey Management is a CommCareHQ module for SMS and "
                    "Call Center based surveys for large samples.  It is "
                    "under active development. Do not enable for your domain "
                    "unless you're piloting it.")
    )
    sms_case_registration_enabled = BooleanField(
        label=_("Enable Case Registration Via SMS"),
        required=False
    )
    sms_case_registration_type = CharField(
        label=_("SMS Case Registration Type"),
        required=False
    )
    sms_case_registration_owner_id = ChoiceField(
        label=_("SMS Case Registration Owner"),
        required=False,
        choices=[]
    )
    sms_case_registration_user_id = ChoiceField(
        label=_("SMS Case Registration Submitting User"),
        required=False,
        choices=[]
    )
    restrict_superusers = BooleanField(
        label=_("Restrict Superuser Access"),
        required=False,
        help_text=_("If access to a domain is restricted only users added " +
                    "to the domain and staff members will have access.")
    )
    # NOTE(review): re-declared identically to the parent class field --
    # appears redundant; confirm before removing.
    secure_submissions = BooleanField(
        label=_("Only accept secure submissions"),
        required=False,
        help_text=_("Turn this on to prevent others from impersonating your "
                    "mobile workers. To use, all of your deployed applications "
                    "must be using secure submissions."),
    )
    cloudcare_releases = ChoiceField(
        label=_("CloudCare should use"),
        initial=None,
        required=False,
        choices=(
            ('stars', _('Latest starred version')),
            ('nostars', _('Highest numbered version (not recommended)')),
        ),
        help_text=_("Choose whether CloudCare should use the latest "
                    "starred build or highest numbered build in your "
                    "application.")
    )

    def __init__(self, *args, **kwargs):
        """Hide staff-only and flag-dependent fields, and populate the SMS
        case registration owner/user choices from the domain's users/groups."""
        user = kwargs.pop('user', None)
        domain = kwargs.get('domain', None)
        super(DomainMetadataForm, self).__init__(*args, **kwargs)

        if not (user and user.is_staff):
            self.fields['restrict_superusers'].widget = forms.HiddenInput()

        project = Domain.get_by_name(domain)
        if project.cloudcare_releases == 'default' or not domain_has_privilege(domain, privileges.CLOUDCARE):
            # if the cloudcare_releases flag was just defaulted, don't bother showing
            # this setting at all
            self.fields['cloudcare_releases'].widget = forms.HiddenInput()

        if domain is not None:
            groups = Group.get_case_sharing_groups(domain)
            users = CommCareUser.by_domain(domain)

            domain_group_choices = [(group._id, group.name) for group in groups]
            domain_user_choices = [(user._id, user.raw_username) for user in users]
            domain_owner_choices = domain_group_choices + domain_user_choices

            self.fields["sms_case_registration_owner_id"].choices = domain_owner_choices
            self.fields["sms_case_registration_user_id"].choices = domain_user_choices

    def _validate_sms_registration_field(self, field_name, error_msg):
        """Require ``field_name`` to be non-blank whenever SMS case
        registration is enabled; returns the stripped value."""
        value = self.cleaned_data.get(field_name)
        if value is not None:
            value = value.strip()
        if self.cleaned_data.get("sms_case_registration_enabled", False):
            if value is None or value == "":
                raise forms.ValidationError(error_msg)
        return value

    def clean_sms_case_registration_type(self):
        return self._validate_sms_registration_field("sms_case_registration_type", _("Please enter a default case type for cases that register themselves via sms."))

    def clean_sms_case_registration_owner_id(self):
        return self._validate_sms_registration_field("sms_case_registration_owner_id", _("Please enter a default owner for cases that register themselves via sms."))

    def clean_sms_case_registration_user_id(self):
        return self._validate_sms_registration_field("sms_case_registration_user_id", _("Please enter a default submitting user for cases that register themselves via sms."))

    def save(self, request, domain):
        """Run the parent save first, then persist the metadata fields.

        Returns True on success, False on any error (mirrors the parent)."""
        res = DomainGlobalSettingsForm.save(self, request, domain)

        if not res:
            return False
        try:
            domain.project_type = self.cleaned_data['project_type']
            domain.customer_type = self.cleaned_data['customer_type']
            domain.is_test = self.cleaned_data['is_test']
            domain.commconnect_enabled = self.cleaned_data.get(
                    'commconnect_enabled', False)
            domain.survey_management_enabled = self.cleaned_data.get('survey_management_enabled', False)
            domain.sms_case_registration_enabled = self.cleaned_data.get('sms_case_registration_enabled', False)
            domain.sms_case_registration_type = self.cleaned_data.get('sms_case_registration_type')
            domain.sms_case_registration_owner_id = self.cleaned_data.get('sms_case_registration_owner_id')
            domain.sms_case_registration_user_id = self.cleaned_data.get('sms_case_registration_user_id')
            domain.restrict_superusers = self.cleaned_data.get('restrict_superusers', False)
            cloudcare_releases = self.cleaned_data.get('cloudcare_releases')
            if cloudcare_releases and domain.cloudcare_releases != 'default':
                # you're never allowed to change from default
                domain.cloudcare_releases = cloudcare_releases
            domain.save()
            return True
        except Exception, e:
            logging.exception("couldn't save project settings - error is %s" % e)
            return False
class DomainDeploymentForm(forms.Form):
    """Form for a project's deployment metadata: location, date, visibility."""
    city = CharField(label=ugettext_noop("City"), required=False)
    countries = forms.MultipleChoiceField(label=ugettext_noop("Countries"),
                                          choices=COUNTRIES)
    region = CharField(label=ugettext_noop("Region"), required=False,
                       help_text=ugettext_noop("e.g. US, LAC, SA, Sub-Saharan Africa, Southeast Asia, etc."))
    deployment_date = CharField(label=ugettext_noop("Deployment date"), required=False)
    description = CharField(label=ugettext_noop("Description"), required=False, widget=forms.Textarea)
    public = ChoiceField(label=ugettext_noop("Make Public?"), choices=tf_choices('Yes', 'No'), required=False)

    def save(self, domain):
        """Persist the deployment info onto ``domain``.

        Returns True on success, False on any failure (including an
        unparseable deployment date)."""
        data = self.cleaned_data
        try:
            domain.update_deployment(
                city=data['city'],
                countries=data['countries'],
                region=data['region'],
                date=dateutil.parser.parse(data['deployment_date']),
                description=data['description'],
                public=(data['public'] == 'true'),
            )
        except Exception:
            return False
        return True
def tuple_of_copies(a_list, blank=True):
    """Build Django-style choice pairs ``((x, x), ...)`` from ``a_list``,
    optionally prefixed with a blank ``('', '---')`` entry."""
    pairs = []
    if blank:
        pairs.append(('', '---'))
    for item in a_list:
        pairs.append((item, item))
    return tuple(pairs)
class DomainInternalForm(forms.Form, SubAreaMixin):
    """Dimagi-internal project attributes (salesforce ids, plan, initiative,
    sector, goals, ...); sub-area validation comes from SubAreaMixin."""
    sf_contract_id = CharField(label=ugettext_noop("Salesforce Contract ID"), required=False)
    sf_account_id = CharField(label=ugettext_noop("Salesforce Account ID"), required=False)
    commcare_edition = ChoiceField(label=ugettext_noop("CommCare Plan"), initial="community", required=False,
                                   choices=tuple([(p, p) for p in
                                                  ["community", "standard", "pro", "advanced", "enterprise"]]))
    services = ChoiceField(label=ugettext_noop("Services"), required=False,
                           choices=tuple_of_copies(["basic", "plus", "full", "custom"]))
    initiative = forms.MultipleChoiceField(label=ugettext_noop("Initiative"), widget=forms.CheckboxSelectMultiple(),
                                           choices=tuple_of_copies(DATA_DICT["initiatives"], blank=False), required=False)
    workshop_region = CharField(label=ugettext_noop("Workshop Region"), required=False,
                                help_text=ugettext_noop("e.g. US, LAC, SA, Sub-Saharan Africa, Southeast Asia, etc."))
    project_state = ChoiceField(label=ugettext_noop("Project State"), required=False,
                                choices=tuple_of_copies(["POC", "transition", "at-scale"]))
    self_started = ChoiceField(label=ugettext_noop("Self Started?"), choices=tf_choices('Yes', 'No'), required=False)
    area = ChoiceField(label=ugettext_noop("Sector"), required=False, choices=tuple_of_copies(AREA_CHOICES))
    sub_area = ChoiceField(label=ugettext_noop("Sub-Sector"), required=False, choices=tuple_of_copies(SUB_AREA_CHOICES))
    using_adm = ChoiceField(label=ugettext_noop("Using ADM?"), choices=tf_choices('Yes', 'No'), required=False)
    using_call_center = ChoiceField(label=ugettext_noop("Using Call Center?"), choices=tf_choices('Yes', 'No'), required=False)
    organization_name = CharField(label=ugettext_noop("Organization Name"), required=False)
    notes = CharField(label=ugettext_noop("Notes"), required=False, widget=forms.Textarea)
    platform = forms.MultipleChoiceField(label=ugettext_noop("Platform"), widget=forms.CheckboxSelectMultiple(),
                                         choices=tuple_of_copies(["java", "android", "cloudcare"], blank=False), required=False)
    phone_model = CharField(label=ugettext_noop("Phone Model"), required=False)
    project_manager = CharField(label=ugettext_noop("Project Manager's Email"), required=False)
    goal_time_period = IntegerField(label=ugettext_noop("Goal time period (in days)"), required=False)
    goal_followup_rate = DecimalField(label=ugettext_noop("Goal followup rate (percentage in decimal format. e.g. 70% is .7)"), required=False)
    commtrack_domain = ChoiceField(label=ugettext_noop("CommTrack domain?"),
                                   choices=tf_choices('Yes', 'No'), required=False)

    def __init__(self, can_edit_eula, *args, **kwargs):
        """EULA-related fields are added only for users allowed to edit them."""
        super(DomainInternalForm, self).__init__(*args, **kwargs)
        self.can_edit_eula = can_edit_eula
        if self.can_edit_eula:
            self.fields['custom_eula'] = ChoiceField(
                label=ugettext_noop("Custom Eula?"),
                choices=tf_choices('Yes', 'No'),
                required=False,
                help_text='Set to "yes" if this project has a customized EULA as per their contract.'
            )
            self.fields['can_use_data'] = ChoiceField(
                label=ugettext_noop("Can use project data?"),
                choices=tf_choices('Yes', 'No'),
                required=False,
                help_text='Set to "no" if this project opts out of data usage. Defaults to "yes".'
            )

    def save(self, domain):
        """Write the cleaned internal attributes onto ``domain``.

        Yes/No ChoiceFields are stored as booleans by comparing to 'true'."""
        kwargs = {"workshop_region": self.cleaned_data["workshop_region"]} if self.cleaned_data["workshop_region"] else {}
        if self.can_edit_eula:
            kwargs['custom_eula'] = self.cleaned_data['custom_eula'] == 'true'
            kwargs['can_use_data'] = self.cleaned_data['can_use_data'] == 'true'

        domain.update_internal(sf_contract_id=self.cleaned_data['sf_contract_id'],
                               sf_account_id=self.cleaned_data['sf_account_id'],
                               commcare_edition=self.cleaned_data['commcare_edition'],
                               services=self.cleaned_data['services'],
                               initiative=self.cleaned_data['initiative'],
                               project_state=self.cleaned_data['project_state'],
                               self_started=self.cleaned_data['self_started'] == 'true',
                               area=self.cleaned_data['area'],
                               sub_area=self.cleaned_data['sub_area'],
                               using_adm=self.cleaned_data['using_adm'] == 'true',
                               using_call_center=self.cleaned_data['using_call_center'] == 'true',
                               organization_name=self.cleaned_data['organization_name'],
                               notes=self.cleaned_data['notes'],
                               platform=self.cleaned_data['platform'],
                               project_manager=self.cleaned_data['project_manager'],
                               phone_model=self.cleaned_data['phone_model'],
                               goal_time_period=self.cleaned_data['goal_time_period'],
                               goal_followup_rate=self.cleaned_data['goal_followup_rate'],
                               commtrack_domain=self.cleaned_data['commtrack_domain'] == 'true',
                               **kwargs
                               )
########################################################################################################
# Password length limits and the pattern a valid password must match in full.
min_pwd = 4
max_pwd = 20
# Anchored with ^...\Z so the WHOLE password must consist of allowed
# characters (letters, digits, hyphen, underscore).
pwd_pattern = re.compile(r"^([-\w]){" + str(min_pwd) + ',' + str(max_pwd) + r'}\Z')

def clean_password(txt):
    """Validate a password: 4-20 characters, letters/digits/hyphen/underscore.

    Returns the password unchanged when valid; raises ValidationError
    otherwise.
    """
    if len(txt) < min_pwd:
        raise forms.ValidationError('Password is too short; must be at least %s characters' % min_pwd)
    if len(txt) > max_pwd:
        raise forms.ValidationError('Password is too long; must be less than %s characters' % max_pwd)
    # Bug fix: the pattern was previously unanchored, so match() only checked
    # that SOME prefix of at least min_pwd valid characters existed -- e.g.
    # "abcd!!!!" was accepted.  The anchored pattern rejects such input.
    if not pwd_pattern.match(txt):
        raise forms.ValidationError('Password may only contain letters, numbers, hyphens, and underscores')
    return txt
class HQPasswordResetForm(PasswordResetForm):
    """
    Modified from PasswordResetForm to filter only web users by default.
    This prevents duplicate emails with linked commcare user accounts to the same email.
    """
    def clean_email(self):
        """Validate that the address maps to at least one active, resettable
        user.  Matches on username first (web users); falls back to the email
        field so commcare users can theoretically still create accounts."""
        UserModel = get_user_model()
        email = self.cleaned_data["email"]
        matching_users = UserModel._default_manager.filter(username__iexact=email)
        if matching_users.count():
            self.users_cache = matching_users
        else:
            # revert to previous behavior to theoretically allow commcare users to create an account
            self.users_cache = UserModel._default_manager.filter(email__iexact=email)

        # below here is not modified from the superclass
        if not len(self.users_cache):
            raise forms.ValidationError(self.error_messages['unknown'])
        if not any(user.is_active for user in self.users_cache):
            # none of the filtered users are active
            raise forms.ValidationError(self.error_messages['unknown'])
        # Bug fix: this line previously compared against an undefined
        # placeholder token; restore the superclass's intent of refusing a
        # reset when a matched account has no usable password.
        if any(not user.has_usable_password()
               for user in self.users_cache):
            raise forms.ValidationError(self.error_messages['unusable'])
        return email
class ConfidentialPasswordResetForm(HQPasswordResetForm):
    """Variant of HQPasswordResetForm that never reveals whether an email
    address corresponds to a real account."""

    def clean_email(self):
        """Return the cleaned email even when validation fails.

        The base class raises errors that give away information about the
        user; swallowing them is safe because save() tolerates missing users.
        """
        try:
            email = super(ConfidentialPasswordResetForm, self).clean_email()
        except forms.ValidationError:
            return self.cleaned_data['email']
        return email
class EditBillingAccountInfoForm(forms.ModelForm):
    """ModelForm over BillingContactInfo, plus management of the billing
    account's extra administrators for the current domain."""
    billing_admins = forms.CharField(
        required=False,
        label=ugettext_noop("Other Billing Admins"),
        help_text=ugettext_noop(mark_safe(
            "<p>These are the Web Users that will be able to access and "
            "modify your account's subscription and billing information.</p> "
            "<p>Your logged in account is already a Billing Administrator."
            "</p>"
        )),
    )

    class Meta:
        model = BillingContactInfo
        fields = ['first_name', 'last_name', 'phone_number', 'emails', 'company_name', 'first_line',
                  'second_line', 'city', 'state_province_region', 'postal_code', 'country']

    def __init__(self, account, domain, creating_user, data=None, *args, **kwargs):
        """Bind to the account's existing contact info (when present), seed the
        admin list with this domain's other admins, and build the crispy layout."""
        self.account = account
        self.domain = domain
        self.creating_user = creating_user

        try:
            self.current_country = self.account.billingcontactinfo.country
        except Exception:
            # no contact info saved yet -- fall back to a country passed via initial
            initial = kwargs.get('initial')
            self.current_country = initial.get('country') if initial is not None else None

        try:
            kwargs['instance'] = self.account.billingcontactinfo
        except BillingContactInfo.DoesNotExist:
            pass

        super(EditBillingAccountInfoForm, self).__init__(data, *args, **kwargs)

        # The requesting user is implicit; only show the *other* admins.
        other_admins = self.account.billing_admins.filter(
            domain=self.domain).exclude(web_user=self.creating_user).all()
        self.fields['billing_admins'].initial = ','.join([o.web_user for o in other_admins])

        self.helper = FormHelper()
        self.helper.form_class = 'form form-horizontal'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, '')),
            ),
            FormActions(
                StrictButton(
                    _("Update Billing Information"),
                    type="submit",
                    css_class='btn btn-primary',
                ),
            ),
        )

    def clean_billing_admins(self):
        """Turn the comma-separated admin list into BillingAccountAdmin rows
        (creating them as needed), always including the requesting user."""
        data = self.cleaned_data['billing_admins']
        all_admins = data.split(',')
        result = []
        for admin in all_admins:
            if admin and admin != u'':
                result.append(BillingAccountAdmin.objects.get_or_create(
                    web_user=admin,
                    domain=self.domain,
                )[0])
        result.append(BillingAccountAdmin.objects.get_or_create(
            web_user=self.creating_user,
            domain=self.domain,
        )[0])
        return result

    def clean_phone_number(self):
        """Parse the number (trying US, then GB, then no default region) and
        normalize it to '+<country_code><national_number>' form."""
        data = self.cleaned_data['phone_number']
        parsed_number = None
        if data:
            for country in ["US", "GB", None]:
                parsed_number = parse_phone_number(data, country, failhard=False)
                if parsed_number is not None:
                    break
            if parsed_number is None:
                raise forms.ValidationError(_("It looks like this phone number is invalid. "
                                              "Did you forget the country code?"))
            return "+%s%s" % (parsed_number.country_code, parsed_number.national_number)

    def save(self, commit=True):
        """Persist the contact info and rewrite this domain's admin set while
        leaving other domains' admins untouched.  Returns True."""
        billing_contact_info = super(EditBillingAccountInfoForm, self).save(commit=False)
        billing_contact_info.account = self.account
        billing_contact_info.save()

        billing_admins = self.cleaned_data['billing_admins']
        # Snapshot admins belonging to other domains before clearing the m2m,
        # then re-add them alongside this domain's new admin list.
        other_domain_admins = copy.copy(self.account.billing_admins.exclude(
            domain=self.domain).all())
        self.account.billing_admins.clear()
        for other_admin in other_domain_admins:
            self.account.billing_admins.add(other_admin)
        for admin in billing_admins:
            self.account.billing_admins.add(admin)
        self.account.save()
        return True
class ConfirmNewSubscriptionForm(EditBillingAccountInfoForm):
    """Billing-info form shown when switching the domain to a new plan;
    saving also performs the plan change / cancellation / creation."""
    plan_edition = forms.CharField(
        widget=forms.HiddenInput,
    )

    def __init__(self, account, domain, creating_user, plan_version, current_subscription, data=None, *args, **kwargs):
        self.plan_version = plan_version
        self.current_subscription = current_subscription
        super(ConfirmNewSubscriptionForm, self).__init__(account, domain, creating_user, data=data, *args, **kwargs)

        self.fields['plan_edition'].initial = self.plan_version.plan.edition

        # imported here to avoid a circular import with the views module
        from corehq.apps.domain.views import DomainSubscriptionView
        self.helper.layout = crispy.Layout(
            'plan_edition',
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, ''))
            ),
            FormActions(
                crispy.HTML('<a href="%(url)s" style="margin-right:5px;" class="btn">%(title)s</a>' % {
                    'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
                    'title': _("Cancel"),
                }),
                StrictButton(
                    _("Subscribe to Plan"),
                    type="submit",
                    css_class='btn btn-success disable-on-submit-no-spinner add-spinner-on-click',
                ),
            ),
        )

    def save(self, commit=True):
        """Save billing info, then switch/cancel/create the subscription.

        Returns True on success, False when either step fails."""
        account_save_success = super(ConfirmNewSubscriptionForm, self).save(commit=False)
        if not account_save_success:
            return False

        try:
            if self.current_subscription is not None:
                if self.plan_version.plan.edition == SoftwarePlanEdition.COMMUNITY:
                    # downgrading to community means cancelling the paid plan
                    self.current_subscription.cancel_subscription(adjustment_method=SubscriptionAdjustmentMethod.USER,
                                                                  web_user=self.creating_user)
                else:
                    subscription = self.current_subscription.change_plan(
                        self.plan_version, web_user=self.creating_user, adjustment_method=SubscriptionAdjustmentMethod.USER
                    )
                    subscription.is_active = True
                    if subscription.plan_version.plan.edition == SoftwarePlanEdition.ENTERPRISE:
                        subscription.do_not_invoice = True
                    subscription.save()
            else:
                subscription = Subscription.new_domain_subscription(
                    self.account, self.domain, self.plan_version,
                    web_user=self.creating_user,
                    adjustment_method=SubscriptionAdjustmentMethod.USER)
                subscription.is_active = True
                if subscription.plan_version.plan.edition == SoftwarePlanEdition.ENTERPRISE:
                    # this point can only be reached if the initiating user was a superuser
                    subscription.do_not_invoice = True
                subscription.save()
            return True
        except Exception:
            logger.exception("There was an error subscribing the domain '%s' to plan '%s'. "
                             "Go quickly!" % (self.domain, self.plan_version.plan.name))
        return False
class ConfirmSubscriptionRenewalForm(EditBillingAccountInfoForm):
    """Billing-info form for renewing the current subscription; requires the
    user to re-confirm the product agreement."""
    plan_edition = forms.CharField(
        widget=forms.HiddenInput,
    )
    confirm_legal = forms.BooleanField(
        required=True,
    )

    def __init__(self, account, domain, creating_user, current_subscription,
                 renewed_version, data=None, *args, **kwargs):
        self.current_subscription = current_subscription
        super(ConfirmSubscriptionRenewalForm, self).__init__(
            account, domain, creating_user, data=data, *args, **kwargs
        )
        self.fields['plan_edition'].initial = renewed_version.plan.edition
        self.fields['confirm_legal'].label = mark_safe(ugettext_noop(
            'I have read and agree to the <a href="%(pa_url)s" '
            'target="_blank">Software Product Agreement</a>.'
        ) % {
            'pa_url': reverse("product_agreement"),
        })

        # imported here to avoid a circular import with the views module
        from corehq.apps.domain.views import DomainSubscriptionView
        self.helper.layout = crispy.Layout(
            'plan_edition',
            crispy.Fieldset(
                _("Billing Administrators"),
                crispy.Field('billing_admins', css_class='input-xxlarge'),
            ),
            crispy.Fieldset(
                _("Basic Information"),
                'company_name',
                'first_name',
                'last_name',
                crispy.Field('emails', css_class='input-xxlarge'),
                'phone_number',
            ),
            crispy.Fieldset(
                _("Mailing Address"),
                'first_line',
                'second_line',
                'city',
                'state_province_region',
                'postal_code',
                crispy.Field('country', css_class="input-large",
                             data_countryname=dict(COUNTRIES).get(self.current_country, ''))
            ),
            crispy.Fieldset(
                _("Re-Confirm Product Agreement"),
                'confirm_legal',
            ),
            FormActions(
                crispy.HTML('<a href="%(url)s" style="margin-right:5px;" class="btn">%(title)s</a>' % {
                    'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
                    'title': _("Cancel"),
                }),
                StrictButton(
                    _("Renew Plan"),
                    type="submit",
                    css_class='btn btn-success',
                ),
            ),
        )

    def save(self, commit=True):
        """Save billing info and renew the subscription.

        Returns True on success, False when either step fails."""
        account_save_success = super(ConfirmSubscriptionRenewalForm, self).save(commit=False)
        if not account_save_success:
            return False

        try:
            self.current_subscription.renew_subscription(
                web_user=self.creating_user,
                adjustment_method=SubscriptionAdjustmentMethod.USER,
            )
        except SubscriptionRenewalError as e:
            logger.error("[BILLING] Subscription for %(domain)s failed to "
                         "renew due to: %(error)s." % {
                             'domain': self.domain,
                             'error': e,
                         })
            # Bug fix: previously this fell through and returned True even
            # though the renewal failed; surface the failure to the caller.
            return False
        return True
class ProBonoForm(forms.Form):
    """Application form for a pro-bono subscription; on submit it is emailed
    to the billing address rather than being stored."""
    contact_email = forms.EmailField(label=_("Contact email"))
    organization = forms.CharField(label=_("Organization"))
    project_overview = forms.CharField(widget=forms.Textarea, label="Project overview")
    pay_only_features_needed = forms.CharField(widget=forms.Textarea, label="Pay only features needed")
    duration_of_project = forms.CharField(help_text=_(
        "We grant pro-bono subscriptions to match the duration of your "
        "project, up to a maximum of 12 months at a time (at which point "
        "you need to reapply)."
    ))
    domain = forms.CharField(label=_("Project Space"))
    dimagi_contact = forms.CharField(
        help_text=_("If you have already been in touch with someone from "
                    "Dimagi, please list their name."),
        required=False)
    num_expected_users = forms.CharField(label=_("Number of expected users"))

    def __init__(self, use_domain_field, *args, **kwargs):
        """When use_domain_field is False the project-space field is optional
        and visually hidden (the domain is supplied by the view instead)."""
        super(ProBonoForm, self).__init__(*args, **kwargs)
        if not use_domain_field:
            self.fields['domain'].required = False
        self.helper = FormHelper()
        self.helper.form_class = 'form form-horizontal'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _('Pro-Bono Application'),
                'contact_email',
                'organization',
                crispy.Div(
                    'domain',
                    style=('' if use_domain_field else 'display:none'),
                ),
                'project_overview',
                'pay_only_features_needed',
                'duration_of_project',
                'num_expected_users',
                'dimagi_contact',
            ),
            FormActions(
                crispy.ButtonHolder(
                    crispy.Submit('submit_pro_bono', _('Submit Pro-Bono Application'))
                )
            ),
        )

    def process_submission(self, domain=None):
        """Render the application to HTML/text and email it to billing.

        Failures are logged but deliberately not raised (best-effort send)."""
        try:
            params = {
                'pro_bono_form': self,
                'domain': domain,
            }
            html_content = render_to_string("domain/email/pro_bono_application.html", params)
            text_content = render_to_string("domain/email/pro_bono_application.txt", params)
            recipient = settings.BILLING_EMAIL
            subject = "[Pro-Bono Application]"
            if domain is not None:
                subject = "%s %s" % (subject, domain)
            send_HTML_email(subject, recipient, html_content, text_content=text_content,
                            email_from=settings.DEFAULT_FROM_EMAIL)
        except Exception:
            logging.error("Couldn't send pro-bono application email. "
                          "Contact: %s" % self.cleaned_data['contact_email']
            )
|
en
| 0.740227
|
# used to resize uploaded custom logos, aspect ratio is preserved Form for updating a user's project settings # http://stackoverflow.com/questions/4356538/how-can-i-extract-video-id-from-youtubes-link-in-python#answer-7936523 Examples: - http://youtu.be/SA2iWivDJiE - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu - http://www.youtube.com/embed/SA2iWivDJiE - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US # fail? # check that the forms referenced by the events in each reminders exist in the project Your reminders reference forms that are not being published. Make sure the following forms are being published: %s ######################################################################################################## # had issues trying to use a BytesIO instead # if the cloudcare_releases flag was just defaulted, don't bother showing # this setting at all # you're never allowed to change from default ######################################################################################################## Modified from PasswordResetForm to filter only web users by default. This prevents duplicate emails with linked commcare user accounts to the same email. # revert to previous behavior to theoretically allow commcare users to create an account # below here is not modified from the superclass # none of the filtered users are active # The base class throws various emails that give away information about the user; # we can pretend all is well since the save() method is safe for missing users. # this point can only be reached if the initiating user was a superuser
| 1.377663
| 1
|
tools/run_kotlin_benchmarks.py
|
demon-xxi/r8
| 8
|
6628835
|
#!/usr/bin/env python
# Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script for running kotlin based benchmarks
import golem
import optparse
import os
import subprocess
import sys
import toolhelper
import utils
BENCHMARK_ROOT = os.path.join(utils.REPO_ROOT, 'third_party', 'benchmarks',
'kotlin-benches')
BENCHMARK_PATTERN = '{benchmark}/kotlin/perf/build/libs/perf-1.0-BENCH.jar'
BENCHMARK_MAIN_CLASS = 'com.android.kt.bms.cli.Runner'
ART = os.path.join(utils.TOOLS_DIR, 'linux', 'art', 'bin', 'art')
PROGUARD_CONF = """
# From Android rules
-keepclasseswithmembers public class * {
public static void main(java.lang.String[]);
}
# Disable obfuscation to only focus on shrinking
-dontobfuscate
# Once we're ready for optimization, we might want to relax access modifiers.
-allowaccessmodification
"""
DEVICE_TEMP='/data/local/temp/bench'
def parse_options():
    """Parse command-line options for the benchmark runner.

    Returns the (options, args) pair produced by optparse.OptionParser.
    """
    result = optparse.OptionParser()
    result.add_option('--api',
                      help='Android api level',
                      default='26',
                      choices=['21', '22', '23', '24', '25', '26'])
    result.add_option('--benchmark',
                      help='The benchmark to run',
                      default='rgx',
                      choices=['rgx', 'deltablue', 'sta', 'empty'])
    result.add_option('--golem',
                      help='Don\'t build r8 and link in third_party deps',
                      default=False, action='store_true')
    result.add_option('--use-device',
                      # Fixed typo in the help text: 'attaced' -> 'attached'.
                      help='Run the benchmark on an attached device',
                      default=False, action='store_true')
    return result.parse_args()
def get_jar_for_benchmark(benchmark):
    """Return the path to the prebuilt jar for *benchmark*."""
    relative_jar = BENCHMARK_PATTERN.format(benchmark=benchmark)
    return os.path.join(BENCHMARK_ROOT, relative_jar)
def run_art(dex):
    # Run the dex file on host ART and return the parsed benchmark runtime.
    # ART is the checked-in Linux launcher script, hence the 'bash' prefix.
    command = ['bash', ART, '-cp', dex, BENCHMARK_MAIN_CLASS]
    utils.PrintCmd(command)
    benchmark_output = subprocess.check_output(command)
    # Extract the milliseconds field from the verbose benchmark output.
    return get_result(benchmark_output)
def adb(args):
    """Run `adb` with *args*, echoing the command, and return its stdout."""
    command = ['adb'] + args
    utils.PrintCmd(command)
    # Reuse the already-built command list instead of constructing the same
    # list a second time (the original rebuilt ['adb'] + args here).
    return subprocess.check_output(command)
def get_result(output):
    """Extract the benchmark runtime from the tool's verbose output.

    The interesting line has the shape: RESULTS,KtBench,KtBench,15719
    Returns the trailing milliseconds field as a string, or None when no
    RESULTS line is present.
    """
    for line in output.splitlines():
        if line.startswith('RESULTS'):
            # The original referenced an undefined name `s` here, which is a
            # NameError on Python 3 (comprehension variables do not leak).
            return line.split('RESULTS,KtBench,KtBench,')[1]
    return None
def run_art_device(dex):
    # Run the dex file on an attached device via dalvikvm and return the
    # parsed benchmark runtime.
    adb(['wait-for-device', 'root'])
    # Push the dex next to other benchmark artifacts under DEVICE_TEMP.
    device_dst = os.path.join(DEVICE_TEMP, os.path.basename(dex))
    adb(['push', dex, device_dst])
    benchmark_output = adb(['shell', 'dalvikvm', '-cp', device_dst, BENCHMARK_MAIN_CLASS])
    return get_result(benchmark_output)
def Main():
    # Build the selected benchmark with R8 and time it on host ART or on an
    # attached device, printing a golem-style result line.
    (options, args) = parse_options()
    if options.golem:
        # On golem R8 is prebuilt; only link in the third_party dependencies.
        golem.link_third_party()
    with utils.TempDir() as temp:
        dex_path = os.path.join(temp, "classes.jar")
        proguard_conf = os.path.join(temp, 'proguard.conf')
        with open(proguard_conf, 'w') as f:
            f.write(PROGUARD_CONF)
        benchmark_jar = get_jar_for_benchmark(options.benchmark)
        r8_args = [
            '--lib', utils.get_android_jar(26),  # Only works with api 26
            '--output', dex_path,
            '--pg-conf', proguard_conf,
            '--min-api', str(options.api),
            benchmark_jar
        ]
        toolhelper.run('r8', r8_args, build=not options.golem)
        if options.use_device:
            result = run_art_device(dex_path)
        else:
            result = run_art(dex_path)
        # Golem parses this exact "name(RunTimeRaw): N ms" shape.
        print('Kotlin_{}(RunTimeRaw): {} ms'.format(options.benchmark, result))


if __name__ == '__main__':
    sys.exit(Main())
|
#!/usr/bin/env python
# Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script for running kotlin based benchmarks
import golem
import optparse
import os
import subprocess
import sys
import toolhelper
import utils
BENCHMARK_ROOT = os.path.join(utils.REPO_ROOT, 'third_party', 'benchmarks',
'kotlin-benches')
BENCHMARK_PATTERN = '{benchmark}/kotlin/perf/build/libs/perf-1.0-BENCH.jar'
BENCHMARK_MAIN_CLASS = 'com.android.kt.bms.cli.Runner'
ART = os.path.join(utils.TOOLS_DIR, 'linux', 'art', 'bin', 'art')
PROGUARD_CONF = """
# From Android rules
-keepclasseswithmembers public class * {
public static void main(java.lang.String[]);
}
# Disable obfuscation to only focus on shrinking
-dontobfuscate
# Once we're ready for optimization, we might want to relax access modifiers.
-allowaccessmodification
"""
DEVICE_TEMP='/data/local/temp/bench'
def parse_options():
result = optparse.OptionParser()
result.add_option('--api',
help='Android api level',
default='26',
choices=['21', '22', '23', '24', '25', '26'])
result.add_option('--benchmark',
help='The benchmark to run',
default='rgx',
choices=['rgx', 'deltablue', 'sta', 'empty'])
result.add_option('--golem',
help='Don\'t build r8 and link in third_party deps',
default=False, action='store_true')
result.add_option('--use-device',
help='Run the benchmark on an attaced device',
default=False, action='store_true')
return result.parse_args()
def get_jar_for_benchmark(benchmark):
return os.path.join(BENCHMARK_ROOT,
BENCHMARK_PATTERN.format(benchmark=benchmark))
def run_art(dex):
command = ['bash', ART, '-cp', dex, BENCHMARK_MAIN_CLASS]
utils.PrintCmd(command)
benchmark_output = subprocess.check_output(command)
return get_result(benchmark_output)
def adb(args):
    """Run `adb` with *args*, echoing the command, and return its stdout."""
    command = ['adb'] + args
    utils.PrintCmd(command)
    # Reuse the already-built command list instead of constructing the same
    # list a second time (the original rebuilt ['adb'] + args here).
    return subprocess.check_output(command)
def get_result(output):
    """Extract the benchmark runtime from the tool's verbose output.

    The interesting line has the shape: RESULTS,KtBench,KtBench,15719
    Returns the trailing milliseconds field as a string, or None when no
    RESULTS line is present.
    """
    for line in output.splitlines():
        if line.startswith('RESULTS'):
            # The original referenced an undefined name `s` here, which is a
            # NameError on Python 3 (comprehension variables do not leak).
            return line.split('RESULTS,KtBench,KtBench,')[1]
    return None
def run_art_device(dex):
adb(['wait-for-device', 'root'])
device_dst = os.path.join(DEVICE_TEMP, os.path.basename(dex))
adb(['push', dex, device_dst])
benchmark_output = adb(['shell', 'dalvikvm', '-cp', device_dst, BENCHMARK_MAIN_CLASS])
return get_result(benchmark_output)
def Main():
(options, args) = parse_options()
if options.golem:
golem.link_third_party()
with utils.TempDir() as temp:
dex_path = os.path.join(temp, "classes.jar")
proguard_conf = os.path.join(temp, 'proguard.conf')
with open(proguard_conf, 'w') as f:
f.write(PROGUARD_CONF)
benchmark_jar = get_jar_for_benchmark(options.benchmark)
r8_args = [
'--lib', utils.get_android_jar(26), # Only works with api 26
'--output', dex_path,
'--pg-conf', proguard_conf,
'--min-api', str(options.api),
benchmark_jar
]
toolhelper.run('r8', r8_args, build=not options.golem)
if options.use_device:
result = run_art_device(dex_path)
else:
result = run_art(dex_path)
print('Kotlin_{}(RunTimeRaw): {} ms'.format(options.benchmark, result))
if __name__ == '__main__':
sys.exit(Main())
|
en
| 0.824541
|
#!/usr/bin/env python # Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file # for details. All rights reserved. Use of this source code is governed by a # BSD-style license that can be found in the LICENSE file. # Script for running kotlin based benchmarks # From Android rules -keepclasseswithmembers public class * { public static void main(java.lang.String[]); } # Disable obfuscation to only focus on shrinking -dontobfuscate # Once we're ready for optimization, we might want to relax access modifiers. -allowaccessmodification # There is a lot of debug output, with the actual results being in the line with: # RESULTS,KtBench,KtBench,15719 # structure. # Only works with api 26
| 2.069517
| 2
|
ontask/action/views/edit_personalized.py
|
pinheiroo27/ontask_b
| 33
|
6628836
|
# -*- coding: utf-8 -*-
"""Views to edit actions that send personalized information."""
from typing import Optional
from django import http
from django.contrib.auth.decorators import user_passes_test
from django.template.loader import render_to_string
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from ontask import models
from ontask.action import forms
from ontask.core import ajax_required, get_action, get_view, is_instructor
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@get_action(pf_related='actions')
def save_text(
    request: http.HttpRequest,
    pk: int,
    workflow: Optional[models.Workflow] = None,
    action: Optional[models.Action] = None,
) -> http.JsonResponse:
    """Save text content of the action.

    :param request: HTTP request (POST)
    :param pk: Action ID
    :param workflow: Workflow being manipulated (set by the decorators)
    :param action: Action being saved (set by the decorators)
    :return: Nothing, changes reflected in the DB
    """
    del pk, workflow
    # Wrong type of action.
    # NOTE(review): action.is_in presumably marks incoming (survey-type)
    # actions, which carry no text body — confirm against the Action model.
    if action.is_in:
        return http.JsonResponse({'html_redirect': reverse('home')})
    # If the request has the 'action_content', update the action
    action_content = request.POST.get('action_content')
    if action_content:
        action.set_text_content(action_content)
    # Empty redirect tells the client-side handler to stay on the page.
    return http.JsonResponse({'html_redirect': ''})
@user_passes_test(is_instructor)
@ajax_required
@get_action()
def showurl(
    request: http.HttpRequest,
    pk: int,
    workflow: Optional[models.Workflow] = None,
    action: Optional[models.Action] = None,
) -> http.JsonResponse:
    """Create page to show URL to access action.

    Function that given a JSON request with an action pk returns the URL used
    to retrieve the personalised message.

    :param request: Json request
    :param pk: Primary key of the action to show the URL
    :param workflow: Workflow being manipulated (set by the decorators)
    :param action: Action being manipulated (set by the decorators)
    :return: Json response with the content to show in the screen
    """
    del pk, workflow
    form = forms.EnableURLForm(request.POST or None, instance=action)
    if request.method == 'POST' and form.is_valid():
        if form.has_changed():
            # Reflect the change in the action element
            form.save()
            # Recording the event
            action.log(
                request.user,
                models.Log.ACTION_SERVE_TOGGLED,
                served_enabled=action.serve_enabled)
            return http.JsonResponse(
                {'html_redirect': reverse('action:index')})
        # Nothing changed: null redirect closes the modal in place.
        return http.JsonResponse({'html_redirect': None})
    # Render the page with the absolute URI
    # GET (or invalid POST): show the modal with the absolute serving URL,
    # i.e. the LTI entry point plus this action's id as a query parameter.
    return http.JsonResponse({
        'html_form': render_to_string(
            'action/includes/partial_action_showurl.html',
            {'url_text': request.build_absolute_uri(
                reverse('action:serve_lti') + '?id=' + str(action.id)),
             'form': form,
             'action': action},
            request=request),
    })
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@require_POST
@get_view()
def add_attachment(
    request: http.HttpRequest,
    pk: int,
    action_id: int,
    workflow: Optional[models.Workflow] = None,
    view: Optional[models.View] = None,
) -> http.JsonResponse:
    """Add a View to an Email Report action.

    :param request: Json request
    :param pk: Primary key of the view to attach to the action
    :param action_id: Action being manipulated
    :param workflow: Workflow being manipulated (set by the decorators)
    :param view: View object to be attached to the action
    :return: Json response that prompts refresh after operation
    """
    del pk
    # Get the action; only Email Report actions can carry attachments.
    action = workflow.actions.filter(pk=action_id).first()
    if not action or action.action_type != models.Action.EMAIL_REPORT:
        # Fixed response-key typo 'html_rediret' -> 'html_redirect' so the
        # key matches the one used by every other response in this module.
        return http.JsonResponse({'html_redirect': reverse('action:index')})
    # If the request has 'action_content', update the action
    action_content = request.POST.get('action_content')
    if action_content:
        action.set_text_content(action_content)
    action.attachments.add(view)
    action.save()
    # Refresh the page to show the column in the list.
    return http.JsonResponse({'html_redirect': ''})
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@require_POST
@get_view()
def remove_attachment(
    request: http.HttpRequest,
    pk: int,
    action_id: int,
    workflow: Optional[models.Workflow] = None,
    view: Optional[models.View] = None,
) -> http.JsonResponse:
    """Remove a view from an Email Report action.

    :param request: Json request
    :param pk: Primary key of the view to detach from the action
    :param action_id: Action being manipulated
    :param workflow: Workflow being manipulated (set by the decorators)
    :param view: View object to be detached from the action
    :return: Json response that prompts refresh after operation
    """
    del pk
    # Get the action; only Email Report actions can carry attachments.
    action = workflow.actions.filter(pk=action_id).first()
    if not action or action.action_type != models.Action.EMAIL_REPORT:
        # Fixed response-key typo 'html_rediret' -> 'html_redirect' so the
        # key matches the one used by every other response in this module.
        return http.JsonResponse({'html_redirect': reverse('action:index')})
    # If the request has 'action_content', update the action
    action_content = request.POST.get('action_content')
    if action_content:
        action.set_text_content(action_content)
    action.attachments.remove(view)
    action.save()
    # Refresh the page to show the column in the list.
    return http.JsonResponse({'html_redirect': ''})
|
# -*- coding: utf-8 -*-
"""Views to edit actions that send personalized information."""
from typing import Optional
from django import http
from django.contrib.auth.decorators import user_passes_test
from django.template.loader import render_to_string
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from ontask import models
from ontask.action import forms
from ontask.core import ajax_required, get_action, get_view, is_instructor
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@get_action(pf_related='actions')
def save_text(
request: http.HttpRequest,
pk: int,
workflow: Optional[models.Workflow] = None,
action: Optional[models.Action] = None,
) -> http.JsonResponse:
"""Save text content of the action.
:param request: HTTP request (POST)
:param pk: Action ID
:param workflow: Workflow being manipulated (set by the decorators)
:param action: Action being saved (set by the decorators)
:return: Nothing, changes reflected in the DB
"""
del pk, workflow
# Wrong type of action.
if action.is_in:
return http.JsonResponse({'html_redirect': reverse('home')})
# If the request has the 'action_content', update the action
action_content = request.POST.get('action_content')
if action_content:
action.set_text_content(action_content)
return http.JsonResponse({'html_redirect': ''})
@user_passes_test(is_instructor)
@ajax_required
@get_action()
def showurl(
request: http.HttpRequest,
pk: int,
workflow: Optional[models.Workflow] = None,
action: Optional[models.Action] = None,
) -> http.JsonResponse:
"""Create page to show URL to access action.
Function that given a JSON request with an action pk returns the URL used
to retrieve the personalised message.
:param request: Json request
:param pk: Primary key of the action to show the URL
:param workflow: Workflow being manipulated (set by the decorators)
:param action: Action being manipulated (set by the decorators)
:return: Json response with the content to show in the screen
"""
del pk, workflow
form = forms.EnableURLForm(request.POST or None, instance=action)
if request.method == 'POST' and form.is_valid():
if form.has_changed():
# Reflect the change in the action element
form.save()
# Recording the event
action.log(
request.user,
models.Log.ACTION_SERVE_TOGGLED,
served_enabled=action.serve_enabled)
return http.JsonResponse(
{'html_redirect': reverse('action:index')})
return http.JsonResponse({'html_redirect': None})
# Render the page with the absolute URI
return http.JsonResponse({
'html_form': render_to_string(
'action/includes/partial_action_showurl.html',
{'url_text': request.build_absolute_uri(
reverse('action:serve_lti') + '?id=' + str(action.id)),
'form': form,
'action': action},
request=request),
})
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@require_POST
@get_view()
def add_attachment(
    request: http.HttpRequest,
    pk: int,
    action_id: int,
    workflow: Optional[models.Workflow] = None,
    view: Optional[models.View] = None,
) -> http.JsonResponse:
    """Add a View to an Email Report action.

    :param request: Json request
    :param pk: Primary key of the view to attach to the action
    :param action_id: Action being manipulated
    :param workflow: Workflow being manipulated (set by the decorators)
    :param view: View object to be attached to the action
    :return: Json response that prompts refresh after operation
    """
    del pk
    # Get the action; only Email Report actions can carry attachments.
    action = workflow.actions.filter(pk=action_id).first()
    if not action or action.action_type != models.Action.EMAIL_REPORT:
        # Fixed response-key typo 'html_rediret' -> 'html_redirect' so the
        # key matches the one used by every other response in this module.
        return http.JsonResponse({'html_redirect': reverse('action:index')})
    # If the request has 'action_content', update the action
    action_content = request.POST.get('action_content')
    if action_content:
        action.set_text_content(action_content)
    action.attachments.add(view)
    action.save()
    # Refresh the page to show the column in the list.
    return http.JsonResponse({'html_redirect': ''})
@user_passes_test(is_instructor)
@csrf_exempt
@ajax_required
@require_POST
@get_view()
def remove_attachment(
    request: http.HttpRequest,
    pk: int,
    action_id: int,
    workflow: Optional[models.Workflow] = None,
    view: Optional[models.View] = None,
) -> http.JsonResponse:
    """Remove a view from an Email Report action.

    :param request: Json request
    :param pk: Primary key of the view to detach from the action
    :param action_id: Action being manipulated
    :param workflow: Workflow being manipulated (set by the decorators)
    :param view: View object to be detached from the action
    :return: Json response that prompts refresh after operation
    """
    del pk
    # Get the action; only Email Report actions can carry attachments.
    action = workflow.actions.filter(pk=action_id).first()
    if not action or action.action_type != models.Action.EMAIL_REPORT:
        # Fixed response-key typo 'html_rediret' -> 'html_redirect' so the
        # key matches the one used by every other response in this module.
        return http.JsonResponse({'html_redirect': reverse('action:index')})
    # If the request has 'action_content', update the action
    action_content = request.POST.get('action_content')
    if action_content:
        action.set_text_content(action_content)
    action.attachments.remove(view)
    action.save()
    # Refresh the page to show the column in the list.
    return http.JsonResponse({'html_redirect': ''})
|
en
| 0.804724
|
# -*- coding: utf-8 -*- Views to edit actions that send personalized information. Save text content of the action. :param request: HTTP request (POST) :param pk: Action ID :param workflow: Workflow being manipulated (set by the decorators) :param action: Action being saved (set by the decorators) :return: Nothing, changes reflected in the DB # Wrong type of action. # If the request has the 'action_content', update the action Create page to show URL to access action. Function that given a JSON request with an action pk returns the URL used to retrieve the personalised message. :param request: Json request :param pk: Primary key of the action to show the URL :param workflow: Workflow being manipulated (set by the decorators) :param action: Action being manipulated (set by the decorators) :return: Json response with the content to show in the screen # Reflect the change in the action element # Recording the event # Render the page with the absolute URI Add a View to an Email Report action Function that given a JSON request with an action pk returns the URL used to retrieve the personalised message. :param request: Json request :param pk: Primary key of the view to attach to the action :param action_id: Action being manipulated :param workflow: Workflow being manipulated (set by the decorators) :param view: View object to be attached to the action :return: Json response that prompts refresh after operation # Get the action # If the request has 'action_content', update the action # Refresh the page to show the column in the list. Remove a view from an Email Report action Function that given a JSON request with an action pk returns the URL used to retrieve the personalised message. 
:param request: Json request :param pk: Primary key of the view to attach to the action :param action_id: Action being manipulated :param workflow: Workflow being manipulated (set by the decorators) :param view: View object to be attached to the action :return: Json response that prompts refresh after operation # Get the action # If the request has 'action_content', update the action # Refresh the page to show the column in the list.
| 2.217854
| 2
|
gui/settingspanel.py
|
yemikudaisi/GIS-Lite
| 2
|
6628837
|
<reponame>yemikudaisi/GIS-Lite<filename>gui/settingspanel.py<gh_stars>1-10
import wx
class SettingsPanel(wx.Dialog):
    """Modal dialog presenting one checkbox per boolean setting.

    `settings` is an indexable collection of booleans; it is mutated in
    place when the user confirms with OK.
    """

    def __init__(self, settings, *args, **kwargs):
        wx.Dialog.__init__(self, *args, **kwargs)
        self.settings = settings
        self.panel = wx.Panel(self)
        self.button_ok = wx.Button(self.panel, label="OK")
        self.button_cancel = wx.Button(self.panel, label="Cancel")
        self.button_ok.Bind(wx.EVT_BUTTON, self.onOk)
        self.button_cancel.Bind(wx.EVT_BUTTON, self.onCancel)
        # Generalized: build one checkbox per setting instead of assuming
        # exactly three settings (the original hard-coded range(3)).
        self.checkboxes = []
        for i in range(len(self.settings)):
            checkbox = wx.CheckBox(self.panel, label=str(i))
            checkbox.SetValue(self.settings[i])
            self.checkboxes.append(checkbox)
        self.sizer = wx.BoxSizer()
        for checkbox in self.checkboxes:
            self.sizer.Add(checkbox)
        self.sizer.Add(self.button_ok)
        self.sizer.Add(self.button_cancel)
        self.panel.SetSizerAndFit(self.sizer)

    def onCancel(self, e):
        """Dismiss the dialog without touching the settings."""
        self.EndModal(wx.ID_CANCEL)

    def onOk(self, e):
        """Copy each checkbox state back into the settings and close."""
        for i, checkbox in enumerate(self.checkboxes):
            self.settings[i] = checkbox.GetValue()
        self.EndModal(wx.ID_OK)

    def GetSettings(self):
        """Return the (possibly updated) settings collection."""
        return self.settings
|
import wx
class SettingsPanel(wx.Dialog):
def __init__(self, settings, *args, **kwargs):
wx.Dialog.__init__(self, *args, **kwargs)
self.settings = settings
self.panel = wx.Panel(self)
self.button_ok = wx.Button(self.panel, label="OK")
self.button_cancel = wx.Button(self.panel, label="Cancel")
self.button_ok.Bind(wx.EVT_BUTTON, self.onOk)
self.button_cancel.Bind(wx.EVT_BUTTON, self.onCancel)
self.checkboxes = []
for i in range(3):
checkbox = wx.CheckBox(self.panel, label=str(i))
checkbox.SetValue(self.settings[i])
self.checkboxes.append(checkbox)
self.sizer = wx.BoxSizer()
for checkbox in self.checkboxes:
self.sizer.Add(checkbox)
self.sizer.Add(self.button_ok)
self.sizer.Add(self.button_cancel)
self.panel.SetSizerAndFit(self.sizer)
def onCancel(self, e):
self.EndModal(wx.ID_CANCEL)
def onOk(self, e):
for i in range(3):
self.settings[i] = self.checkboxes[i].GetValue()
self.EndModal(wx.ID_OK)
def GetSettings(self):
return self.settings
|
none
| 1
| 2.281349
| 2
|
|
tnetwork/utils/bidict/_abc.py
|
tomjorquera/tnetwork
| 4
|
6628838
|
<filename>tnetwork/utils/bidict/_abc.py
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME>. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provides bidict ABCs."""
# Mapping moved to collections.abc in Python 3.3 and the top-level
# `collections` alias was removed in Python 3.10; keep a fallback for
# very old interpreters.
try:
    from collections.abc import Mapping
except ImportError:  # pragma: no cover - Python < 3.3
    from collections import Mapping

from .compat import iteritems


class BidirectionalMapping(Mapping):  # pylint: disable=abstract-method
    """Abstract base class for bidirectional mappings.

    Extends :class:`collections.abc.Mapping`.

    .. py:attribute:: inv

        The inverse mapping.

    .. py:attribute:: _subclsattrs

        The attributes that :attr:`__subclasshook__` checks for to determine
        whether a class is a subclass of :class:`BidirectionalMapping`.
    """

    __slots__ = ()

    inv = NotImplemented

    def __inverted__(self):
        """Get an iterator over the items in :attr:`inv`."""
        return iteritems(self.inv)

    _subclsattrs = frozenset({
        'inv', '__inverted__',
        # see "Mapping" in the table at
        # https://docs.python.org/3/library/collections.abc.html#collections-abstract-base-classes
        '__getitem__', '__iter__', '__len__',  # abstract methods
        '__contains__', 'keys', 'items', 'values', 'get', '__eq__', '__ne__',  # mixin methods
    })

    @classmethod
    def __subclasshook__(cls, C):  # noqa: N803 ("argument name should be lowercase")
        # Standard to use "C" for this arg in __subclasshook__, e.g.:
        # https://github.com/python/cpython/blob/d505a2/Lib/_collections_abc.py#L93
        """Check if C provides all the attributes in :attr:`_subclsattrs`.

        Causes conforming dyn_graph to be virtual subclasses automatically.
        """
        if cls is not BidirectionalMapping:  # lgtm [py/comparison-using-is]
            return NotImplemented
        mro = getattr(C, '__mro__', None)
        if mro is None:
            return NotImplemented
        return all(any(B.__dict__.get(i) for B in mro) for i in cls._subclsattrs)
|
<filename>tnetwork/utils/bidict/_abc.py
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME>. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provides bidict ABCs."""
from collections import Mapping
from .compat import iteritems
class BidirectionalMapping(Mapping): # pylint: disable=abstract-method
"""Abstract base class for bidirectional mappings.
Extends :class:`collections.abc.Mapping`.
.. py:attribute:: inv
The inverse mapping.
.. py:attribute:: _subclsattrs
The attributes that :attr:`__subclasshook__` checks for to determine
whether a class is a subclass of :class:`BidirectionalMapping`.
"""
__slots__ = ()
inv = NotImplemented
def __inverted__(self):
"""Get an iterator over the items in :attr:`inv`."""
return iteritems(self.inv)
_subclsattrs = frozenset({
'inv', '__inverted__',
# see "Mapping" in the table at
# https://docs.python.org/3/library/collections.abc.html#collections-abstract-base-classes
'__getitem__', '__iter__', '__len__', # abstract methods
'__contains__', 'keys', 'items', 'values', 'get', '__eq__', '__ne__', # mixin methods
})
@classmethod
def __subclasshook__(cls, C): # noqa: N803 ("argument name should be lowercase")
# Standard to use "C" for this arg in __subclasshook__, e.g.:
# https://github.com/python/cpython/blob/d505a2/Lib/_collections_abc.py#L93
"""Check if C provides all the attributes in :attr:`_subclsattrs`.
Causes conforming dyn_graph to be virtual subclasses automatically.
"""
if cls is not BidirectionalMapping: # lgtm [py/comparison-using-is]
return NotImplemented
mro = getattr(C, '__mro__', None)
if mro is None:
return NotImplemented
return all(any(B.__dict__.get(i) for B in mro) for i in cls._subclsattrs)
|
en
| 0.682314
|
# -*- coding: utf-8 -*- # Copyright 2017 <NAME>. All Rights Reserved. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. Provides bidict ABCs. # pylint: disable=abstract-method Abstract base class for bidirectional mappings. Extends :class:`collections.abc.Mapping`. .. py:attribute:: inv The inverse mapping. .. py:attribute:: _subclsattrs The attributes that :attr:`__subclasshook__` checks for to determine whether a class is a subclass of :class:`BidirectionalMapping`. Get an iterator over the items in :attr:`inv`. # see "Mapping" in the table at # https://docs.python.org/3/library/collections.abc.html#collections-abstract-base-classes # abstract methods # mixin methods # noqa: N803 ("argument name should be lowercase") # Standard to use "C" for this arg in __subclasshook__, e.g.: # https://github.com/python/cpython/blob/d505a2/Lib/_collections_abc.py#L93 Check if C provides all the attributes in :attr:`_subclsattrs`. Causes conforming dyn_graph to be virtual subclasses automatically. # lgtm [py/comparison-using-is]
| 2.172283
| 2
|
codewar/Counting Duplicates-7k/Counting Duplicates.py
|
z7211979/practise-Python
| 1
|
6628839
|
<reponame>z7211979/practise-Python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import cw as test
# Toggle to silence the debug trace output.
debug = 1


def debug_print(flag, out):
    """Print a labelled debug line when the module-level flag is set."""
    if not debug:
        return
    print("temp" + str(flag) + ":" + str(out))
def duplicate_count(text):
    """Count characters occurring more than once in *text*, case-insensitively.

    The original folded lowercase letters onto their uppercase slot by
    subtracting 32 from any code point >= ord('a'), which also (incorrectly)
    remapped non-letter characters above 'z' such as '{' onto '['.
    Counting over str.lower() folds letters only.
    """
    # Local import keeps this exercise file self-contained.
    from collections import Counter
    counts = Counter(text.lower())
    return sum(1 for occurrences in counts.values() if occurrences > 1)
if __name__ == "__main__":
    # Codewars sample cases: expected number of case-insensitive duplicate
    # characters in each input string.
    test.assert_equals(duplicate_count("abcde"), 0)
    test.assert_equals(duplicate_count("abcdea"), 1)
    test.assert_equals(duplicate_count("indivisibility"), 1)
    test.assert_equals(duplicate_count("aabBcde"), 2)
    test.assert_equals(duplicate_count("aA11"), 2)

# Reference one-liner solution kept (inert string) for comparison.
'''
def duplicate_count(s):
    return len([c for c in set(s.lower()) if s.lower().count(c)>1])
'''
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import cw as test
debug = 1
def debug_print(flag, out):
if debug:
print("temp" + str(flag) + ":" + str(out))
def duplicate_count(text):
# Your code goes here
out_temp = 0
count_t = [0] * 128
for temp in text:
if ord(temp) >= ord('a') :
count_t[ord(temp)-32] += 1
else:
count_t[ord(temp)] += 1
for i in count_t:
if i > 1:
out_temp += 1
return out_temp
if __name__ == "__main__":
test.assert_equals(duplicate_count("abcde"), 0)
test.assert_equals(duplicate_count("abcdea"), 1)
test.assert_equals(duplicate_count("indivisibility"), 1)
test.assert_equals(duplicate_count("aabBcde"), 2)
test.assert_equals(duplicate_count("aA11"), 2)
'''
def duplicate_count(s):
return len([c for c in set(s.lower()) if s.lower().count(c)>1])
'''
|
en
| 0.37242
|
#!/usr/bin/python3 # -*- coding: utf-8 -*- # Your code goes here def duplicate_count(s): return len([c for c in set(s.lower()) if s.lower().count(c)>1])
| 3.902939
| 4
|
vtp/models.py
|
CzechInvest/ciis
| 1
|
6628840
|
<gh_stars>1-10
from django.db import models
from addresses.models import Address
from django.utils.translation import ugettext_lazy as _
class VtpType(models.Model):
    """Kind/category label for a science & technology park (VTP)."""

    # Short free-text label, capped at 16 characters.
    type = models.CharField(
        max_length=16
    )

    def __str__(self):
        return self.type
class Service(models.Model):
    """A service offered by a science & technology park."""

    # Human-readable service name.
    service = models.CharField(
        max_length=128)

    def __str__(self):
        return self.service
class Vtp(models.Model):
    """Science & technology park with its types, services and address."""

    name = models.CharField(
        verbose_name=_("Název"),
        max_length=256
    )
    # A park may have multiple types and offer multiple services.
    type = models.ManyToManyField(VtpType)
    services = models.ManyToManyField(Service)
    url = models.URLField()
    # PROTECT: an address referenced by a park cannot be deleted.
    address = models.ForeignKey(
        Address,
        on_delete=models.PROTECT
    )

    def __str__(self):
        return self.name
|
from django.db import models
from addresses.models import Address
from django.utils.translation import ugettext_lazy as _
class VtpType(models.Model):
type = models.CharField(
max_length=16
)
def __str__(self):
return self.type
class Service(models.Model):
service = models.CharField(
max_length=128)
def __str__(self):
return self.service
class Vtp(models.Model):
name = models.CharField(
verbose_name=_("Název"),
max_length=256
)
type = models.ManyToManyField(VtpType)
services = models.ManyToManyField(Service)
url = models.URLField()
address = models.ForeignKey(
Address,
on_delete=models.PROTECT
)
def __str__(self):
return self.name
|
none
| 1
| 2.226928
| 2
|
|
recsys_challenge/loader.py
|
pilipolio/recsys_challenge
| 1
|
6628841
|
<reponame>pilipolio/recsys_challenge<gh_stars>1-10
import os
import pandas as pd
import numpy as np
BUYS_COLUMNS = ['SESSION_ID', 'TS', 'ITEM_ID', 'PRICE', 'Q']
CLICKS_COLUMNS = ['SESSION_ID', 'TS', 'ITEM_ID', 'CATEGORY']
def time_collapsed_clicks(clicks):
    """Collapse click events to one row per (session, item).

    Adds per-item first/last click timestamps and click counts, plus the
    first/last timestamps of the whole session.

    >>> clicks_columns = ['SESSION_ID', 'TS', 'ITEM_ID', 'CATEGORY']
    >>> clicks_logs = [[1, 't0', 10, 0], [1, 't1', 20, 0], [1, 't2', 20, 0]]
    >>> clicks = pd.DataFrame(columns=clicks_columns, data=clicks_logs)
    >>> time_collapsed_clicks(clicks)  # doctest: +NORMALIZE_WHITESPACE
       SESSION_ID  ITEM_ID  CATEGORY ITEM_START ITEM_STOP  N_CLICKS SESSION_START SESSION_STOP
    0           1       10         0         t0        t0         1            t0           t2
    1           1       20         0         t1        t2         2            t0           t2
    """
    # Named aggregation pins every output column to an explicit source
    # column/function pair. The previous dict-of-lists aggregation relied on
    # a fragile column ordering (flagged by the original author's own
    # comment) and aggregated SESSION_ID, which is itself a groupby key.
    session_limits = clicks.groupby('SESSION_ID').agg(
        SESSION_START=('TS', 'min'),
        SESSION_STOP=('TS', 'max'),
    )
    item_sub_sessions = clicks.groupby(['SESSION_ID', 'ITEM_ID']).agg(
        CATEGORY=('CATEGORY', 'min'),
        ITEM_START=('TS', 'min'),
        ITEM_STOP=('TS', 'max'),
        N_CLICKS=('TS', 'size'),
    )
    return pd.merge(
        left=item_sub_sessions.reset_index(),
        right=session_limits.reset_index(),
        on='SESSION_ID',
        how='inner')
def collapse_time_and_join(buys, clicks):
    """ Collapsing the time dimension by grouping by (session_id, item_id)
    and then join clicks and buys rows.
    """
    # One row per (session, item) with the number of buy events.
    buys = buys.groupby(['SESSION_ID', 'ITEM_ID']).size().reset_index()
    buys.columns = ['SESSION_ID', 'ITEM_ID', 'N_BUYS']
    clicks = time_collapsed_clicks(clicks)
    # Outer join keeps items that were only clicked or only bought.
    clicks_and_buys = buys.merge(
        right=clicks, on=['SESSION_ID', 'ITEM_ID'],
        how='outer')
    # fills N_CLICKS and N_BUYS to 0 when missing
    clicks_and_buys = clicks_and_buys.fillna(0)
    # for some reasone SESSION_ID and ITEM_ID get also converted to float by the merge
    clicks_and_buys[['N_CLICKS', 'N_BUYS']] = clicks_and_buys[['N_CLICKS', 'N_BUYS']].astype(np.int64)
    clicks_and_buys[['SESSION_ID', 'ITEM_ID']] = clicks_and_buys[['SESSION_ID', 'ITEM_ID']].astype(np.int64)
    return clicks_and_buys
def to_pickled_df(data_directory, **kwargs):
    """Pickle each keyword-argument DataFrame to ``<data_directory>/<name>.df``.

    The keyword name becomes the file stem, e.g. ``to_pickled_df(d, foo=df)``
    writes ``d/foo.df``.
    """
    # dict.iteritems() only exists on Python 2; .items() works on Python 3.
    for name, df in kwargs.items():
        df.to_pickle(os.path.join(data_directory, name + '.df'))
if __name__ == '__main__':
    data_directory = 'data'
    # Raw YooChoose logs; TS strings are parsed into datetimes.
    buys = pd.read_csv(os.path.join(data_directory, 'yoochoose-buys.dat'), names=BUYS_COLUMNS, parse_dates=['TS'])
    clicks = pd.read_csv(os.path.join(data_directory, 'yoochoose-clicks.dat'), names=CLICKS_COLUMNS, parse_dates=['TS'])
    clicks_and_buys = collapse_time_and_join(buys, clicks)
    to_pickled_df(data_directory, clicks_and_buys=clicks_and_buys)
    # The test set has clicks only (no buys), so it is just time-collapsed.
    test_clicks = pd.read_csv(os.path.join(data_directory, 'yoochoose-test.dat'), names=CLICKS_COLUMNS)
    test_clicks = time_collapsed_clicks(test_clicks)
    to_pickled_df(data_directory, test_clicks=test_clicks)
|
import os
import pandas as pd
import numpy as np
BUYS_COLUMNS = ['SESSION_ID', 'TS', 'ITEM_ID', 'PRICE', 'Q']
CLICKS_COLUMNS = ['SESSION_ID', 'TS', 'ITEM_ID', 'CATEGORY']
def time_collapsed_clicks(clicks):
"""
>>> clicks_columns = ['SESSION_ID', 'TS', 'ITEM_ID', 'CATEGORY']
>>> clicks_logs = [[1, 't0', 10, 0], [1, 't1', 20, 0], [1, 't2', 20, 0]]
>>> clicks = pd.DataFrame(columns=clicks_columns, data=clicks_logs)
>>> time_collapsed_clicks(clicks)
SESSION_ID ITEM_ID CATEGORY ITEM_START ITEM_STOP N_CLICKS SESSION_START SESSION_STOP
0 1 10 0 t0 t0 1 t0 t2
1 1 20 0 t1 t2 2 t0 t2
"""
clicks_by_sessions = clicks.groupby(['SESSION_ID'])
session_limits = clicks_by_sessions.aggregate({'TS': [np.min, np.max]})
session_limits.columns = ['SESSION_START', 'SESSION_STOP']
clicks_by_item_and_sessions = clicks.groupby(['SESSION_ID', 'ITEM_ID'])
item_sub_sessions = clicks_by_item_and_sessions.aggregate({
'TS': [np.min, np.max],
'CATEGORY': np.min,
'SESSION_ID': len
})
# kind of fragile as based on column names alphabetical orders as dict keys?
item_sub_sessions.columns = ['CATEGORY', 'ITEM_START', 'ITEM_STOP', 'N_CLICKS']
collapsed_clicks = pd.merge(
left=item_sub_sessions.reset_index(),
right=session_limits.reset_index(),
on='SESSION_ID',
how='inner')
return collapsed_clicks
def collapse_time_and_join(buys, clicks):
""" Collapsing the time dimension by grouping by (session_id, item_id)
and then join clicks and buys rows.
"""
buys = buys.groupby(['SESSION_ID', 'ITEM_ID']).size().reset_index()
buys.columns = ['SESSION_ID', 'ITEM_ID', 'N_BUYS']
clicks = time_collapsed_clicks(clicks)
clicks_and_buys = buys.merge(
right=clicks, on=['SESSION_ID', 'ITEM_ID'],
how='outer')
# fills N_CLICKS and N_BUYS to 0 when missing
clicks_and_buys = clicks_and_buys.fillna(0)
# for some reasone SESSION_ID and ITEM_ID get also converted to float by the merge
clicks_and_buys[['N_CLICKS', 'N_BUYS']] = clicks_and_buys[['N_CLICKS', 'N_BUYS']].astype(np.int64)
clicks_and_buys[['SESSION_ID', 'ITEM_ID']] = clicks_and_buys[['SESSION_ID', 'ITEM_ID']].astype(np.int64)
return clicks_and_buys
def to_pickled_df(data_directory, **kwargs):
for name, df in kwargs.iteritems():
df.to_pickle(os.path.join(data_directory, name + '.df'))
if __name__ == '__main__':
data_directory = 'data'
buys = pd.read_csv(os.path.join(data_directory, 'yoochoose-buys.dat'), names=BUYS_COLUMNS, parse_dates=['TS'])
clicks = pd.read_csv(os.path.join(data_directory, 'yoochoose-clicks.dat'), names=CLICKS_COLUMNS, parse_dates=['TS'])
clicks_and_buys = collapse_time_and_join(buys, clicks)
to_pickled_df(data_directory, clicks_and_buys=clicks_and_buys)
test_clicks = pd.read_csv(os.path.join(data_directory, 'yoochoose-test.dat'), names=CLICKS_COLUMNS)
test_clicks = time_collapsed_clicks(test_clicks)
to_pickled_df(data_directory, test_clicks=test_clicks)
|
en
| 0.72686
|
>>> clicks_columns = ['SESSION_ID', 'TS', 'ITEM_ID', 'CATEGORY'] >>> clicks_logs = [[1, 't0', 10, 0], [1, 't1', 20, 0], [1, 't2', 20, 0]] >>> clicks = pd.DataFrame(columns=clicks_columns, data=clicks_logs) >>> time_collapsed_clicks(clicks) SESSION_ID ITEM_ID CATEGORY ITEM_START ITEM_STOP N_CLICKS SESSION_START SESSION_STOP 0 1 10 0 t0 t0 1 t0 t2 1 1 20 0 t1 t2 2 t0 t2 # kind of fragile as based on column names alphabetical orders as dict keys? Collapsing the time dimension by grouping by (session_id, item_id) and then join clicks and buys rows. # fills N_CLICKS and N_BUYS to 0 when missing # for some reasone SESSION_ID and ITEM_ID get also converted to float by the merge
| 2.683087
| 3
|
mitmproxy/proxy/layers/http/__init__.py
|
waterdrops/mitmproxy
| 1
|
6628842
|
<filename>mitmproxy/proxy/layers/http/__init__.py
import collections
import enum
import time
from dataclasses import dataclass
from typing import DefaultDict, Dict, List, Optional, Tuple, Union
import wsproto.handshake
from mitmproxy import flow, http
from mitmproxy.connection import Connection, Server
from mitmproxy.net import server_spec
from mitmproxy.net.http import status_codes, url
from mitmproxy.proxy import commands, events, layer, tunnel
from mitmproxy.proxy.layers import tcp, tls, websocket
from mitmproxy.proxy.layers.http import _upstream_proxy
from mitmproxy.proxy.utils import expect
from mitmproxy.utils import human
from mitmproxy.websocket import WebSocketData
from ._base import HttpCommand, HttpConnection, ReceiveHttp, StreamId
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \
ResponseEndOfMessage, ResponseHeaders, ResponseProtocolError
from ._hooks import HttpConnectHook, HttpErrorHook, HttpRequestHeadersHook, HttpRequestHook, HttpResponseHeadersHook, \
HttpResponseHook
from ._http1 import Http1Client, Http1Server
from ._http2 import Http2Client, Http2Server
from ...context import Context
class HTTPMode(enum.Enum):
    """Operating mode of the HTTP proxy layer."""
    regular = 1      # explicit proxy; CONNECT requests are honored
    transparent = 2  # destination comes from the intercepted connection, not the request
    upstream = 3     # all requests are forwarded through an upstream HTTP proxy
def validate_request(mode, request) -> Optional[str]:
    """Return an error message if the request is unacceptable in this mode, else None."""
    scheme_is_known = request.scheme in ("http", "https", "")
    if not scheme_is_known:
        return f"Invalid request scheme: {request.scheme}"
    connect_while_transparent = (
        mode is HTTPMode.transparent and request.method == "CONNECT"
    )
    if connect_while_transparent:
        return (
            f"mitmproxy received an HTTP CONNECT request even though it is not running in regular/upstream mode. "
            f"This usually indicates a misconfiguration, please see the mitmproxy mode documentation for details."
        )
    return None
@dataclass
class GetHttpConnection(HttpCommand):
    """
    Open an HTTP Connection. This may not actually open a connection, but return an existing HTTP connection instead.
    """
    blocking = True
    address: Tuple[str, int]  # (host, port) of the target server
    tls: bool  # whether the server connection must be TLS-wrapped
    via: Optional[server_spec.ServerSpec]  # optional upstream proxy to go through
    def __hash__(self):
        # Identity hashing: each command instance must be usable as a unique
        # dict key (e.g. in HttpLayer.command_sources), independent of field
        # equality that the dataclass would otherwise derive.
        return id(self)
    def connection_spec_matches(self, connection: Connection) -> bool:
        # True if `connection` targets the same (address, tls, via) triple,
        # i.e. it could satisfy this command via reuse.
        return (
            isinstance(connection, Server)
            and
            self.address == connection.address
            and
            self.tls == connection.tls
            and
            self.via == connection.via
        )
@dataclass
class GetHttpConnectionCompleted(events.CommandCompleted):
    """Reply event for a completed GetHttpConnection command."""
    command: GetHttpConnection
    reply: Union[Tuple[None, str], Tuple[Connection, None]]
    """connection object, error message"""
@dataclass
class RegisterHttpConnection(HttpCommand):
    """
    Register that a HTTP connection attempt has been completed.
    """
    connection: Connection  # the connection that was (or failed to be) established
    err: Optional[str]  # error message on failure, None on success
@dataclass
class SendHttp(HttpCommand):
    """Forward an HttpEvent to the HTTP state machine handling `connection`."""
    event: HttpEvent
    connection: Connection
    def __repr__(self) -> str:
        return f"Send({self.event})"
class HttpStream(layer.Layer):
    """A single HTTP request/response exchange on top of an HttpLayer.

    The client side and the server side of the exchange each run a small
    state machine: ``self.client_state`` / ``self.server_state`` hold bound
    methods, and incoming events are dispatched to whichever state is current.
    """
    request_body_buf: bytes   # accumulates the request body when not streaming
    response_body_buf: bytes  # accumulates the response body when not streaming
    flow: http.HTTPFlow
    stream_id: StreamId
    child_layer: Optional[layer.Layer] = None  # set when upgrading (CONNECT, 101/WebSocket, raw TCP)
    @property
    def mode(self):
        # The HTTPMode is owned by the parent HttpLayer, which immediately
        # precedes this stream in the context's layer stack.
        i = self.context.layers.index(self)
        parent: HttpLayer = self.context.layers[i - 1]
        return parent.mode
    def __init__(self, context: Context, stream_id: int):
        super().__init__(context)
        self.request_body_buf = b""
        self.response_body_buf = b""
        # Both sides start uninitialized; events.Start advances the client side.
        self.client_state = self.state_uninitialized
        self.server_state = self.state_uninitialized
        self.stream_id = stream_id
    def __repr__(self):
        return (
            f"HttpStream("
            f"id={self.stream_id}, "
            f"client_state={self.client_state.__name__}, "
            f"server_state={self.server_state.__name__}"
            f")"
        )
    @expect(events.Start, HttpEvent)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        # Dispatch: protocol errors are handled specially; request-side events
        # go to the client state machine, everything else to the server's.
        if isinstance(event, events.Start):
            self.client_state = self.state_wait_for_request_headers
        elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
            yield from self.handle_protocol_error(event)
        elif isinstance(event, (RequestHeaders, RequestData, RequestEndOfMessage)):
            yield from self.client_state(event)
        else:
            yield from self.server_state(event)
    @expect(RequestHeaders)
    def state_wait_for_request_headers(self, event: RequestHeaders) -> layer.CommandGenerator[None]:
        """Validate the request, fill in destination info per mode, and fire the requestheaders hook."""
        if not event.replay_flow:
            self.flow = http.HTTPFlow(
                self.context.client,
                self.context.server
            )
        else:
            self.flow = event.replay_flow
        self.flow.request = event.request
        if err := validate_request(self.mode, self.flow.request):
            self.flow.response = http.Response.make(502, str(err))
            self.client_state = self.state_errored
            return (yield from self.send_response())
        if self.flow.request.method == "CONNECT":
            return (yield from self.handle_connect())
        if self.mode is HTTPMode.transparent:
            # Determine .scheme, .host and .port attributes for transparent requests
            assert self.context.server.address
            self.flow.request.data.host = self.context.server.address[0]
            self.flow.request.data.port = self.context.server.address[1]
            self.flow.request.scheme = "https" if self.context.server.tls else "http"
        elif not self.flow.request.host:
            # We need to extract destination information from the host header.
            try:
                host, port = url.parse_authority(self.flow.request.host_header or "", check=True)
            except ValueError:
                self.flow.response = http.Response.make(
                    400,
                    "HTTP request has no host header, destination unknown."
                )
                self.client_state = self.state_errored
                return (yield from self.send_response())
            else:
                if port is None:
                    port = 443 if self.context.client.tls else 80
                self.flow.request.data.host = host
                self.flow.request.data.port = port
                self.flow.request.scheme = "https" if self.context.client.tls else "http"
        if self.mode is HTTPMode.regular and not self.flow.request.is_http2:
            # Set the request target to origin-form for HTTP/1, some servers don't support absolute-form requests.
            # see https://github.com/mitmproxy/mitmproxy/issues/1759
            self.flow.request.authority = ""
        # update host header in reverse proxy mode
        if self.context.options.mode.startswith("reverse:") and not self.context.options.keep_host_header:
            assert self.context.server.address
            self.flow.request.host_header = url.hostport(
                "https" if self.context.server.tls else "http",
                self.context.server.address[0],
                self.context.server.address[1],
            )
        yield HttpRequestHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return
        if self.flow.request.headers.get("expect", "").lower() == "100-continue":
            # Answer the interim 100 Continue ourselves, then strip the header.
            continue_response = http.Response.make(100)
            continue_response.headers.clear()
            yield SendHttp(ResponseHeaders(self.stream_id, continue_response), self.context.client)
            self.flow.request.headers.pop("expect")
        if self.flow.request.stream:
            yield from self.start_request_stream()
        else:
            self.client_state = self.state_consume_request_body
            self.server_state = self.state_wait_for_response_headers
    def start_request_stream(self) -> layer.CommandGenerator[None]:
        """Fire the request hook early and open the server connection for a streamed request body."""
        if self.flow.response:
            raise NotImplementedError("Can't set a response and enable streaming at the same time.")
        yield HttpRequestHook(self.flow)
        ok = yield from self.make_server_connection()
        if not ok:
            return
        yield SendHttp(
            RequestHeaders(self.stream_id, self.flow.request, end_stream=False),
            self.context.server
        )
        self.client_state = self.state_stream_request_body
    @expect(RequestData, RequestEndOfMessage)
    def state_stream_request_body(self, event: Union[RequestData, RequestEndOfMessage]) -> layer.CommandGenerator[None]:
        """Forward request body chunks to the server, applying the stream modifier if it is callable."""
        if isinstance(event, RequestData):
            if callable(self.flow.request.stream):
                event.data = self.flow.request.stream(event.data)
        elif isinstance(event, RequestEndOfMessage):
            self.flow.request.timestamp_end = time.time()
            self.client_state = self.state_done
            # edge case found while fuzzing:
            # we may arrive here after a hook unpaused the stream,
            # but the server may have sent us a RST_STREAM in the meantime.
            # We need to 1) check the server state and 2) peek into the event queue to
            # see if this is the case.
            if self.server_state == self.state_errored:
                return
            for evt in self._paused_event_queue:
                if isinstance(evt, ResponseProtocolError):
                    return
        yield SendHttp(event, self.context.server)
    @expect(RequestData, RequestEndOfMessage)
    def state_consume_request_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Buffer the full request body, then fire the request hook and forward everything upstream."""
        if isinstance(event, RequestData):
            self.request_body_buf += event.data
        elif isinstance(event, RequestEndOfMessage):
            self.flow.request.timestamp_end = time.time()
            self.flow.request.data.content = self.request_body_buf
            self.request_body_buf = b""
            self.client_state = self.state_done
            yield HttpRequestHook(self.flow)
            if (yield from self.check_killed(True)):
                return
            elif self.flow.response:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.flow.response.timestamp_start = time.time()
                yield HttpResponseHeadersHook(self.flow)
                if (yield from self.check_killed(True)):
                    return
                yield from self.send_response()
            else:
                ok = yield from self.make_server_connection()
                if not ok:
                    return
                content = self.flow.request.raw_content
                yield SendHttp(RequestHeaders(self.stream_id, self.flow.request, not content), self.context.server)
                if content:
                    yield SendHttp(RequestData(self.stream_id, content), self.context.server)
                yield SendHttp(RequestEndOfMessage(self.stream_id), self.context.server)
    @expect(ResponseHeaders)
    def state_wait_for_response_headers(self, event: ResponseHeaders) -> layer.CommandGenerator[None]:
        """Fire the responseheaders hook and choose between streaming and buffering the body."""
        self.flow.response = event.response
        yield HttpResponseHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return
        elif self.flow.response.stream:
            yield SendHttp(event, self.context.client)
            self.server_state = self.state_stream_response_body
        else:
            self.server_state = self.state_consume_response_body
    @expect(ResponseData, ResponseEndOfMessage)
    def state_stream_response_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Forward response body chunks to the client, applying the stream modifier if it is callable."""
        assert self.flow.response
        if isinstance(event, ResponseData):
            if callable(self.flow.response.stream):
                data = self.flow.response.stream(event.data)
            else:
                data = event.data
            yield SendHttp(ResponseData(self.stream_id, data), self.context.client)
        elif isinstance(event, ResponseEndOfMessage):
            yield from self.send_response(already_streamed=True)
    @expect(ResponseData, ResponseEndOfMessage)
    def state_consume_response_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Buffer the full response body, then send the complete response downstream."""
        if isinstance(event, ResponseData):
            self.response_body_buf += event.data
        elif isinstance(event, ResponseEndOfMessage):
            assert self.flow.response
            self.flow.response.data.content = self.response_body_buf
            self.response_body_buf = b""
            yield from self.send_response()
    def send_response(self, already_streamed: bool = False):
        """We have either consumed the entire response from the server or the response was set by an addon."""
        assert self.flow.response
        self.flow.response.timestamp_end = time.time()
        is_websocket = (
            self.flow.response.status_code == 101
            and
            self.flow.response.headers.get("upgrade", "").lower() == "websocket"
            and
            self.flow.request.headers.get("Sec-WebSocket-Version", "").encode() == wsproto.handshake.WEBSOCKET_VERSION
            and
            self.context.options.websocket
        )
        if is_websocket:
            # We need to set this before calling the response hook
            # so that addons can determine if a WebSocket connection is following up.
            self.flow.websocket = WebSocketData()
        yield HttpResponseHook(self.flow)
        self.server_state = self.state_done
        if (yield from self.check_killed(False)):
            return
        if not already_streamed:
            content = self.flow.response.raw_content
            yield SendHttp(ResponseHeaders(self.stream_id, self.flow.response, not content), self.context.client)
            if content:
                yield SendHttp(ResponseData(self.stream_id, content), self.context.client)
        yield SendHttp(ResponseEndOfMessage(self.stream_id), self.context.client)
        if self.flow.response.status_code == 101:
            # Protocol switch: hand the connection over to a child layer.
            if is_websocket:
                self.child_layer = websocket.WebsocketLayer(self.context, self.flow)
            elif self.context.options.rawtcp:
                self.child_layer = tcp.TCPLayer(self.context)
            else:
                yield commands.Log(f"Sent HTTP 101 response, but no protocol is enabled to upgrade to.", "warn")
                yield commands.CloseConnection(self.context.client)
                self.client_state = self.server_state = self.state_errored
                return
            if self.debug:
                yield commands.Log(f"{self.debug}[http] upgrading to {self.child_layer}", "debug")
            yield from self.child_layer.handle_event(events.Start())
            self._handle_event = self.passthrough
            return
    def check_killed(self, emit_error_hook: bool) -> layer.CommandGenerator[bool]:
        """Return True (and tear the stream down) if the flow was killed by an addon or the remote peer."""
        killed_by_us = (
            self.flow.error and self.flow.error.msg == flow.Error.KILLED_MESSAGE
        )
        # The client may have closed the connection while we were waiting for the hook to complete.
        # We peek into the event queue to see if that is the case.
        killed_by_remote = None
        for evt in self._paused_event_queue:
            if isinstance(evt, RequestProtocolError):
                killed_by_remote = evt.message
                break
        if killed_by_remote:
            if not self.flow.error:
                self.flow.error = flow.Error(killed_by_remote)
        if killed_by_us or killed_by_remote:
            if emit_error_hook:
                yield HttpErrorHook(self.flow)
            # Use the special NO_RESPONSE status code to make sure that no error message is sent to the client.
            yield SendHttp(
                ResponseProtocolError(self.stream_id, "killed", status_codes.NO_RESPONSE),
                self.context.client
            )
            self._handle_event = self.state_errored
            return True
        return False
    def handle_protocol_error(
            self,
            event: Union[RequestProtocolError, ResponseProtocolError]
    ) -> layer.CommandGenerator[None]:
        """Propagate a protocol error to the other side and fire the error hook when appropriate."""
        is_client_error_but_we_already_talk_upstream = (
            isinstance(event, RequestProtocolError)
            and self.client_state in (self.state_stream_request_body, self.state_done)
            and self.server_state not in (self.state_done, self.state_errored)
        )
        need_error_hook = not (
            self.client_state in (self.state_wait_for_request_headers, self.state_errored)
            or
            self.server_state in (self.state_done, self.state_errored)
        )
        if is_client_error_but_we_already_talk_upstream:
            yield SendHttp(event, self.context.server)
            self.client_state = self.state_errored
        if need_error_hook:
            # We don't want to trigger both a response hook and an error hook,
            # so we need to check if the response is done yet or not.
            self.flow.error = flow.Error(event.message)
            yield HttpErrorHook(self.flow)
            if (yield from self.check_killed(False)):
                return
        if isinstance(event, ResponseProtocolError):
            if self.client_state != self.state_errored:
                yield SendHttp(event, self.context.client)
            self.server_state = self.state_errored
    def make_server_connection(self) -> layer.CommandGenerator[bool]:
        """Obtain a (possibly reused) server connection; return False after reporting a failure."""
        connection, err = yield GetHttpConnection(
            (self.flow.request.host, self.flow.request.port),
            self.flow.request.scheme == "https",
            self.context.server.via,
        )
        if err:
            yield from self.handle_protocol_error(ResponseProtocolError(self.stream_id, err))
            return False
        else:
            self.context.server = self.flow.server_conn = connection
            return True
    def handle_connect(self) -> layer.CommandGenerator[None]:
        """Handle a CONNECT request by setting the tunnel target and delegating per mode."""
        yield HttpConnectHook(self.flow)
        if (yield from self.check_killed(False)):
            return
        self.context.server.address = (self.flow.request.host, self.flow.request.port)
        if self.mode == HTTPMode.regular:
            yield from self.handle_connect_regular()
        else:
            yield from self.handle_connect_upstream()
    def handle_connect_regular(self):
        """CONNECT in regular mode: optionally connect eagerly, then tunnel via NextLayer."""
        if not self.flow.response and self.context.options.connection_strategy == "eager":
            err = yield commands.OpenConnection(self.context.server)
            if err:
                self.flow.response = http.Response.make(
                    502, f"Cannot connect to {human.format_address(self.context.server.address)}: {err}"
                )
        self.child_layer = layer.NextLayer(self.context)
        yield from self.handle_connect_finish()
    def handle_connect_upstream(self):
        """CONNECT in upstream mode: tunnel through the configured upstream HTTP(S) proxy."""
        assert self.context.server.via.scheme in ("http", "https")
        http_proxy = Server(self.context.server.via.address)
        stack = tunnel.LayerStack()
        if self.context.server.via.scheme == "https":
            http_proxy.sni = self.context.server.via.address[0]
            stack /= tls.ServerTLSLayer(self.context, http_proxy)
        stack /= _upstream_proxy.HttpUpstreamProxy(self.context, http_proxy, True)
        self.child_layer = stack[0]
        yield from self.handle_connect_finish()
    def handle_connect_finish(self):
        """Answer the CONNECT request and, on success, switch to passthrough mode."""
        if not self.flow.response:
            # Do not send any response headers as it breaks proxying non-80 ports on
            # Android emulators using the -http-proxy option.
            self.flow.response = http.Response(
                self.flow.request.data.http_version,
                200,
                b"Connection established",
                http.Headers(),
                b"",
                None,
                time.time(),
                time.time(),
            )
        if 200 <= self.flow.response.status_code < 300:
            yield SendHttp(ResponseHeaders(self.stream_id, self.flow.response, True), self.context.client)
            yield SendHttp(ResponseEndOfMessage(self.stream_id), self.context.client)
            self.child_layer = self.child_layer or layer.NextLayer(self.context)
            yield from self.child_layer.handle_event(events.Start())
            self._handle_event = self.passthrough
        else:
            yield from self.send_response()
    @expect(RequestData, RequestEndOfMessage, events.Event)
    def passthrough(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Translate between HTTP events/commands and raw connection events/commands for the upgraded child layer."""
        assert self.flow.response
        assert self.child_layer
        # HTTP events -> normal connection events
        if isinstance(event, RequestData):
            event = events.DataReceived(self.context.client, event.data)
        elif isinstance(event, ResponseData):
            event = events.DataReceived(self.context.server, event.data)
        elif isinstance(event, RequestEndOfMessage):
            event = events.ConnectionClosed(self.context.client)
        elif isinstance(event, ResponseEndOfMessage):
            event = events.ConnectionClosed(self.context.server)
        for command in self.child_layer.handle_event(event):
            # normal connection events -> HTTP events
            if isinstance(command, commands.SendData):
                if command.connection == self.context.client:
                    yield SendHttp(ResponseData(self.stream_id, command.data), self.context.client)
                elif command.connection == self.context.server and self.flow.response.status_code == 101:
                    # there only is a HTTP server connection if we have switched protocols,
                    # not if a connection is established via CONNECT.
                    yield SendHttp(RequestData(self.stream_id, command.data), self.context.server)
                else:
                    yield command
            elif isinstance(command, commands.CloseConnection):
                if command.connection == self.context.client:
                    yield SendHttp(ResponseProtocolError(self.stream_id, "EOF"), self.context.client)
                elif command.connection == self.context.server and self.flow.response.status_code == 101:
                    yield SendHttp(RequestProtocolError(self.stream_id, "EOF"), self.context.server)
                else:
                    # If we are running TCP over HTTP we want to be consistent with half-closes.
                    # The easiest approach for this is to just always full close for now.
                    # Alternatively, we could signal that we want a half close only through ResponseProtocolError,
                    # but that is more complex to implement.
                    command.half_close = False
                    yield command
            else:
                yield command
    @expect()
    def state_uninitialized(self, _) -> layer.CommandGenerator[None]:
        # Initial state; no events are expected before Start.
        yield from ()
    @expect()
    def state_done(self, _) -> layer.CommandGenerator[None]:
        # Terminal success state; no further events are expected.
        yield from ()
    def state_errored(self, _) -> layer.CommandGenerator[None]:
        # silently consume every event.
        yield from ()
class HttpLayer(layer.Layer):
    """
    ConnectionEvent: We have received b"GET /\r\n\r\n" from the client.
    HttpEvent: We have received request headers
    HttpCommand: Send request headers to X
    ConnectionCommand: Send b"GET /\r\n\r\n" to server.
    ConnectionEvent -> HttpEvent -> HttpCommand -> ConnectionCommand
    """
    mode: HTTPMode
    command_sources: Dict[commands.Command, layer.Layer]  # which child issued each blocking command
    streams: Dict[int, HttpStream]  # one HttpStream per stream id
    connections: Dict[Connection, layer.Layer]  # which child handles which connection
    waiting_for_establishment: DefaultDict[Connection, List[GetHttpConnection]]  # commands queued on pending connections
    def __init__(self, context: Context, mode: HTTPMode):
        super().__init__(context)
        self.mode = mode
        self.waiting_for_establishment = collections.defaultdict(list)
        self.streams = {}
        self.command_sources = {}
        http_conn: HttpConnection
        # Pick the client-side HTTP implementation from the negotiated ALPN.
        if self.context.client.alpn == b"h2":
            http_conn = Http2Server(context.fork())
        else:
            http_conn = Http1Server(context.fork())
        self.connections = {
            context.client: http_conn
        }
    def __repr__(self):
        return f"HttpLayer({self.mode.name}, conns: {len(self.connections)})"
    def _handle_event(self, event: events.Event):
        if isinstance(event, events.Start):
            yield from self.event_to_child(self.connections[self.context.client], event)
            if self.mode is HTTPMode.upstream:
                self.context.server.via = server_spec.parse_with_mode(self.context.options.mode)[1]
        elif isinstance(event, events.CommandCompleted):
            stream = self.command_sources.pop(event.command)
            yield from self.event_to_child(stream, event)
        elif isinstance(event, events.ConnectionEvent):
            if event.connection == self.context.server and self.context.server not in self.connections:
                # We didn't do anything with this connection yet, now the peer has closed it - let's close it too!
                yield commands.CloseConnection(event.connection)
            else:
                handler = self.connections[event.connection]
                yield from self.event_to_child(handler, event)
        else:
            raise AssertionError(f"Unexpected event: {event}")
    def event_to_child(
            self,
            child: Union[layer.Layer, HttpStream],
            event: events.Event,
    ) -> layer.CommandGenerator[None]:
        """Dispatch an event to a child and post-process the commands it yields."""
        for command in child.handle_event(event):
            assert isinstance(command, commands.Command)
            # Streams may yield blocking commands, which ultimately generate CommandCompleted events.
            # Those need to be routed back to the correct stream, so we need to keep track of that.
            if command.blocking:
                self.command_sources[command] = child
            if isinstance(command, ReceiveHttp):
                if isinstance(command.event, RequestHeaders):
                    yield from self.make_stream(command.event.stream_id)
                stream = self.streams[command.event.stream_id]
                yield from self.event_to_child(stream, command.event)
            elif isinstance(command, SendHttp):
                conn = self.connections[command.connection]
                yield from self.event_to_child(conn, command.event)
            elif isinstance(command, GetHttpConnection):
                yield from self.get_connection(command)
            elif isinstance(command, RegisterHttpConnection):
                yield from self.register_connection(command)
            elif isinstance(command, commands.OpenConnection):
                self.connections[command.connection] = child
                yield command
            elif isinstance(command, commands.Command):
                yield command
            else:
                raise AssertionError(f"Not a command: {event}")
    def make_stream(self, stream_id: int) -> layer.CommandGenerator[None]:
        """Create and start a new HttpStream for the given stream id."""
        ctx = self.context.fork()
        self.streams[stream_id] = HttpStream(ctx, stream_id)
        yield from self.event_to_child(self.streams[stream_id], events.Start())
    def get_connection(self, event: GetHttpConnection, *, reuse: bool = True) -> layer.CommandGenerator[None]:
        """Satisfy a GetHttpConnection command, reusing an existing server connection where possible."""
        # Do we already have a connection we can re-use?
        if reuse:
            for connection in self.connections:
                # see "tricky multiplexing edge case" in make_http_connection for an explanation
                conn_is_pending_or_h2 = (
                    connection.alpn == b"h2"
                    or connection in self.waiting_for_establishment
                )
                h2_to_h1 = self.context.client.alpn == b"h2" and not conn_is_pending_or_h2
                connection_suitable = (
                    event.connection_spec_matches(connection)
                    and not h2_to_h1
                )
                if connection_suitable:
                    if connection in self.waiting_for_establishment:
                        self.waiting_for_establishment[connection].append(event)
                        return
                    elif connection.connected:
                        stream = self.command_sources.pop(event)
                        yield from self.event_to_child(stream, GetHttpConnectionCompleted(event, (connection, None)))
                        return
                    else:
                        pass  # the connection is at least half-closed already, we want a new one.
        can_use_context_connection = (
            self.context.server not in self.connections and
            self.context.server.connected and
            event.connection_spec_matches(self.context.server)
        )
        context = self.context.fork()
        stack = tunnel.LayerStack()
        if not can_use_context_connection:
            context.server = Server(event.address)
            if event.tls:
                context.server.sni = event.address[0]
            if event.via:
                assert event.via.scheme in ("http", "https")
                http_proxy = Server(event.via.address)
                if event.via.scheme == "https":
                    http_proxy.alpn_offers = tls.HTTP_ALPNS
                    http_proxy.sni = event.via.address[0]
                    stack /= tls.ServerTLSLayer(context, http_proxy)
                send_connect = not (self.mode == HTTPMode.upstream and not event.tls)
                stack /= _upstream_proxy.HttpUpstreamProxy(context, http_proxy, send_connect)
            if event.tls:
                stack /= tls.ServerTLSLayer(context)
        stack /= HttpClient(context)
        self.connections[context.server] = stack[0]
        self.waiting_for_establishment[context.server].append(event)
        yield from self.event_to_child(stack[0], events.Start())
    def register_connection(self, command: RegisterHttpConnection) -> layer.CommandGenerator[None]:
        """Answer all GetHttpConnection commands that were queued on this connection attempt."""
        waiting = self.waiting_for_establishment.pop(command.connection)
        reply: Union[Tuple[None, str], Tuple[Connection, None]]
        if command.err:
            reply = (None, command.err)
        else:
            reply = (command.connection, None)
        for cmd in waiting:
            stream = self.command_sources.pop(cmd)
            yield from self.event_to_child(stream, GetHttpConnectionCompleted(cmd, reply))
            # Tricky multiplexing edge case: if we do HTTP/2 -> HTTP/1 proxying,
            # the HTTP/1 connection may receive two responses that neither have
            # a content-length specified nor a chunked transfer encoding.
            # We can't process these two flows to the same h1 connection as they would both have
            # "read until eof" semantics. The only workaround left is to open a separate connection for each flow.
            if not command.err and self.context.client.alpn == b"h2" and command.connection.alpn != b"h2":
                for cmd in waiting[1:]:
                    yield from self.get_connection(cmd, reuse=False)
                break
class HttpClient(layer.Layer):
    """Open the server connection, then delegate to an HTTP/1 or HTTP/2 client implementation."""
    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        err: Optional[str]
        if self.context.server.connected:
            err = None
        else:
            err = yield commands.OpenConnection(self.context.server)
        if not err:
            child_layer: layer.Layer
            # Pick the server-side HTTP implementation from the negotiated ALPN.
            if self.context.server.alpn == b"h2":
                child_layer = Http2Client(self.context)
            else:
                child_layer = Http1Client(self.context)
            self._handle_event = child_layer.handle_event
            yield from self._handle_event(event)
        # Always report back, so queued GetHttpConnection commands get answered
        # (with the connection on success, or the error message on failure).
        yield RegisterHttpConnection(self.context.server, err)
|
<filename>mitmproxy/proxy/layers/http/__init__.py
import collections
import enum
import time
from dataclasses import dataclass
from typing import DefaultDict, Dict, List, Optional, Tuple, Union
import wsproto.handshake
from mitmproxy import flow, http
from mitmproxy.connection import Connection, Server
from mitmproxy.net import server_spec
from mitmproxy.net.http import status_codes, url
from mitmproxy.proxy import commands, events, layer, tunnel
from mitmproxy.proxy.layers import tcp, tls, websocket
from mitmproxy.proxy.layers.http import _upstream_proxy
from mitmproxy.proxy.utils import expect
from mitmproxy.utils import human
from mitmproxy.websocket import WebSocketData
from ._base import HttpCommand, HttpConnection, ReceiveHttp, StreamId
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \
ResponseEndOfMessage, ResponseHeaders, ResponseProtocolError
from ._hooks import HttpConnectHook, HttpErrorHook, HttpRequestHeadersHook, HttpRequestHook, HttpResponseHeadersHook, \
HttpResponseHook
from ._http1 import Http1Client, Http1Server
from ._http2 import Http2Client, Http2Server
from ...context import Context
class HTTPMode(enum.Enum):
regular = 1
transparent = 2
upstream = 3
def validate_request(mode, request) -> Optional[str]:
if request.scheme not in ("http", "https", ""):
return f"Invalid request scheme: {request.scheme}"
if mode is HTTPMode.transparent and request.method == "CONNECT":
return (
f"mitmproxy received an HTTP CONNECT request even though it is not running in regular/upstream mode. "
f"This usually indicates a misconfiguration, please see the mitmproxy mode documentation for details."
)
return None
@dataclass
class GetHttpConnection(HttpCommand):
    """
    Open an HTTP Connection. This may not actually open a connection, but return an existing HTTP connection instead.
    """
    blocking = True  # the issuing stream is paused until GetHttpConnectionCompleted arrives
    address: Tuple[str, int]  # (host, port) of the desired server
    tls: bool  # whether the server connection must use TLS
    via: Optional[server_spec.ServerSpec]  # optional upstream proxy the connection must go through

    def __hash__(self):
        # Identity-based hash: each command instance must be a distinct dict
        # key (e.g. in HttpLayer.command_sources), even when two commands
        # request the same (address, tls, via) spec.
        return id(self)

    def connection_spec_matches(self, connection: Connection) -> bool:
        """Return True if an existing server connection satisfies this request."""
        return (
            isinstance(connection, Server)
            and
            self.address == connection.address
            and
            self.tls == connection.tls
            and
            self.via == connection.via
        )
@dataclass
class GetHttpConnectionCompleted(events.CommandCompleted):
    """Reply event for a GetHttpConnection command."""
    command: GetHttpConnection
    reply: Union[Tuple[None, str], Tuple[Connection, None]]
    """connection object, error message"""
@dataclass
class RegisterHttpConnection(HttpCommand):
    """
    Register that a HTTP connection attempt has been completed.
    """
    connection: Connection  # the connection that was (maybe unsuccessfully) established
    err: Optional[str]  # error message, or None on success
@dataclass
class SendHttp(HttpCommand):
    """Deliver an HttpEvent to the given connection's HTTP connection handler."""
    event: HttpEvent
    connection: Connection

    def __repr__(self) -> str:
        # Only show the event; the connection is usually obvious from context.
        return f"Send({self.event})"
class HttpStream(layer.Layer):
    """
    Handles a single HTTP request/response pair ("stream").

    The stream is driven by two state machines: ``client_state`` consumes
    events originating from the client connection (request headers/data),
    ``server_state`` consumes events from the server connection (response
    headers/data). Each state is a bound method the current event is
    dispatched to; transitions happen by reassigning the attribute.
    """
    request_body_buf: bytes  # accumulates request body chunks when not streaming
    response_body_buf: bytes  # accumulates response body chunks when not streaming
    flow: http.HTTPFlow  # the flow object exposed to addon hooks
    stream_id: StreamId  # id of this stream on the client connection
    child_layer: Optional[layer.Layer] = None  # set on CONNECT or protocol upgrade (WebSocket/raw TCP)

    @property
    def mode(self):
        # The proxy mode lives on the parent HttpLayer; look it up through the
        # layer stack instead of storing a duplicate copy here.
        i = self.context.layers.index(self)
        parent: HttpLayer = self.context.layers[i - 1]
        return parent.mode

    def __init__(self, context: Context, stream_id: int):
        super().__init__(context)
        self.request_body_buf = b""
        self.response_body_buf = b""
        self.client_state = self.state_uninitialized
        self.server_state = self.state_uninitialized
        self.stream_id = stream_id

    def __repr__(self):
        return (
            f"HttpStream("
            f"id={self.stream_id}, "
            f"client_state={self.client_state.__name__}, "
            f"server_state={self.server_state.__name__}"
            f")"
        )

    @expect(events.Start, HttpEvent)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Dispatch an incoming event to the appropriate state machine."""
        if isinstance(event, events.Start):
            self.client_state = self.state_wait_for_request_headers
        elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
            # Protocol errors are handled out-of-band, independent of state.
            yield from self.handle_protocol_error(event)
        elif isinstance(event, (RequestHeaders, RequestData, RequestEndOfMessage)):
            yield from self.client_state(event)
        else:
            yield from self.server_state(event)

    @expect(RequestHeaders)
    def state_wait_for_request_headers(self, event: RequestHeaders) -> layer.CommandGenerator[None]:
        """Initial client state: receive request headers and decide how to proceed."""
        if not event.replay_flow:
            self.flow = http.HTTPFlow(
                self.context.client,
                self.context.server
            )
        else:
            # Replayed flows reuse the existing flow object.
            self.flow = event.replay_flow
        self.flow.request = event.request

        if err := validate_request(self.mode, self.flow.request):
            self.flow.response = http.Response.make(502, str(err))
            self.client_state = self.state_errored
            return (yield from self.send_response())

        if self.flow.request.method == "CONNECT":
            return (yield from self.handle_connect())

        if self.mode is HTTPMode.transparent:
            # Determine .scheme, .host and .port attributes for transparent requests
            assert self.context.server.address
            self.flow.request.data.host = self.context.server.address[0]
            self.flow.request.data.port = self.context.server.address[1]
            self.flow.request.scheme = "https" if self.context.server.tls else "http"
        elif not self.flow.request.host:
            # We need to extract destination information from the host header.
            try:
                host, port = url.parse_authority(self.flow.request.host_header or "", check=True)
            except ValueError:
                self.flow.response = http.Response.make(
                    400,
                    "HTTP request has no host header, destination unknown."
                )
                self.client_state = self.state_errored
                return (yield from self.send_response())
            else:
                if port is None:
                    # Default port is derived from the client connection's TLS state.
                    port = 443 if self.context.client.tls else 80
                self.flow.request.data.host = host
                self.flow.request.data.port = port
                self.flow.request.scheme = "https" if self.context.client.tls else "http"

        if self.mode is HTTPMode.regular and not self.flow.request.is_http2:
            # Set the request target to origin-form for HTTP/1, some servers don't support absolute-form requests.
            # see https://github.com/mitmproxy/mitmproxy/issues/1759
            self.flow.request.authority = ""

        # update host header in reverse proxy mode
        if self.context.options.mode.startswith("reverse:") and not self.context.options.keep_host_header:
            assert self.context.server.address
            self.flow.request.host_header = url.hostport(
                "https" if self.context.server.tls else "http",
                self.context.server.address[0],
                self.context.server.address[1],
            )

        yield HttpRequestHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return

        if self.flow.request.headers.get("expect", "").lower() == "100-continue":
            # Answer the interim 100 Continue ourselves and strip the header,
            # so the upstream request is a plain one.
            continue_response = http.Response.make(100)
            continue_response.headers.clear()
            yield SendHttp(ResponseHeaders(self.stream_id, continue_response), self.context.client)
            self.flow.request.headers.pop("expect")

        if self.flow.request.stream:
            yield from self.start_request_stream()
        else:
            self.client_state = self.state_consume_request_body
        self.server_state = self.state_wait_for_response_headers

    def start_request_stream(self) -> layer.CommandGenerator[None]:
        """Begin streaming the request body to the server without buffering it."""
        if self.flow.response:
            raise NotImplementedError("Can't set a response and enable streaming at the same time.")
        # The request hook fires before any body data has been received.
        yield HttpRequestHook(self.flow)
        ok = yield from self.make_server_connection()
        if not ok:
            return
        yield SendHttp(
            RequestHeaders(self.stream_id, self.flow.request, end_stream=False),
            self.context.server
        )
        self.client_state = self.state_stream_request_body

    @expect(RequestData, RequestEndOfMessage)
    def state_stream_request_body(self, event: Union[RequestData, RequestEndOfMessage]) -> layer.CommandGenerator[None]:
        """Forward request body chunks to the server, optionally transformed."""
        if isinstance(event, RequestData):
            if callable(self.flow.request.stream):
                # A callable stream acts as a chunk-wise transform.
                event.data = self.flow.request.stream(event.data)
        elif isinstance(event, RequestEndOfMessage):
            self.flow.request.timestamp_end = time.time()
            self.client_state = self.state_done

        # edge case found while fuzzing:
        # we may arrive here after a hook unpaused the stream,
        # but the server may have sent us a RST_STREAM in the meantime.
        # We need to 1) check the server state and 2) peek into the event queue to
        # see if this is the case.
        if self.server_state == self.state_errored:
            return
        for evt in self._paused_event_queue:
            if isinstance(evt, ResponseProtocolError):
                return

        yield SendHttp(event, self.context.server)

    @expect(RequestData, RequestEndOfMessage)
    def state_consume_request_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Buffer the whole request body, then fire hooks and forward upstream."""
        if isinstance(event, RequestData):
            self.request_body_buf += event.data
        elif isinstance(event, RequestEndOfMessage):
            self.flow.request.timestamp_end = time.time()
            self.flow.request.data.content = self.request_body_buf
            self.request_body_buf = b""
            self.client_state = self.state_done
            yield HttpRequestHook(self.flow)
            if (yield from self.check_killed(True)):
                return
            elif self.flow.response:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.flow.response.timestamp_start = time.time()
                yield HttpResponseHeadersHook(self.flow)
                if (yield from self.check_killed(True)):
                    return
                yield from self.send_response()
            else:
                ok = yield from self.make_server_connection()
                if not ok:
                    return

                content = self.flow.request.raw_content
                yield SendHttp(RequestHeaders(self.stream_id, self.flow.request, not content), self.context.server)
                if content:
                    yield SendHttp(RequestData(self.stream_id, content), self.context.server)
                yield SendHttp(RequestEndOfMessage(self.stream_id), self.context.server)

    @expect(ResponseHeaders)
    def state_wait_for_response_headers(self, event: ResponseHeaders) -> layer.CommandGenerator[None]:
        """Receive response headers and choose streaming vs. buffering of the body."""
        self.flow.response = event.response
        yield HttpResponseHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return
        elif self.flow.response.stream:
            yield SendHttp(event, self.context.client)
            self.server_state = self.state_stream_response_body
        else:
            self.server_state = self.state_consume_response_body

    @expect(ResponseData, ResponseEndOfMessage)
    def state_stream_response_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Forward response body chunks to the client, optionally transformed."""
        assert self.flow.response
        if isinstance(event, ResponseData):
            if callable(self.flow.response.stream):
                data = self.flow.response.stream(event.data)
            else:
                data = event.data
            yield SendHttp(ResponseData(self.stream_id, data), self.context.client)
        elif isinstance(event, ResponseEndOfMessage):
            yield from self.send_response(already_streamed=True)

    @expect(ResponseData, ResponseEndOfMessage)
    def state_consume_response_body(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Buffer the whole response body, then send the complete response."""
        if isinstance(event, ResponseData):
            self.response_body_buf += event.data
        elif isinstance(event, ResponseEndOfMessage):
            assert self.flow.response
            self.flow.response.data.content = self.response_body_buf
            self.response_body_buf = b""
            yield from self.send_response()

    def send_response(self, already_streamed: bool = False) -> layer.CommandGenerator[None]:
        """We have either consumed the entire response from the server or the response was set by an addon."""
        assert self.flow.response
        self.flow.response.timestamp_end = time.time()

        is_websocket = (
            self.flow.response.status_code == 101
            and
            self.flow.response.headers.get("upgrade", "").lower() == "websocket"
            and
            self.flow.request.headers.get("Sec-WebSocket-Version", "").encode() == wsproto.handshake.WEBSOCKET_VERSION
            and
            self.context.options.websocket
        )
        if is_websocket:
            # We need to set this before calling the response hook
            # so that addons can determine if a WebSocket connection is following up.
            self.flow.websocket = WebSocketData()

        yield HttpResponseHook(self.flow)
        self.server_state = self.state_done
        if (yield from self.check_killed(False)):
            return

        if not already_streamed:
            content = self.flow.response.raw_content
            yield SendHttp(ResponseHeaders(self.stream_id, self.flow.response, not content), self.context.client)
            if content:
                yield SendHttp(ResponseData(self.stream_id, content), self.context.client)
        yield SendHttp(ResponseEndOfMessage(self.stream_id), self.context.client)

        if self.flow.response.status_code == 101:
            # 101 Switching Protocols: hand the connection over to a child layer.
            if is_websocket:
                self.child_layer = websocket.WebsocketLayer(self.context, self.flow)
            elif self.context.options.rawtcp:
                self.child_layer = tcp.TCPLayer(self.context)
            else:
                yield commands.Log(f"Sent HTTP 101 response, but no protocol is enabled to upgrade to.", "warn")
                yield commands.CloseConnection(self.context.client)
                self.client_state = self.server_state = self.state_errored
                return
            if self.debug:
                yield commands.Log(f"{self.debug}[http] upgrading to {self.child_layer}", "debug")
            yield from self.child_layer.handle_event(events.Start())
            self._handle_event = self.passthrough
            return

    def check_killed(self, emit_error_hook: bool) -> layer.CommandGenerator[bool]:
        """Return True (and clean up) if the flow was killed by an addon or the peer."""
        killed_by_us = (
            self.flow.error and self.flow.error.msg == flow.Error.KILLED_MESSAGE
        )
        # The client may have closed the connection while we were waiting for the hook to complete.
        # We peek into the event queue to see if that is the case.
        killed_by_remote = None
        for evt in self._paused_event_queue:
            if isinstance(evt, RequestProtocolError):
                killed_by_remote = evt.message
                break

        if killed_by_remote:
            if not self.flow.error:
                self.flow.error = flow.Error(killed_by_remote)
        if killed_by_us or killed_by_remote:
            if emit_error_hook:
                yield HttpErrorHook(self.flow)
            # Use the special NO_RESPONSE status code to make sure that no error message is sent to the client.
            yield SendHttp(
                ResponseProtocolError(self.stream_id, "killed", status_codes.NO_RESPONSE),
                self.context.client
            )
            self._handle_event = self.state_errored
            return True
        return False

    def handle_protocol_error(
        self,
        event: Union[RequestProtocolError, ResponseProtocolError]
    ) -> layer.CommandGenerator[None]:
        """React to a protocol error on either side of the stream."""
        is_client_error_but_we_already_talk_upstream = (
            isinstance(event, RequestProtocolError)
            and self.client_state in (self.state_stream_request_body, self.state_done)
            and self.server_state not in (self.state_done, self.state_errored)
        )
        need_error_hook = not (
            self.client_state in (self.state_wait_for_request_headers, self.state_errored)
            or
            self.server_state in (self.state_done, self.state_errored)
        )

        if is_client_error_but_we_already_talk_upstream:
            # Propagate the abort upstream so the server side can tear down too.
            yield SendHttp(event, self.context.server)
            self.client_state = self.state_errored

        if need_error_hook:
            # We don't want to trigger both a response hook and an error hook,
            # so we need to check if the response is done yet or not.
            self.flow.error = flow.Error(event.message)
            yield HttpErrorHook(self.flow)

            if (yield from self.check_killed(False)):
                return

        if isinstance(event, ResponseProtocolError):
            if self.client_state != self.state_errored:
                yield SendHttp(event, self.context.client)
            self.server_state = self.state_errored

    def make_server_connection(self) -> layer.CommandGenerator[bool]:
        """Ask the parent HttpLayer for a server connection; return success."""
        connection, err = yield GetHttpConnection(
            (self.flow.request.host, self.flow.request.port),
            self.flow.request.scheme == "https",
            self.context.server.via,
        )
        if err:
            yield from self.handle_protocol_error(ResponseProtocolError(self.stream_id, err))
            return False
        else:
            self.context.server = self.flow.server_conn = connection
            return True

    def handle_connect(self) -> layer.CommandGenerator[None]:
        """Handle an HTTP CONNECT request (regular or upstream mode)."""
        yield HttpConnectHook(self.flow)
        if (yield from self.check_killed(False)):
            return

        self.context.server.address = (self.flow.request.host, self.flow.request.port)

        if self.mode == HTTPMode.regular:
            yield from self.handle_connect_regular()
        else:
            yield from self.handle_connect_upstream()

    def handle_connect_regular(self):
        """CONNECT in regular mode: optionally connect eagerly, then hand off."""
        if not self.flow.response and self.context.options.connection_strategy == "eager":
            err = yield commands.OpenConnection(self.context.server)
            if err:
                self.flow.response = http.Response.make(
                    502, f"Cannot connect to {human.format_address(self.context.server.address)}: {err}"
                )
        self.child_layer = layer.NextLayer(self.context)
        yield from self.handle_connect_finish()

    def handle_connect_upstream(self):
        """CONNECT in upstream mode: tunnel through the configured HTTP(S) proxy."""
        assert self.context.server.via.scheme in ("http", "https")

        http_proxy = Server(self.context.server.via.address)

        stack = tunnel.LayerStack()
        if self.context.server.via.scheme == "https":
            http_proxy.sni = self.context.server.via.address[0]
            stack /= tls.ServerTLSLayer(self.context, http_proxy)
        stack /= _upstream_proxy.HttpUpstreamProxy(self.context, http_proxy, True)

        self.child_layer = stack[0]
        yield from self.handle_connect_finish()

    def handle_connect_finish(self):
        """Send the CONNECT reply and switch to passthrough mode on success."""
        if not self.flow.response:
            # Do not send any response headers as it breaks proxying non-80 ports on
            # Android emulators using the -http-proxy option.
            self.flow.response = http.Response(
                self.flow.request.data.http_version,
                200,
                b"Connection established",
                http.Headers(),
                b"",
                None,
                time.time(),
                time.time(),
            )

        if 200 <= self.flow.response.status_code < 300:
            yield SendHttp(ResponseHeaders(self.stream_id, self.flow.response, True), self.context.client)
            yield SendHttp(ResponseEndOfMessage(self.stream_id), self.context.client)
            self.child_layer = self.child_layer or layer.NextLayer(self.context)
            yield from self.child_layer.handle_event(events.Start())
            self._handle_event = self.passthrough
        else:
            yield from self.send_response()

    @expect(RequestData, RequestEndOfMessage, events.Event)
    def passthrough(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Translate between HTTP stream events and raw connection events for a child layer."""
        assert self.flow.response
        assert self.child_layer
        # HTTP events -> normal connection events
        if isinstance(event, RequestData):
            event = events.DataReceived(self.context.client, event.data)
        elif isinstance(event, ResponseData):
            event = events.DataReceived(self.context.server, event.data)
        elif isinstance(event, RequestEndOfMessage):
            event = events.ConnectionClosed(self.context.client)
        elif isinstance(event, ResponseEndOfMessage):
            event = events.ConnectionClosed(self.context.server)

        for command in self.child_layer.handle_event(event):
            # normal connection events -> HTTP events
            if isinstance(command, commands.SendData):
                if command.connection == self.context.client:
                    yield SendHttp(ResponseData(self.stream_id, command.data), self.context.client)
                elif command.connection == self.context.server and self.flow.response.status_code == 101:
                    # there only is a HTTP server connection if we have switched protocols,
                    # not if a connection is established via CONNECT.
                    yield SendHttp(RequestData(self.stream_id, command.data), self.context.server)
                else:
                    yield command
            elif isinstance(command, commands.CloseConnection):
                if command.connection == self.context.client:
                    yield SendHttp(ResponseProtocolError(self.stream_id, "EOF"), self.context.client)
                elif command.connection == self.context.server and self.flow.response.status_code == 101:
                    yield SendHttp(RequestProtocolError(self.stream_id, "EOF"), self.context.server)
                else:
                    # If we are running TCP over HTTP we want to be consistent with half-closes.
                    # The easiest approach for this is to just always full close for now.
                    # Alternatively, we could signal that we want a half close only through ResponseProtocolError,
                    # but that is more complex to implement.
                    command.half_close = False
                    yield command
            else:
                yield command

    @expect()
    def state_uninitialized(self, _) -> layer.CommandGenerator[None]:
        # Placeholder state before Start; accepts nothing.
        yield from ()

    @expect()
    def state_done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state; accepts nothing.
        yield from ()

    def state_errored(self, _) -> layer.CommandGenerator[None]:
        # silently consume every event.
        yield from ()
class HttpLayer(layer.Layer):
    """
    ConnectionEvent: We have received b"GET /\r\n\r\n" from the client.
    HttpEvent: We have received request headers
    HttpCommand: Send request headers to X
    ConnectionCommand: Send b"GET /\r\n\r\n" to server.

    ConnectionEvent -> HttpEvent -> HttpCommand -> ConnectionCommand
    """
    mode: HTTPMode  # regular / transparent / upstream
    command_sources: Dict[commands.Command, layer.Layer]  # maps blocking commands back to their issuing child
    streams: Dict[int, HttpStream]  # one HttpStream per client stream id
    connections: Dict[Connection, layer.Layer]  # connection -> handler (Http1/2 server/client layers, tunnels)
    waiting_for_establishment: DefaultDict[Connection, List[GetHttpConnection]]  # pending connect requests per server

    def __init__(self, context: Context, mode: HTTPMode):
        super().__init__(context)
        self.mode = mode

        self.waiting_for_establishment = collections.defaultdict(list)
        self.streams = {}
        self.command_sources = {}

        # Pick the server-side HTTP implementation from the client's ALPN result.
        http_conn: HttpConnection
        if self.context.client.alpn == b"h2":
            http_conn = Http2Server(context.fork())
        else:
            http_conn = Http1Server(context.fork())

        self.connections = {
            context.client: http_conn
        }

    def __repr__(self):
        return f"HttpLayer({self.mode.name}, conns: {len(self.connections)})"

    def _handle_event(self, event: events.Event):
        """Route events to the client HTTP connection, streams, or server handlers."""
        if isinstance(event, events.Start):
            yield from self.event_to_child(self.connections[self.context.client], event)
            if self.mode is HTTPMode.upstream:
                # In upstream mode all server connections go via the configured proxy.
                self.context.server.via = server_spec.parse_with_mode(self.context.options.mode)[1]
        elif isinstance(event, events.CommandCompleted):
            stream = self.command_sources.pop(event.command)
            yield from self.event_to_child(stream, event)
        elif isinstance(event, events.ConnectionEvent):
            if event.connection == self.context.server and self.context.server not in self.connections:
                # We didn't do anything with this connection yet, now the peer has closed it - let's close it too!
                yield commands.CloseConnection(event.connection)
            else:
                handler = self.connections[event.connection]
                yield from self.event_to_child(handler, event)
        else:
            raise AssertionError(f"Unexpected event: {event}")

    def event_to_child(
        self,
        child: Union[layer.Layer, HttpStream],
        event: events.Event,
    ) -> layer.CommandGenerator[None]:
        """Feed an event to a child and translate/route the commands it yields."""
        for command in child.handle_event(event):
            assert isinstance(command, commands.Command)
            # Streams may yield blocking commands, which ultimately generate CommandCompleted events.
            # Those need to be routed back to the correct stream, so we need to keep track of that.

            if command.blocking:
                self.command_sources[command] = child

            if isinstance(command, ReceiveHttp):
                if isinstance(command.event, RequestHeaders):
                    # First event of a new stream: create the HttpStream for it.
                    yield from self.make_stream(command.event.stream_id)
                stream = self.streams[command.event.stream_id]
                yield from self.event_to_child(stream, command.event)
            elif isinstance(command, SendHttp):
                conn = self.connections[command.connection]
                yield from self.event_to_child(conn, command.event)
            elif isinstance(command, GetHttpConnection):
                yield from self.get_connection(command)
            elif isinstance(command, RegisterHttpConnection):
                yield from self.register_connection(command)
            elif isinstance(command, commands.OpenConnection):
                # Remember which child owns this connection for later event routing.
                self.connections[command.connection] = child
                yield command
            elif isinstance(command, commands.Command):
                yield command
            else:
                raise AssertionError(f"Not a command: {event}")

    def make_stream(self, stream_id: int) -> layer.CommandGenerator[None]:
        """Create and start the HttpStream for a new stream id."""
        ctx = self.context.fork()
        self.streams[stream_id] = HttpStream(ctx, stream_id)
        yield from self.event_to_child(self.streams[stream_id], events.Start())

    def get_connection(self, event: GetHttpConnection, *, reuse: bool = True) -> layer.CommandGenerator[None]:
        """Satisfy a GetHttpConnection command, reusing an existing connection if possible."""
        # Do we already have a connection we can re-use?
        if reuse:
            for connection in self.connections:
                # see "tricky multiplexing edge case" in make_http_connection for an explanation
                conn_is_pending_or_h2 = (
                    connection.alpn == b"h2"
                    or connection in self.waiting_for_establishment
                )
                h2_to_h1 = self.context.client.alpn == b"h2" and not conn_is_pending_or_h2
                connection_suitable = (
                    event.connection_spec_matches(connection)
                    and not h2_to_h1
                )
                if connection_suitable:
                    if connection in self.waiting_for_establishment:
                        # Connection attempt already in flight: queue up behind it.
                        self.waiting_for_establishment[connection].append(event)
                        return
                    elif connection.connected:
                        stream = self.command_sources.pop(event)
                        yield from self.event_to_child(stream, GetHttpConnectionCompleted(event, (connection, None)))
                        return
                    else:
                        pass  # the connection is at least half-closed already, we want a new one.

        can_use_context_connection = (
            self.context.server not in self.connections and
            self.context.server.connected and
            event.connection_spec_matches(self.context.server)
        )
        context = self.context.fork()

        stack = tunnel.LayerStack()

        if not can_use_context_connection:
            context.server = Server(event.address)
            if event.tls:
                context.server.sni = event.address[0]

            if event.via:
                # Build the upstream-proxy tunnel (optionally TLS to the proxy).
                assert event.via.scheme in ("http", "https")
                http_proxy = Server(event.via.address)

                if event.via.scheme == "https":
                    http_proxy.alpn_offers = tls.HTTP_ALPNS
                    http_proxy.sni = event.via.address[0]
                    stack /= tls.ServerTLSLayer(context, http_proxy)

                send_connect = not (self.mode == HTTPMode.upstream and not event.tls)
                stack /= _upstream_proxy.HttpUpstreamProxy(context, http_proxy, send_connect)
            if event.tls:
                stack /= tls.ServerTLSLayer(context)

        stack /= HttpClient(context)

        self.connections[context.server] = stack[0]
        self.waiting_for_establishment[context.server].append(event)

        yield from self.event_to_child(stack[0], events.Start())

    def register_connection(self, command: RegisterHttpConnection) -> layer.CommandGenerator[None]:
        """Resolve all GetHttpConnection commands waiting on a finished connection attempt."""
        waiting = self.waiting_for_establishment.pop(command.connection)

        reply: Union[Tuple[None, str], Tuple[Connection, None]]
        if command.err:
            reply = (None, command.err)
        else:
            reply = (command.connection, None)

        for cmd in waiting:
            stream = self.command_sources.pop(cmd)
            yield from self.event_to_child(stream, GetHttpConnectionCompleted(cmd, reply))

            # Somewhat ugly edge case: If we do HTTP/2 -> HTTP/1 proxying we don't want
            # to handle everything over a single connection.
            # Tricky multiplexing edge case: Assume we are doing HTTP/2 -> HTTP/1 proxying,
            #
            # that receives two responses
            # that neither have a content-length specified nor a chunked transfer encoding.
            # We can't process these two flows to the same h1 connection as they would both have
            # "read until eof" semantics. The only workaround left is to open a separate connection for each flow.
            if not command.err and self.context.client.alpn == b"h2" and command.connection.alpn != b"h2":
                for cmd in waiting[1:]:
                    yield from self.get_connection(cmd, reuse=False)
                break
class HttpClient(layer.Layer):
    """Open the server connection and delegate to an HTTP/1 or HTTP/2 client layer."""

    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        err: Optional[str]
        if self.context.server.connected:
            # Connection was established earlier (e.g. eager CONNECT); nothing to open.
            err = None
        else:
            err = yield commands.OpenConnection(self.context.server)
        if not err:
            # Choose the client implementation based on the negotiated ALPN.
            child_layer: layer.Layer
            if self.context.server.alpn == b"h2":
                child_layer = Http2Client(self.context)
            else:
                child_layer = Http1Client(self.context)
            self._handle_event = child_layer.handle_event
            yield from self._handle_event(event)
        # Registered last, so the child layer is fully started before waiting
        # streams are notified.
        yield RegisterHttpConnection(self.context.server, err)
|
en
| 0.92779
|
Open an HTTP Connection. This may not actually open a connection, but return an existing HTTP connection instead. connection object, error message Register that a HTTP connection attempt has been completed. # Determine .scheme, .host and .port attributes for transparent requests # We need to extract destination information from the host header. # Set the request target to origin-form for HTTP/1, some servers don't support absolute-form requests. # see https://github.com/mitmproxy/mitmproxy/issues/1759 # update host header in reverse proxy mode # edge case found while fuzzing: # we may arrive here after a hook unpaused the stream, # but the server may have sent us a RST_STREAM in the meantime. # We need to 1) check the server state and 2) peek into the event queue to # see if this is the case. # response was set by an inline script. # we now need to emulate the responseheaders hook. We have either consumed the entire response from the server or the response was set by an addon. # We need to set this before calling the response hook # so that addons can determine if a WebSocket connection is following up. # The client may have closed the connection while we were waiting for the hook to complete. # We peek into the event queue to see if that is the case. # Use the special NO_RESPONSE status code to make sure that no error message is sent to the client. # We don't want to trigger both a response hook and an error hook, # so we need to check if the response is done yet or not. # Do not send any response headers as it breaks proxying non-80 ports on # Android emulators using the -http-proxy option. # HTTP events -> normal connection events # normal connection events -> HTTP events # there only is a HTTP server connection if we have switched protocols, # not if a connection is established via CONNECT. # If we are running TCP over HTTP we want to be consistent with half-closes. # The easiest approach for this is to just always full close for now. 
# Alternatively, we could signal that we want a half close only through ResponseProtocolError, # but that is more complex to implement. # silently consume every event. ConnectionEvent: We have received b"GET /\r\n\r\n" from the client. HttpEvent: We have received request headers HttpCommand: Send request headers to X ConnectionCommand: Send b"GET /\r\n\r\n" to server. ConnectionEvent -> HttpEvent -> HttpCommand -> ConnectionCommand # We didn't do anything with this connection yet, now the peer has closed it - let's close it too! # Streams may yield blocking commands, which ultimately generate CommandCompleted events. # Those need to be routed back to the correct stream, so we need to keep track of that. # Do we already have a connection we can re-use? # see "tricky multiplexing edge case" in make_http_connection for an explanation # the connection is at least half-closed already, we want a new one. # Somewhat ugly edge case: If we do HTTP/2 -> HTTP/1 proxying we don't want # to handle everything over a single connection. # Tricky multiplexing edge case: Assume we are doing HTTP/2 -> HTTP/1 proxying, # # that receives two responses # that neither have a content-length specified nor a chunked transfer encoding. # We can't process these two flows to the same h1 connection as they would both have # "read until eof" semantics. The only workaround left is to open a separate connection for each flow.
| 2.136348
| 2
|
toscaparser/tosca_template.py
|
mikidep/tosca-parser
| 99
|
6628843
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from copy import deepcopy
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidTemplateVersion
from toscaparser.common.exception import MissingRequiredFieldError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.common.exception import ValidationError
from toscaparser.elements.entity_type import update_definitions
from toscaparser.extensions.exttools import ExtTools
import toscaparser.imports
from toscaparser.prereq.csar import CSAR
from toscaparser.repositories import Repository
from toscaparser.topology_template import TopologyTemplate
from toscaparser.tpl_relationship_graph import ToscaGraph
from toscaparser.utils.gettextutils import _
import toscaparser.utils.yamlparser
# TOSCA template key names
SECTIONS = (DEFINITION_VERSION, DEFAULT_NAMESPACE, TEMPLATE_NAME,
            TOPOLOGY_TEMPLATE, TEMPLATE_AUTHOR, TEMPLATE_VERSION,
            DESCRIPTION, IMPORTS, DSL_DEFINITIONS, NODE_TYPES,
            RELATIONSHIP_TYPES, RELATIONSHIP_TEMPLATES,
            CAPABILITY_TYPES, ARTIFACT_TYPES, DATA_TYPES, INTERFACE_TYPES,
            POLICY_TYPES, GROUP_TYPES, REPOSITORIES) = \
           ('tosca_definitions_version', 'tosca_default_namespace',
            'template_name', 'topology_template', 'template_author',
            'template_version', 'description', 'imports', 'dsl_definitions',
            'node_types', 'relationship_types', 'relationship_templates',
            'capability_types', 'artifact_types', 'data_types',
            'interface_types', 'policy_types', 'group_types', 'repositories')

# Sections that are specific to individual template definitions.
# NOTE: this must be a real tuple. The previous form
# ``SPECIAL_SECTIONS = (METADATA) = ('metadata')`` bound plain strings
# (parentheses without a comma are just grouping), so membership tests such
# as ``x in SPECIAL_SECTIONS`` silently became *substring* checks, e.g.
# ``'data' in 'metadata'`` is True. The trailing commas make it a tuple.
SPECIAL_SECTIONS = (METADATA,) = ('metadata',)
# Module-level logger for the TOSCA model parser.
log = logging.getLogger("tosca.model")

# Function used to load YAML template files; kept as a module-level alias so
# it can be swapped/mocked in one place.
YAML_LOADER = toscaparser.utils.yamlparser.load_yaml
class ToscaTemplate(object):
    '''Load the template data.'''

    # Extension handler providing additional template versions/sections.
    exttools = ExtTools()

    MAIN_TEMPLATE_VERSIONS = ['tosca_simple_yaml_1_0',
                              'tosca_simple_yaml_1_2']

    # Built-in versions plus any versions contributed by extensions.
    VALID_TEMPLATE_VERSIONS = MAIN_TEMPLATE_VERSIONS + exttools.get_versions()

    # Extra top-level sections allowed per template version.
    ADDITIONAL_SECTIONS = {'tosca_simple_yaml_1_0': SPECIAL_SECTIONS,
                           'tosca_simple_yaml_1_2': SPECIAL_SECTIONS}
    ADDITIONAL_SECTIONS.update(exttools.get_sections())
    def __init__(self, path=None, parsed_params=None, a_file=True,
                 yaml_dict_tpl=None):
        """Parse a TOSCA template from a file path or an already-loaded dict.

        :param path: path (or URL/CSAR, resolved by _get_path) of the template
        :param parsed_params: input parameter values supplied by the caller
        :param a_file: whether ``path`` refers to a local file
        :param yaml_dict_tpl: pre-parsed template dict (ignored if path given)
        """
        # Collect validation errors instead of failing fast; they are raised
        # together by verify_template() at the end.
        ExceptionCollector.start()
        self.a_file = a_file
        self.input_path = None
        self.path = None
        self.tpl = None
        self.nested_tosca_tpls_with_topology = {}
        self.nested_tosca_templates_with_topology = []
        if path:
            self.input_path = path
            self.path = self._get_path(path)
            if self.path:
                self.tpl = YAML_LOADER(self.path, self.a_file)
            if yaml_dict_tpl:
                # path takes precedence; warn that the dict is ignored.
                msg = (_('Both path and yaml_dict_tpl arguments were '
                         'provided. Using path and ignoring yaml_dict_tpl.'))
                log.info(msg)
                print(msg)
        else:
            if yaml_dict_tpl:
                self.tpl = yaml_dict_tpl
            else:
                ExceptionCollector.appendException(
                    ValueError(_('No path or yaml_dict_tpl was provided. '
                                 'There is nothing to parse.')))

        if self.tpl:
            self.parsed_params = parsed_params
            self._validate_field()
            self.version = self._tpl_version()
            self.relationship_types = self._tpl_relationship_types()
            self.description = self._tpl_description()
            self.topology_template = self._topology_template()
            self.repositories = self._tpl_repositories()
            if self.topology_template.tpl:
                # Only populate topology-derived attributes when a
                # topology_template section is actually present.
                self.inputs = self._inputs()
                self.relationship_templates = self._relationship_templates()
                self.nodetemplates = self._nodetemplates()
                self.outputs = self._outputs()
                self.policies = self._policies()
                self._handle_nested_tosca_templates_with_topology()
                self.graph = ToscaGraph(self.nodetemplates)

        ExceptionCollector.stop()
        # Raises ValidationError if any exceptions were collected above.
        self.verify_template()
def _topology_template(self):
return TopologyTemplate(self._tpl_topology_template(),
self._get_all_custom_defs(),
self.relationship_types,
self.parsed_params,
None)
def _inputs(self):
return self.topology_template.inputs
def _nodetemplates(self):
return self.topology_template.nodetemplates
def _relationship_templates(self):
return self.topology_template.relationship_templates
def _outputs(self):
return self.topology_template.outputs
def _tpl_version(self):
return self.tpl.get(DEFINITION_VERSION)
def _tpl_description(self):
desc = self.tpl.get(DESCRIPTION)
if desc:
return desc.rstrip()
def _tpl_imports(self):
return self.tpl.get(IMPORTS)
def _tpl_repositories(self):
repositories = self.tpl.get(REPOSITORIES)
reposit = []
if repositories:
for name, val in repositories.items():
reposits = Repository(name, val)
reposit.append(reposits)
return reposit
    def _tpl_relationship_types(self):
        # Custom relationship types declared via imports and/or inline.
        # The second tuple element (nested imports) is intentionally ignored.
        custom_rel, _ = self._get_custom_types(RELATIONSHIP_TYPES)
        return custom_rel

    def _tpl_relationship_templates(self):
        # 'relationship_templates' sub-section of the raw topology template.
        topology_template = self._tpl_topology_template()
        return topology_template.get(RELATIONSHIP_TEMPLATES)

    def _tpl_topology_template(self):
        # Raw 'topology_template' section (dict), or None if absent.
        return self.tpl.get(TOPOLOGY_TEMPLATE)

    def _policies(self):
        # Convenience accessor: policies parsed by the topology template.
        return self.topology_template.policies
    def _get_all_custom_defs(self, imports=None, path=None):
        """Recursively collect custom type definitions from all imports.

        Walks nested imports depth-first, merging every custom type
        section into one flat dict keyed by type name.  The IMPORTS key
        is stripped from the result because imports themselves are not
        type definitions.
        """
        types = [IMPORTS, NODE_TYPES, CAPABILITY_TYPES, RELATIONSHIP_TYPES,
                 DATA_TYPES, INTERFACE_TYPES, POLICY_TYPES, GROUP_TYPES]
        custom_defs_final = {}
        custom_defs, nested_imports = self._get_custom_types(
            types, imports, path)
        if custom_defs:
            custom_defs_final.update(custom_defs)
            if nested_imports:
                # Recurse into each imported file's own imports.
                for a_file, nested_import in nested_imports.items():
                    import_defs = self._get_all_custom_defs(
                        nested_import, a_file)
                    custom_defs_final.update(import_defs)

        # As imports are not custom_types, removing from the dict
        custom_defs_final.pop(IMPORTS, None)
        return custom_defs_final
    def _get_custom_types(self, type_definitions, imports=None,
                          path=None):
        """Handle custom types defined in imported template files

        This method loads the custom type definitions referenced in "imports"
        section of the TOSCA YAML template.

        Returns a (custom_defs, nested_imports) pair; both are None when
        imports were processed but produced no definitions.
        """
        custom_defs = {}
        nested_imports = None
        # Normalize the requested section names into a list.
        type_defs = []
        if not isinstance(type_definitions, list):
            type_defs.append(type_definitions)
        else:
            type_defs = type_definitions

        # Default to this template's own imports section and file path.
        if not imports:
            imports = self._tpl_imports()
        if not path:
            path = self.path

        if imports:
            custom_service = toscaparser.imports.\
                ImportsLoader(imports, path, type_defs, self.tpl)

            # Side effect: remember any imported template that carries
            # its own topology, for later substitution-mapping handling.
            nested_tosca_tpls = custom_service.get_nested_tosca_tpls()
            self._update_nested_tosca_tpls_with_topology(nested_tosca_tpls)

            nested_imports = custom_service.get_nested_imports()

            custom_defs = custom_service.get_custom_defs()
            if not custom_defs:
                return None, None

        # Handle custom types defined in current template file
        for type_def in type_defs:
            if type_def != IMPORTS:
                inner_custom_types = self.tpl.get(type_def) or {}
                if inner_custom_types:
                    custom_defs.update(inner_custom_types)
        return custom_defs, nested_imports
    def _update_nested_tosca_tpls_with_topology(self, nested_tosca_tpls):
        """Record imported templates that carry their own topology.

        Each entry of nested_tosca_tpls is a one-item {filename: template}
        dict; only templates with a 'topology_template' section are kept,
        and a filename already recorded is never overwritten.
        """
        for tpl in nested_tosca_tpls:
            filename, tosca_tpl = list(tpl.items())[0]
            if (tosca_tpl.get(TOPOLOGY_TEMPLATE) and
                filename not in list(
                    self.nested_tosca_tpls_with_topology.keys())):
                self.nested_tosca_tpls_with_topology.update(tpl)
    def _handle_nested_tosca_templates_with_topology(self):
        """Wire substituted node templates to their nested topologies.

        For each imported template with its own topology, find the node
        templates it substitutes, parse the nested topology with the
        node's properties as inputs, and attach the resulting
        substitution-mapping object to the node.
        """
        for fname, tosca_tpl in self.nested_tosca_tpls_with_topology.items():
            for nodetemplate in self.nodetemplates:
                if self._is_sub_mapped_node(nodetemplate, tosca_tpl):
                    parsed_params = self._get_params_for_nested_template(
                        nodetemplate)
                    topology_tpl = tosca_tpl.get(TOPOLOGY_TEMPLATE)
                    topology_with_sub_mapping = TopologyTemplate(
                        topology_tpl,
                        self._get_all_custom_defs(),
                        self.relationship_types,
                        parsed_params,
                        nodetemplate)
                    if topology_with_sub_mapping.substitution_mappings:
                        # Record nested topo templates in top level template
                        self.nested_tosca_templates_with_topology.\
                            append(topology_with_sub_mapping)
                        # Set substitution mapping object for mapped node
                        nodetemplate.sub_mapping_tosca_template = \
                            topology_with_sub_mapping.substitution_mappings
    def _validate_field(self):
        """Validate the required version field and all top-level sections.

        Errors are accumulated in the ExceptionCollector instead of
        being raised immediately.
        """
        version = self._tpl_version()
        if not version:
            ExceptionCollector.appendException(
                MissingRequiredFieldError(what='Template',
                                          required=DEFINITION_VERSION))
        else:
            self._validate_version(version)
            self.version = version
        # Flag any unknown top-level section for this template version.
        for name in self.tpl:
            if (name not in SECTIONS and
                name not in self.ADDITIONAL_SECTIONS.get(version, ())):
                ExceptionCollector.appendException(
                    UnknownFieldError(what='Template', field=name))
    def _validate_version(self, version):
        """Check the version against the supported template versions.

        For extension-provided versions (anything beyond the main
        versions), load their additional type definitions.
        """
        if version not in self.VALID_TEMPLATE_VERSIONS:
            ExceptionCollector.appendException(
                InvalidTemplateVersion(
                    what=version,
                    valid_versions='", "'. join(self.VALID_TEMPLATE_VERSIONS)))
        else:
            if version not in self.MAIN_TEMPLATE_VERSIONS:
                update_definitions(version)
    def _get_path(self, path):
        """Resolve ``path`` to a YAML template file.

        A .yaml/.yml path is returned as-is; a .zip/.csar archive is
        validated, decompressed, and the path of its main template is
        returned.  Any other extension records an error and returns None.
        """
        if path.lower().endswith('.yaml') or path.lower().endswith('.yml'):
            return path
        elif path.lower().endswith(('.zip', '.csar')):
            # a CSAR archive
            csar = CSAR(path, self.a_file)
            if csar.validate():
                csar.decompress()
                self.a_file = True  # the file has been decompressed locally
                return os.path.join(csar.temp_dir, csar.get_main_template())
        else:
            ExceptionCollector.appendException(
                ValueError(_('"%(path)s" is not a valid file.')
                           % {'path': path}))
    def verify_template(self):
        """Raise ValidationError if any errors were collected while parsing;
        otherwise log a success message.  The message wording differs
        depending on whether input came from a file path or a pre-parsed
        dict."""
        if ExceptionCollector.exceptionsCaught():
            if self.input_path:
                raise ValidationError(
                    message=(_('\nThe input "%(path)s" failed validation with '
                               'the following error(s): \n\n\t')
                             % {'path': self.input_path}) +
                    '\n\t'.join(ExceptionCollector.getExceptionsReport()))
            else:
                raise ValidationError(
                    message=_('\nThe pre-parsed input failed validation with '
                              'the following error(s): \n\n\t') +
                    '\n\t'.join(ExceptionCollector.getExceptionsReport()))
        else:
            if self.input_path:
                msg = (_('The input "%(path)s" successfully passed '
                         'validation.') % {'path': self.input_path})
            else:
                msg = _('The pre-parsed input successfully passed validation.')
            log.info(msg)
    def _is_sub_mapped_node(self, nodetemplate, tosca_tpl):
        """Return True if the node template is substituted by ``tosca_tpl``."""
        # NOTE(ueha): Since condition "not nodetemplate.sub_mapping_tosca_\
        #             template" was deleted as a fix for bug/1883220, there is
        #             some possibility of breaking something on translator side
        #             that current tests do not cover.
        #             And this enhancement does not align with TOSCA standard
        #             but is needed for ETSI NFV-SOL 001.
        # A node is considered substituted when its type equals the
        # substitution-mapping node type of the nested template and it
        # declares no interfaces of its own.
        if (nodetemplate and
            self.get_sub_mapping_node_type(tosca_tpl) == nodetemplate.type
                and len(nodetemplate.interfaces) < 1):
            return True
        else:
            return False
def _get_params_for_nested_template(self, nodetemplate):
"""Return total params for nested_template."""
parsed_params = deepcopy(self.parsed_params) \
if self.parsed_params else {}
if nodetemplate:
for pname in nodetemplate.get_properties():
parsed_params.update({pname:
nodetemplate.get_property_value(pname)})
return parsed_params
    def get_sub_mapping_node_type(self, tosca_tpl):
        """Return substitution mappings node type.

        Returns None when ``tosca_tpl`` is falsy (delegates to
        TopologyTemplate otherwise).
        """
        if tosca_tpl:
            return TopologyTemplate.get_sub_mapping_node_type(
                tosca_tpl.get(TOPOLOGY_TEMPLATE))

    def _has_substitution_mappings(self):
        """Return True if the template has valid substitution mappings."""
        return self.topology_template is not None and \
            self.topology_template.substitution_mappings is not None
def has_nested_templates(self):
"""Return True if the tosca template has nested templates."""
return self.nested_tosca_templates_with_topology is not None and \
len(self.nested_tosca_templates_with_topology) >= 1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from copy import deepcopy
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidTemplateVersion
from toscaparser.common.exception import MissingRequiredFieldError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.common.exception import ValidationError
from toscaparser.elements.entity_type import update_definitions
from toscaparser.extensions.exttools import ExtTools
import toscaparser.imports
from toscaparser.prereq.csar import CSAR
from toscaparser.repositories import Repository
from toscaparser.topology_template import TopologyTemplate
from toscaparser.tpl_relationship_graph import ToscaGraph
from toscaparser.utils.gettextutils import _
import toscaparser.utils.yamlparser
# TOSCA template key names
SECTIONS = (DEFINITION_VERSION, DEFAULT_NAMESPACE, TEMPLATE_NAME,
TOPOLOGY_TEMPLATE, TEMPLATE_AUTHOR, TEMPLATE_VERSION,
DESCRIPTION, IMPORTS, DSL_DEFINITIONS, NODE_TYPES,
RELATIONSHIP_TYPES, RELATIONSHIP_TEMPLATES,
CAPABILITY_TYPES, ARTIFACT_TYPES, DATA_TYPES, INTERFACE_TYPES,
POLICY_TYPES, GROUP_TYPES, REPOSITORIES) = \
('tosca_definitions_version', 'tosca_default_namespace',
'template_name', 'topology_template', 'template_author',
'template_version', 'description', 'imports', 'dsl_definitions',
'node_types', 'relationship_types', 'relationship_templates',
'capability_types', 'artifact_types', 'data_types',
'interface_types', 'policy_types', 'group_types', 'repositories')
# Sections that are specific to individual template definitions
SPECIAL_SECTIONS = (METADATA) = ('metadata')
log = logging.getLogger("tosca.model")
YAML_LOADER = toscaparser.utils.yamlparser.load_yaml
class ToscaTemplate(object):
exttools = ExtTools()
MAIN_TEMPLATE_VERSIONS = ['tosca_simple_yaml_1_0',
'tosca_simple_yaml_1_2']
VALID_TEMPLATE_VERSIONS = MAIN_TEMPLATE_VERSIONS + exttools.get_versions()
ADDITIONAL_SECTIONS = {'tosca_simple_yaml_1_0': SPECIAL_SECTIONS,
'tosca_simple_yaml_1_2': SPECIAL_SECTIONS}
ADDITIONAL_SECTIONS.update(exttools.get_sections())
'''Load the template data.'''
def __init__(self, path=None, parsed_params=None, a_file=True,
yaml_dict_tpl=None):
ExceptionCollector.start()
self.a_file = a_file
self.input_path = None
self.path = None
self.tpl = None
self.nested_tosca_tpls_with_topology = {}
self.nested_tosca_templates_with_topology = []
if path:
self.input_path = path
self.path = self._get_path(path)
if self.path:
self.tpl = YAML_LOADER(self.path, self.a_file)
if yaml_dict_tpl:
msg = (_('Both path and yaml_dict_tpl arguments were '
'provided. Using path and ignoring yaml_dict_tpl.'))
log.info(msg)
print(msg)
else:
if yaml_dict_tpl:
self.tpl = yaml_dict_tpl
else:
ExceptionCollector.appendException(
ValueError(_('No path or yaml_dict_tpl was provided. '
'There is nothing to parse.')))
if self.tpl:
self.parsed_params = parsed_params
self._validate_field()
self.version = self._tpl_version()
self.relationship_types = self._tpl_relationship_types()
self.description = self._tpl_description()
self.topology_template = self._topology_template()
self.repositories = self._tpl_repositories()
if self.topology_template.tpl:
self.inputs = self._inputs()
self.relationship_templates = self._relationship_templates()
self.nodetemplates = self._nodetemplates()
self.outputs = self._outputs()
self.policies = self._policies()
self._handle_nested_tosca_templates_with_topology()
self.graph = ToscaGraph(self.nodetemplates)
ExceptionCollector.stop()
self.verify_template()
def _topology_template(self):
return TopologyTemplate(self._tpl_topology_template(),
self._get_all_custom_defs(),
self.relationship_types,
self.parsed_params,
None)
def _inputs(self):
return self.topology_template.inputs
def _nodetemplates(self):
return self.topology_template.nodetemplates
def _relationship_templates(self):
return self.topology_template.relationship_templates
def _outputs(self):
return self.topology_template.outputs
def _tpl_version(self):
return self.tpl.get(DEFINITION_VERSION)
def _tpl_description(self):
desc = self.tpl.get(DESCRIPTION)
if desc:
return desc.rstrip()
def _tpl_imports(self):
return self.tpl.get(IMPORTS)
def _tpl_repositories(self):
repositories = self.tpl.get(REPOSITORIES)
reposit = []
if repositories:
for name, val in repositories.items():
reposits = Repository(name, val)
reposit.append(reposits)
return reposit
def _tpl_relationship_types(self):
custom_rel, _ = self._get_custom_types(RELATIONSHIP_TYPES)
return custom_rel
def _tpl_relationship_templates(self):
topology_template = self._tpl_topology_template()
return topology_template.get(RELATIONSHIP_TEMPLATES)
def _tpl_topology_template(self):
return self.tpl.get(TOPOLOGY_TEMPLATE)
def _policies(self):
return self.topology_template.policies
def _get_all_custom_defs(self, imports=None, path=None):
types = [IMPORTS, NODE_TYPES, CAPABILITY_TYPES, RELATIONSHIP_TYPES,
DATA_TYPES, INTERFACE_TYPES, POLICY_TYPES, GROUP_TYPES]
custom_defs_final = {}
custom_defs, nested_imports = self._get_custom_types(
types, imports, path)
if custom_defs:
custom_defs_final.update(custom_defs)
if nested_imports:
for a_file, nested_import in nested_imports.items():
import_defs = self._get_all_custom_defs(
nested_import, a_file)
custom_defs_final.update(import_defs)
# As imports are not custom_types, removing from the dict
custom_defs_final.pop(IMPORTS, None)
return custom_defs_final
def _get_custom_types(self, type_definitions, imports=None,
path=None):
"""Handle custom types defined in imported template files
This method loads the custom type definitions referenced in "imports"
section of the TOSCA YAML template.
"""
custom_defs = {}
nested_imports = None
type_defs = []
if not isinstance(type_definitions, list):
type_defs.append(type_definitions)
else:
type_defs = type_definitions
if not imports:
imports = self._tpl_imports()
if not path:
path = self.path
if imports:
custom_service = toscaparser.imports.\
ImportsLoader(imports, path, type_defs, self.tpl)
nested_tosca_tpls = custom_service.get_nested_tosca_tpls()
self._update_nested_tosca_tpls_with_topology(nested_tosca_tpls)
nested_imports = custom_service.get_nested_imports()
custom_defs = custom_service.get_custom_defs()
if not custom_defs:
return None, None
# Handle custom types defined in current template file
for type_def in type_defs:
if type_def != IMPORTS:
inner_custom_types = self.tpl.get(type_def) or {}
if inner_custom_types:
custom_defs.update(inner_custom_types)
return custom_defs, nested_imports
def _update_nested_tosca_tpls_with_topology(self, nested_tosca_tpls):
for tpl in nested_tosca_tpls:
filename, tosca_tpl = list(tpl.items())[0]
if (tosca_tpl.get(TOPOLOGY_TEMPLATE) and
filename not in list(
self.nested_tosca_tpls_with_topology.keys())):
self.nested_tosca_tpls_with_topology.update(tpl)
def _handle_nested_tosca_templates_with_topology(self):
for fname, tosca_tpl in self.nested_tosca_tpls_with_topology.items():
for nodetemplate in self.nodetemplates:
if self._is_sub_mapped_node(nodetemplate, tosca_tpl):
parsed_params = self._get_params_for_nested_template(
nodetemplate)
topology_tpl = tosca_tpl.get(TOPOLOGY_TEMPLATE)
topology_with_sub_mapping = TopologyTemplate(
topology_tpl,
self._get_all_custom_defs(),
self.relationship_types,
parsed_params,
nodetemplate)
if topology_with_sub_mapping.substitution_mappings:
# Record nested topo templates in top level template
self.nested_tosca_templates_with_topology.\
append(topology_with_sub_mapping)
# Set substitution mapping object for mapped node
nodetemplate.sub_mapping_tosca_template = \
topology_with_sub_mapping.substitution_mappings
def _validate_field(self):
version = self._tpl_version()
if not version:
ExceptionCollector.appendException(
MissingRequiredFieldError(what='Template',
required=DEFINITION_VERSION))
else:
self._validate_version(version)
self.version = version
for name in self.tpl:
if (name not in SECTIONS and
name not in self.ADDITIONAL_SECTIONS.get(version, ())):
ExceptionCollector.appendException(
UnknownFieldError(what='Template', field=name))
def _validate_version(self, version):
if version not in self.VALID_TEMPLATE_VERSIONS:
ExceptionCollector.appendException(
InvalidTemplateVersion(
what=version,
valid_versions='", "'. join(self.VALID_TEMPLATE_VERSIONS)))
else:
if version not in self.MAIN_TEMPLATE_VERSIONS:
update_definitions(version)
def _get_path(self, path):
if path.lower().endswith('.yaml') or path.lower().endswith('.yml'):
return path
elif path.lower().endswith(('.zip', '.csar')):
# a CSAR archive
csar = CSAR(path, self.a_file)
if csar.validate():
csar.decompress()
self.a_file = True # the file has been decompressed locally
return os.path.join(csar.temp_dir, csar.get_main_template())
else:
ExceptionCollector.appendException(
ValueError(_('"%(path)s" is not a valid file.')
% {'path': path}))
def verify_template(self):
if ExceptionCollector.exceptionsCaught():
if self.input_path:
raise ValidationError(
message=(_('\nThe input "%(path)s" failed validation with '
'the following error(s): \n\n\t')
% {'path': self.input_path}) +
'\n\t'.join(ExceptionCollector.getExceptionsReport()))
else:
raise ValidationError(
message=_('\nThe pre-parsed input failed validation with '
'the following error(s): \n\n\t') +
'\n\t'.join(ExceptionCollector.getExceptionsReport()))
else:
if self.input_path:
msg = (_('The input "%(path)s" successfully passed '
'validation.') % {'path': self.input_path})
else:
msg = _('The pre-parsed input successfully passed validation.')
log.info(msg)
def _is_sub_mapped_node(self, nodetemplate, tosca_tpl):
"""Return True if the nodetemple is substituted."""
# NOTE(ueha): Since condition "not nodetemplate.sub_mapping_tosca_\
# template" was deleted as a fix for bug/1883220, there is
# some possibility of breaking something on translator side
# that current tests not coverd.
# And this enhancement does not align with TOSCA standard
# but needed for ETSI NFV-SOL 001.
if (nodetemplate and
self.get_sub_mapping_node_type(tosca_tpl) == nodetemplate.type
and len(nodetemplate.interfaces) < 1):
return True
else:
return False
def _get_params_for_nested_template(self, nodetemplate):
"""Return total params for nested_template."""
parsed_params = deepcopy(self.parsed_params) \
if self.parsed_params else {}
if nodetemplate:
for pname in nodetemplate.get_properties():
parsed_params.update({pname:
nodetemplate.get_property_value(pname)})
return parsed_params
def get_sub_mapping_node_type(self, tosca_tpl):
"""Return substitution mappings node type."""
if tosca_tpl:
return TopologyTemplate.get_sub_mapping_node_type(
tosca_tpl.get(TOPOLOGY_TEMPLATE))
def _has_substitution_mappings(self):
"""Return True if the template has valid substitution mappings."""
return self.topology_template is not None and \
self.topology_template.substitution_mappings is not None
def has_nested_templates(self):
"""Return True if the tosca template has nested templates."""
return self.nested_tosca_templates_with_topology is not None and \
len(self.nested_tosca_templates_with_topology) >= 1
|
en
| 0.779422
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TOSCA template key names # Sections that are specific to individual template definitions Load the template data. # As imports are not custom_types, removing from the dict Handle custom types defined in imported template files This method loads the custom type definitions referenced in "imports" section of the TOSCA YAML template. # Handle custom types defined in current template file # Record nested topo templates in top level template # Set substitution mapping object for mapped node # a CSAR archive # the file has been decompressed locally Return True if the nodetemple is substituted. # NOTE(ueha): Since condition "not nodetemplate.sub_mapping_tosca_\ # template" was deleted as a fix for bug/1883220, there is # some possibility of breaking something on translator side # that current tests not coverd. # And this enhancement does not align with TOSCA standard # but needed for ETSI NFV-SOL 001. Return total params for nested_template. Return substitution mappings node type. Return True if the template has valid substitution mappings. Return True if the tosca template has nested templates.
| 1.441108
| 1
|
app/models/dashboard.py
|
mateusvarelo/viz_web
| 0
|
6628844
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from .plotagem import cria_fig_vertical_barra,cores_padrao,cria_fig_horizontal_barra,cria_grafico_linha_youtube
"""Nesse modulo será possivel criar um layout para a pagina do dashboard"""
def init_dashboard(server):
    """Create a Plotly Dash dashboard mounted on the given Flask server.

    The dashboard shows three charts built from the user's Spotify and
    YouTube usage data and is served under the '/dashboard/' route.

    :param server: Flask application to attach the Dash app to.
    :returns: the underlying Flask server with the Dash routes registered.
    """
    # CSS stylesheet for the page.
    external_stylesheets = ['../static/css/bootstrap.min.css']

    # Initialize the Dash app on top of the existing Flask server.
    dash_app = dash.Dash(
        server=server,
        routes_pathname_prefix='/dashboard/',
        external_stylesheets=external_stylesheets
    )

    # Default color palette shared by all page elements.
    colors = cores_padrao()

    # Pre-built figures, ready for the layout:
    #  - vertical bar chart of the 7 most-played genres
    #  - horizontal bar chart
    #  - YouTube line chart
    fig_bar_vertical = cria_fig_vertical_barra()
    fig_barh_horizontal = cria_fig_horizontal_barra()
    fig_line_youtube = cria_grafico_linha_youtube()

    # Page layout.
    dash_app.layout = html.Div(
        style={'backgroundColor': colors['background']},
        children=[
            html.H1(
                children='Dashboard :)',
                style={
                    'textAlign': 'left',
                    'color': colors['text']
                }
            ),
            html.H4(
                children='Vamos ver o que diz os dados de utilização das plataformas youtube e spotify sobre Mateus :).',
                style={
                    'textAlign': 'left',
                    'color': colors['text']
                }
            ),
            html.Div(children='Dash:Confira dados do Mateus em 3 gráficos.',
                     style={
                         'textAlign': 'left',
                         'color': colors['text']
                     }),
            dcc.Graph(
                id='example-graph-1',
                figure=fig_barh_horizontal
            ),
            dcc.Graph(
                id='example-graph-2',
                figure=fig_bar_vertical
            ),
            # BUGFIX: id was 'example-graph-' (trailing index missing);
            # use 'example-graph-3' so component ids stay unique and
            # consistent.
            dcc.Graph(
                id='example-graph-3',
                figure=fig_line_youtube
            ),
            html.A(children='Voltar para início',
                   href="https://vizwebdash.herokuapp.com/"
                   ),
        ]
    )
    return dash_app.server
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from .plotagem import cria_fig_vertical_barra,cores_padrao,cria_fig_horizontal_barra,cria_grafico_linha_youtube
"""Nesse modulo será possivel criar um layout para a pagina do dashboard"""
def init_dashboard(server):
"""Create a Plotly Dash dashboard como um servidor dash."""
"""Estilo css para a pagina"""
external_stylesheets = ['../static/css/bootstrap.min.css']
#Inicializando App dash
dash_app = dash.Dash(
server=server,
routes_pathname_prefix='/dashboard/',
external_stylesheets= external_stylesheets
)
"""Definição de cores padrão"""
colors = cores_padrao()
"""Create Dash Layout"""
"""Gráfico de barra ertical com dados de genêros, primeiros 7 generos mais ouvidos"""
"""Gráficos pronto para inserir no layout """
fig_bar_vertical = cria_fig_vertical_barra()
fig_barh_horizontal = cria_fig_horizontal_barra()
fig_line_youtube = cria_grafico_linha_youtube()
"""Criando layout para pagina"""
dash_app.layout = html.Div(
style={'backgroundColor': colors['background']},
children=[
html.H1(
children='Dashboard :)',
style={
'textAlign': 'left',
'color': colors['text']
}
),
html.H4(
children='Vamos ver o que diz os dados de utilização das plataformas youtube e spotify sobre Mateus :).',
style={
'textAlign': 'left',
'color': colors['text']
}
),
html.Div(children='Dash:Confira dados do Mateus em 3 gráficos.',
style={
'textAlign': 'left',
'color': colors['text']
}),
dcc.Graph(
id='example-graph-1',
figure=fig_barh_horizontal
),
dcc.Graph(
id='example-graph-2',
figure=fig_bar_vertical
),
dcc.Graph(
id='example-graph-',
figure=fig_line_youtube
),
html.A(children='Voltar para início',
href = "https://vizwebdash.herokuapp.com/"
),
]
)
return dash_app.server
|
pt
| 0.878535
|
Nesse modulo será possivel criar um layout para a pagina do dashboard Create a Plotly Dash dashboard como um servidor dash. Estilo css para a pagina #Inicializando App dash Definição de cores padrão Create Dash Layout Gráfico de barra ertical com dados de genêros, primeiros 7 generos mais ouvidos Gráficos pronto para inserir no layout Criando layout para pagina
| 3.064654
| 3
|
src/reinforce.py
|
jarbus/hackRPI2019
| 0
|
6628845
|
<gh_stars>0
""" Policy Gradient optimizer for the Recovery environment, based on https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5#file-pg-pong-py-L68 """
import gym
import numpy as np
# Model Parameters
hidden_n = 4
input_n = 9
output_n = 2
def sigmoid(x: np.array) -> np.array:
    """Element-wise logistic function: maps reals into the open interval (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
class ReinforceAgent:
    """REINFORCE (vanilla policy gradient) agent for a multi-drone environment.

    One two-layer network (input_n -> hidden_n -> output_n) is shared by
    all agents; gradients are accumulated over a batch of episodes and
    applied with an RMSProp update.
    """

    def __init__(self, *,
                 render: bool = False,
                 batch_size: int = 5,
                 episode_length: int = 300,
                 learning_rate: float = 1e-4,
                 gamma: float = 0.99,
                 decay_rate: float = 0.99):
        """
        Optimization Parameters
        learning_rate
        gamma - Reduced effect of reward on earlier actions
        decay_rate - RMSProp decay factor """
        self.render = render
        self.batch_size = batch_size
        self.episode_length = episode_length
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.decay_rate = decay_rate

        # Xavier initial weights
        self.model = {
            "w1": np.random.randn(hidden_n, input_n) / np.sqrt(input_n),
            "w2": np.random.randn(output_n, hidden_n) / np.sqrt(hidden_n)
        }

    def train(self, env: "gym.Env"):
        # NOTE: the annotation is a string so the class is importable
        # without gym installed (annotations are evaluated at def time).
        """Train the model on the given environment, forever.

        Runs fixed-length episodes, accumulates a policy gradient per
        agent, and applies an RMSProp update every ``batch_size``
        episodes.  Weights are periodically dumped to
        ``results-<episode>.txt``.
        """
        episode = 0
        batch_counter = 0
        # BUGFIX: iterating a dict yields keys only; .items() is needed
        # to unpack (k, v) pairs.
        gradient_buffer = {k: np.zeros_like(v) for k, v in self.model.items()}
        rmsprop_cache = {k: np.zeros_like(v) for k, v in self.model.items()}
        recent_reward = 0
        self.toFile("results-%d.txt" % episode)
        while True:  # Continuously train over batches of episodes
            reward_sum = 0
            obs = env.reset()  # np.array of [x = params, y = agents]
            # BUGFIX: ndarray.shape is an attribute, not a callable.
            drone_count = obs.shape[0]
            xs = []       # One np array per tick (x = params, y = agents)
            hs = []       # One np array per tick (x = agents, y = neurons)
            dlogps1 = []  # Per tick, per agent: grad of log-prob, output 0
            dlogps2 = []  # Per tick, per agent: grad of log-prob, output 1
            drs = []      # Per tick reward
            for tick in range(self.episode_length):
                if self.render:
                    env.render()
                actions, hidden = self.act(obs)
                # Record information for backprop
                xs.append(obs)
                hs.append(hidden)
                dlogps1.append([1 - x if x >= 0.5 else -x for x in actions[0, ]])
                dlogps2.append([1 - x if x >= 0.5 else -x for x in actions[1, ]])
                obs, reward, done, _ = env.step(2 * actions - 1)
                recent_reward = reward
                drs.append(reward)  # We'll need to copy this for each agent.

            episode += 1
            batch_counter += 1

            discount_rewards = self._discount_reward(drs)
            # Normalize to zero mean / unit variance to reduce variance
            # of the gradient estimate.
            discount_rewards -= np.mean(discount_rewards)
            discount_rewards /= np.std(discount_rewards)

            states = np.stack(xs, axis=2)
            hidden_stack = np.stack(hs, axis=2)
            dlogps1_stack = np.array(dlogps1)
            dlogps2_stack = np.array(dlogps2)

            # Generate a gradient per agent
            # BUGFIX: drone_count is a local computed above, not an
            # attribute (self.drone_count was never assigned).
            for i in range(drone_count):
                episode_states = states[:, i, :]
                episode_hidden = hidden_stack[i, :, :]
                episode_dlogp1 = dlogps1_stack[:, i]
                episode_dlogp2 = dlogps2_stack[:, i]
                episode_dlogp1 *= discount_rewards
                episode_dlogp2 *= discount_rewards
                grad1W1, grad1W2 = self._backprop(episode_states, episode_hidden, episode_dlogp1)
                grad2W1, grad2W2 = self._backprop(episode_states, episode_hidden, episode_dlogp2)
                # Average the gradients of the two output neurons.
                gradient_buffer["w1"] += (grad1W1 + grad2W1) / 2
                gradient_buffer["w2"] += (grad1W2 + grad2W2) / 2

            if batch_counter == self.batch_size:
                # RMSProp update, then reset the gradient buffer.
                # BUGFIX: .items() here too.
                for k, v in self.model.items():
                    rmsprop_cache[k] = self.decay_rate * rmsprop_cache[k] + (1 - self.decay_rate) * gradient_buffer[k] ** 2
                    self.model[k] += self.learning_rate * gradient_buffer[k] / np.sqrt(rmsprop_cache[k] + 1e-5)
                    gradient_buffer[k] = np.zeros_like(v)
                print("Iteration %d: batch update, most recent reward: %f" % (episode, recent_reward))
                batch_counter = 0

            if episode % 100 == 0:
                print("Writting to file %d..." % episode)
                self.toFile("results-%d.txt" % episode)
                print("Done.")
            reward_sum = 0
            obs = env.reset()

    def _discount_reward(self, rewards) -> np.array:
        """Return discounted returns: entry t is the gamma-discounted sum
        of rewards[t:].

        BUGFIX: the reverse loop previously ran range(len(rewards), 0, -1),
        which indexes out of range on the first iteration and never fills
        index 0; it now covers len(rewards)-1 down to 0.  The output is
        an explicit float array so discounting is not truncated when the
        rewards are integers.
        """
        discount_rewards = np.zeros(len(rewards), dtype=float)
        summation = 0.0
        for tick in range(len(rewards) - 1, -1, -1):
            summation = summation * self.gamma + rewards[tick]
            discount_rewards[tick] = summation
        return discount_rewards

    def act(self, x: np.array) -> np.array:
        """ @param x: np.array (size x = params, y = agents)
        Returns np.array (size x = agents, y = 2) of how each agent moves and
        an np.array (size x = agents, y = hidden neurons)"""
        hidden = np.dot(self.model["w1"], np.transpose(x))
        hidden[hidden < 0] = 0  # ReLU non-linearity
        signal = np.dot(self.model["w2"], hidden)
        return sigmoid(signal), hidden

    def _backprop(self, episode_states: np.array,
                  episode_stack: np.array,
                  episode_dprob: np.array) -> np.array:
        """Return the weight gradients (dW1, dW2) for one agent's episode.

        BUGFIX: the parameters were declared keyword-only but train()
        calls this positionally, which raised TypeError; the signature
        now matches the call site order: (states, hidden activations,
        dlog-probabilities).

        NOTE(review): the shape handling for the two-output head looks
        suspect (np.outer against the whole w2 matrix) — verify the
        dimensions against the network layout before relying on this.
        """
        dW2 = np.dot(episode_stack.T, episode_dprob).ravel()
        dh = np.outer(episode_dprob, self.model["w2"])
        dh[episode_stack < 0] = 0  # gradient is zero where ReLU clipped
        dW1 = np.dot(dh.T, episode_states)
        return dW1, dW2

    def toFile(self, name: str):
        """Append the current weight matrices to ``name`` as plain text.

        BUGFIX: the weights are floats, so they are written with %f; the
        old %d format silently truncated every weight to an integer.
        """
        with open(name, "a") as f:
            f.write("w1\n")
            for x in range(input_n):
                for y in range(hidden_n):
                    f.write("%f " % self.model["w1"][y, x])
                f.write("\n")
            f.write("w2\n")
            for x in range(hidden_n):
                for y in range(output_n):
                    f.write("%f " % self.model["w2"][y, x])
                f.write("\n")
|
""" Policy Gradient optimizer for the Recovery environment, based on https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5#file-pg-pong-py-L68 """
import gym
import numpy as np
# Model Parameters
hidden_n = 4
input_n = 9
output_n = 2
def sigmoid(x: np.array) -> np.array:
return 1.0/(1.0 + np.exp(-x))
class ReinforceAgent:
def __init__(self, *,
render: bool = False,
batch_size: int = 5,
episode_length: int = 300,
learning_rate: float = 1e-4,
gamma: float = 0.99,
decay_rate: float = 0.99):
"""
Optimization Parameters
learning_rate
gamma - Reduced effect of reward on earlier actions
decay_rate - RMSProp decay factor """
self.render = render
self.batch_size = batch_size
self.episode_length = episode_length
self.learning_rate = learning_rate
self.gamma = gamma
self.decay_rate = decay_rate
# Xavier initial weights
self.model = {
"w1": np.random.randn(hidden_n, input_n) / np.sqrt(input_n),
"w2": np.random.randn(output_n, hidden_n) / np.sqrt(hidden_n)
}
def train(self, env: gym.Env):
""" Trains model using passed environment. """
episode = 0
batch_counter = 0
gradient_buffer = { k: np.zeros_like(v) for k,v in self.model }
rmsprop_cache = { k : np.zeros_like(v) for k,v in self.model }
recent_reward = 0
self.toFile("results-%d.txt" % episode)
while True: # Continuously train over batches of 5 episodes
reward_sum = 0
obs = env.reset() # np.array of [x = params, y = agents]
drone_count = obs.shape()[0]
xs = [] # An np array per tick (x = params, y = agents)
hs = [] # An np array per tick (x = agents, y = neurons)
dlogps1 = [] # Per tick, the action each agent took
dlogps2 = []
drs = []
for tick in range(self.episode_length):
if self.render: env.render()
actions, hidden = self.act(obs)
# Record information for backprop
xs.append(obs)
hs.append(hidden)
dlogps1.append([1 - x if x >= 0.5 else -x for x in actions[0,]])
dlogps2.append([1 - x if x >= 0.5 else -x for x in actions[1,]])
obs, reward, done, _ = env.step(2*actions - 1)
recent_reward = reward
drs.append(reward) # We'll need to copy this for each agent.
episode += 1
batch_counter += 1
discount_rewards = self._discount_reward(drs)
discount_rewards -= np.mean(discount_rewards)
discount_rewards /= np.std(discount_rewards)
states = np.stack(xs, axis=2)
hidden_stack = np.stack(hs, axis=2)
dlogps1_stack = np.array(dlogps1)
dlogps2_stack = np.array(dlogps2)
# Generate a gradient per agent
for i in range(self.drone_count):
episode_states = states[:,i,:]
episode_hidden = hidden_stack[i,:,:]
episode_dlogp1 = dlogps1_stack[:,i]
episode_dlogp2 = dlogps2_stack[:,i]
episode_dlogp1 *= discount_rewards
episode_dlogp2 *= discount_rewards
grad1W1, grad1W2 = self._backprop(episode_states, episode_hidden, episode_dlogp1)
grad2W1, grad2W2 = self._backprop(episode_states, episode_hidden, episode_dlogp2)
gradient_buffer["w1"] += (grad1W1 + grad2W1)/2
gradient_buffer["w2"] += (grad1W2 + grad2W2)/2
if batch_counter == self.batch_size:
for k,v in self.model:
rmsprop_cache[k] = self.decay_rate * rmsprop_cache[k] + (1 - self.decay_rate) * gradient_buffer[k]**2
self.model[k] += self.learning_rate*gradient_buffer[k] / np.sqrt(rmsprop_cache[k] + 1e-5)
gradient_buffer[k] = np.zeros_like(v)
print("Iteration %d: batch update, most recent reward: %f" % (episode, recent_reward))
batch_counter = 0
if episode % 100 == 0:
print("Writting to file %d..." % episode)
self.toFile("results-%d.txt" % episode)
print("Done.")
reward_sum = 0
obs = env.reset()
def _discount_reward(self, rewards: np.array) -> np.array:
""" Reduces the magnitude of the reward for the earlier actions. """
discount_rewards = np.zeros_like(rewards)
summation = 0
for tick in range(len(rewards), 0, -1):
summation = summation * self.gamma + rewards[tick]
discount_rewards[tick] = summation
return discount_rewards
def act(self, x: np.array) -> np.array:
    """Forward pass of the two-layer policy network.

    @param x: np.array (size x = params, y = agents)
    Returns np.array of per-agent action probabilities and an np.array
    (size x = agents, y = hidden neurons) of hidden activations.
    """
    pre_activation = np.dot(self.model["w1"], x.T)
    # ReLU non-linearity on the hidden layer.
    hidden = np.maximum(pre_activation, 0)
    logits = np.dot(self.model["w2"], hidden)
    return sigmoid(logits), hidden
def _backprop(self, *,
episode_dprob: np.array,
episode_stack: np.array,
episode_states: np.array) -> np.array:
""" Returns gradient for weights, ordered dW1, dW2. Does so for one agent. """
dW2 = np.dot(episode_stack.T, episode_dprob).ravel()
dh = np.outer(episode_dprob, self.model["w2"])
dh[episode_stack < 0] = 0 # (PreLu)
dW1 = np.dot(dh.T, episode_states)
return dW1, dW2
def toFile(self, name: str):
    """Append the current weight matrices to a text file.

    Layout: a "w1" header line, then one line per input column holding the
    weights of every hidden unit for that input, followed by the same for
    "w2".

    Args:
        name: Path of the file to append to.
    """
    # Derive the matrix dimensions from the model itself instead of
    # relying on the module-level input_n/hidden_n/output_n globals, which
    # may not exist (or match) at save time.
    with open(name, "a") as f:
        for key in ("w1", "w2"):
            weights = self.model[key]
            rows, cols = weights.shape
            f.write("%s\n" % key)
            for x in range(cols):
                for y in range(rows):
                    # %f keeps the fractional part; the original "%d"
                    # truncated every weight to an integer, destroying
                    # the saved model.
                    f.write("%f " % weights[y, x])
                f.write("\n")
|
en
| 0.738964
|
Policy Gradient optimizer for the Recovery environment, based on https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5#file-pg-pong-py-L68 # Model Parameters Optimization Parameters learning_rate gamma - Reduced effect of reward on earlier actions decay_rate - RMSProp decay factor # Xavier initial weights Trains model using passed environment. # Continuously train over batches of 5 episodes # np.array of [x = params, y = agents] # An np array per tick (x = params, y = agents) # An np array per tick (x = agents, y = neurons) # Per tick, the action each agent took # Record information for backprop # We'll need to copy this for each agent. # Generate a gradient per agent Reduces the magnitude of the reward for the earlier actions. @param x: np.array (size x = params, y = agents) Returns np.array (size x = agents, y = 2) of how each agent moves and an np.array (size x = agents, y = hidden neurons) # Apply lower bound to values (ReLU) Returns gradient for weights, ordered dW1, dW2. Does so for one agent. # (PreLu)
| 2.464388
| 2
|
ex090.py
|
dsjocimar/python
| 0
|
6628846
|
# Exercise 090: build a report-card dict from user input and print it.
boletim = {}
boletim['Nome'] = str(input('Nome: '))
boletim['Media'] = float(input(f'Média de {boletim["Nome"]}: '))
# A mean of 7 or more passes the student.
boletim['Situaçao'] = 'Aprovado' if boletim['Media'] >= 7 else 'Reprovado'
for k, v in boletim.items():
    print(f'{k} é igual a {v}')
|
# Exercise 090: build a report-card dict from user input and print it.
boletim = {}
boletim['Nome'] = str(input('Nome: '))
boletim['Media'] = float(input(f'Média de {boletim["Nome"]}: '))
# A mean of 7 or more passes the student.
boletim['Situaçao'] = 'Aprovado' if boletim['Media'] >= 7 else 'Reprovado'
for k, v in boletim.items():
    print(f'{k} é igual a {v}')
|
pt
| 0.962807
|
# Exercício 090
| 3.67908
| 4
|
parser_lib/parser.py
|
mdcallag/mysql-tools
| 226
|
6628847
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MySQL query parser.
Tools to parse SQL queries into a pyparsing parse tree. The primary method here
is SQLParser.ParseString, which takes a string that you might pipe to the mysql
CLI (containing multiple delimited queries) and parses it. The parsing grammar
is far from complete, and focuses on DDL.
GoogleSQLParser adds ON SHARD support to the grammar.
"""
# based on the work of <NAME> (<EMAIL>)
__author__ = '<EMAIL> (<NAME>)'
import logging
import pyparsing as pyp
import re
try:
from ..pylib import db
except (ImportError, ValueError):
from pylib import db
class Error(Exception):
    """Base class for all errors raised by this module."""
class ParseError(Error):
    """Parsing failed; carries the message and the character offset."""

    def __init__(self, msg, loc):
        """Record the parse failure.

        Args:
          msg: Human-readable description of the failure.
          loc: Absolute character offset of the failure in the input.
        """
        self.msg = msg
        self.loc = loc

    def __str__(self):
        location_note = ' (at char %d)' % self.loc
        return '%s' % self.msg + location_note
class SQLParser(object):
"""SQL Parser"""
def _LogStart(self, instring, loc, expr):
    # pyparsing "start" debug action: log where a grammar rule begins
    # matching, alongside the offset of the current statement in the
    # multi-statement input.
    logging.debug('Start: base_loc: %d, loc: %d, expr: %s',
                  self._base_loc, loc, expr.name)
def _LogSuccess(self, instring, start, loc, expr, tokens):
    # pyparsing "success" debug action.  Also stamps the absolute
    # character position of the match onto the token list so consumers
    # can map parse results back to the original input.
    logging.debug('Success: base_loc: %d, loc: %d, expr: %s, tokens: %s',
                  self._base_loc, loc, expr.name, tokens)
    tokens['loc'] = self._base_loc + loc
def _LogFailure(self, instring, start, expr, err):
    # pyparsing "failure" debug action: log which rule failed and the
    # parse error it produced.
    logging.debug('Failure: base_loc: %d, loc: %d, expr: %s, err: %s',
                  self._base_loc, err.loc, expr.name, err)
def __init__(self, progress_callback=None):
    """Constructor.

    Collects every class attribute named like _*_TOKEN into the
    _KEYWORDS forward rule, then names each pyparsing rule after its
    attribute and attaches the logging debug actions.

    Args:
      progress_callback: If specified, called with the character location of
        the end of the last-yielded statement.
    """
    # Get all the class variables that match _*_TOKEN (keyword terminals).
    keywords = list(SQLParser.__dict__[k]
                    for k in SQLParser.__dict__
                    if re.match(r'^_([_\w])+_TOKEN$', k))
    # Fill the grammar rule _KEYWORDS with all the keywords possible.
    # _KEYWORDS is a pyp.Forward, so "<<" injects its definition in place.
    SQLParser.__dict__['_KEYWORDS'] << pyp.MatchFirst(keywords)
    self._loc = 0  # Last yielded line end
    self._base_loc = 0  # Start of this statement
    self._callback = progress_callback
    # Wire up tracing: every ParserElement defined on the class gets its
    # attribute name and the three debug callbacks above.
    for key in dir(self):
        grammar_rule = getattr(self, key)
        if isinstance(grammar_rule, pyp.ParserElement):
            grammar_rule.setName(key)
            grammar_rule.setDebugActions(
                self._LogStart, self._LogSuccess, self._LogFailure)
def _OnNewLine(self, loc):
    # Callback from db.XSplit: remember the offset of the most recently
    # completed input line (used for progress reporting and error offsets).
    self._loc = loc
def ParseString(self, string):
    """Parse a string of delimited SQL statements.

    Splits the input on statement boundaries and yields one pyparsing
    result per statement.  After each yield, the optional progress
    callback is invoked with the end offset of the statement.

    Raises:
      ParseError: if a statement fails to parse, or if trailing input
        remains after the last complete statement.
    """
    logging.debug('Parsing: %r', string)
    try:
        for statement in db.XCombineSQL(db.XSplit(string, '\n',
                                                  callback=self._OnNewLine)):
            yield self._QUERY.parseString(statement)[0]
            if self._callback:
                self._callback(self._loc)
            # Advance the base offset past this statement (+1 for the
            # delimiter) so error locations stay absolute.
            self._base_loc = self._loc + len(statement) + 1
    except pyp.ParseException as e:
        raise ParseError(e.msg, self._base_loc + e.loc)
    except db.InputRemaining as e:
        raise ParseError('Input remaining: %s' % e, self._base_loc + self._loc)
# DISCARDED
_COMMENT_START = pyp.Keyword(
'--', identChars=pyp.Keyword.DEFAULT_KEYWORD_CHARS + '-')
_COMMENT_LINE = _COMMENT_START + pyp.restOfLine
_COMMENT_BLOCK = pyp.Regex(r'/\*(?=[^!])(?:[^*]*\*+)+?/')
# TERMINALS
_LINE_DELIMITER = pyp.Suppress(';').setName(';')
_ALTER_TOKEN = pyp.CaselessKeyword('alter')
_SELECT_TOKEN = pyp.CaselessKeyword('select')
_CREATE_TOKEN = pyp.CaselessKeyword('create')
_UPDATE_TOKEN = pyp.CaselessKeyword('update')
_INSERT_TOKEN = pyp.CaselessKeyword('insert')
_REPLACE_TOKEN = pyp.CaselessKeyword('replace')
_DELETE_TOKEN = pyp.CaselessKeyword('delete')
_MODIFY_TOKEN = pyp.CaselessKeyword('modify')
_ADD_TOKEN = pyp.CaselessKeyword('add')
_CHANGE_TOKEN = pyp.CaselessKeyword('change')
_DROP_TOKEN = pyp.CaselessKeyword('drop')
_CONVERT_TOKEN = pyp.CaselessKeyword('convert')
_TO_TOKEN = pyp.CaselessKeyword('to')
_ALL_TOKEN = pyp.CaselessKeyword('all')
_DISTINCT_TOKEN = pyp.CaselessKeyword('distinct')
_DISTINCTROW_TOKEN = pyp.CaselessKeyword('distinctrow')
_FROM_TOKEN = pyp.CaselessKeyword('from').suppress()
_WHERE_TOKEN = pyp.CaselessKeyword('where').suppress()
_ORDER_TOKEN = pyp.CaselessKeyword('order').suppress()
_GROUP_TOKEN = pyp.CaselessKeyword('group').suppress()
_HAVING_TOKEN = pyp.CaselessKeyword('having').suppress()
_LIMIT_TOKEN = pyp.CaselessKeyword('limit').suppress()
_BY_TOKEN = pyp.CaselessKeyword('by').suppress()
_AS_TOKEN = pyp.CaselessKeyword('as').suppress()
_INTO_TOKEN = pyp.CaselessKeyword('into').suppress()
_VALUES_TOKEN = pyp.CaselessKeyword('values').suppress()
_IS_TOKEN = pyp.CaselessKeyword('is')
_NOT_TOKEN = pyp.CaselessKeyword('not')
_NULL_TOKEN = pyp.CaselessKeyword('null')
_TRUE_TOKEN = pyp.CaselessKeyword('true')
_FALSE_TOKEN = pyp.CaselessKeyword('false')
_UNKNOWN_TOKEN = pyp.CaselessKeyword('unknown')
_IN_TOKEN = pyp.CaselessKeyword('in')
_CASE_TOKEN = pyp.CaselessKeyword('case')
_WHEN_TOKEN = pyp.CaselessKeyword('when')
_THEN_TOKEN = pyp.CaselessKeyword('then')
_ELSE_TOKEN = pyp.CaselessKeyword('else')
_START_TOKEN = pyp.CaselessKeyword('start')
_END_TOKEN = pyp.CaselessKeyword('end')
_JOIN_TOKEN = pyp.CaselessKeyword('join')
_LEFT_TOKEN = pyp.CaselessKeyword('left')
_RIGHT_TOKEN = pyp.CaselessKeyword('right')
_CROSS_TOKEN = pyp.CaselessKeyword('cross')
_INNER_TOKEN = pyp.CaselessKeyword('inner')
_OUTER_TOKEN = pyp.CaselessKeyword('outer')
_NATURAL_TOKEN = pyp.CaselessKeyword('natural')
_ON_TOKEN = pyp.CaselessKeyword('on')
_USING_TOKEN = pyp.CaselessKeyword('using')
_STRAIGHT_JOIN_TOKEN = pyp.CaselessKeyword('straight_join')
_LIKE_TOKEN = pyp.CaselessKeyword('like')
_ENGINE_TOKEN = pyp.CaselessKeyword('engine')
_IF_TOKEN = pyp.CaselessKeyword('if').suppress()
_EXISTS_TOKEN = pyp.CaselessKeyword('exists').suppress()
_CHARSET_TOKEN = pyp.CaselessKeyword('charset')
_CHARACTER_TOKEN = pyp.CaselessKeyword('character')
_NAMES_TOKEN = pyp.CaselessKeyword('names')
_COLLATE_TOKEN = pyp.CaselessKeyword('collate')
_INTERVAL_TOKEN = pyp.CaselessKeyword('interval')
_DATABASE_TOKEN = pyp.CaselessKeyword('database')
_TABLE_TOKEN = pyp.CaselessKeyword('table').suppress()
_COLUMN_TOKEN = pyp.CaselessKeyword('column').suppress()
_INDEX_TOKEN = pyp.CaselessKeyword('index')
_PRIMARY_TOKEN = pyp.CaselessKeyword('primary')
_KEY_TOKEN = pyp.CaselessKeyword('key')
_UNIQUE_TOKEN = pyp.CaselessKeyword('unique')
_DUPLICATE_TOKEN = pyp.CaselessKeyword('duplicate').suppress()
_AUTO_INCREMENT_TOKEN = pyp.CaselessKeyword('auto_increment')
_DEFAULT_TOKEN = pyp.CaselessKeyword('default').suppress()
_USE_TOKEN = pyp.CaselessKeyword('use')
_IGNORE_TOKEN = pyp.CaselessKeyword('ignore')
_FORCE_TOKEN = pyp.CaselessKeyword('force')
_CONSTRAINT_TOKEN = pyp.CaselessKeyword('constraint')
_FOREIGN_TOKEN = pyp.CaselessKeyword('foreign')
_RESTRICT_TOKEN = pyp.CaselessKeyword('restrict')
_CASCADE_TOKEN = pyp.CaselessKeyword('cascade')
_NO_TOKEN = pyp.CaselessKeyword('no')
_ACTION_TOKEN = pyp.CaselessKeyword('action')
_REFERENCES_TOKEN = pyp.CaselessKeyword('references')
_TINYINT_TOKEN = pyp.CaselessKeyword('tinyint')
_SMALLINT_TOKEN = pyp.CaselessKeyword('smallint')
_MEDIUMINT_TOKEN = pyp.CaselessKeyword('mediumint')
_INT_TOKEN = pyp.CaselessKeyword('int')
_INTEGER_TOKEN = pyp.CaselessKeyword('integer')
_BIGINT_TOKEN = pyp.CaselessKeyword('bigint')
_UNSIGNED_TOKEN = pyp.CaselessKeyword('unsigned')
_DECIMAL_TOKEN = pyp.CaselessKeyword('decimal')
_DEC_TOKEN = pyp.CaselessKeyword('dec')
_FIXED_TOKEN = pyp.CaselessKeyword('fixed')
_FLOAT_TOKEN = pyp.CaselessKeyword('float')
_DOUBLE_TOKEN = pyp.CaselessKeyword('double')
_PRECISION_TOKEN = pyp.CaselessKeyword('precision')
_DATE_TOKEN = pyp.CaselessKeyword('date')
_DATETIME_TOKEN = pyp.CaselessKeyword('datetime')
_TIMESTAMP_TOKEN = pyp.CaselessKeyword('timestamp')
_TIME_TOKEN = pyp.CaselessKeyword('time')
_YEAR_TOKEN = pyp.CaselessKeyword('year')
_CHAR_TOKEN = pyp.CaselessKeyword('char')
_VARCHAR_TOKEN = pyp.CaselessKeyword('varchar')
_BINARY_TOKEN = pyp.CaselessKeyword('binary')
_VARBINARY_TOKEN = pyp.CaselessKeyword('varbinary')
_TINYBLOB_TOKEN = pyp.CaselessKeyword('tinyblob')
_BLOB_TOKEN = pyp.CaselessKeyword('blob')
_MEDIUMBLOB_TOKEN = pyp.CaselessKeyword('mediumblob')
_LONGBLOB_TOKEN = pyp.CaselessKeyword('longblob')
_TINYTEXT_TOKEN = pyp.CaselessKeyword('tinytext')
_TEXT_TOKEN = pyp.CaselessKeyword('text')
_MEDIUMTEXT_TOKEN = pyp.CaselessKeyword('mediumtext')
_LONGTEXT_TOKEN = pyp.CaselessKeyword('longtext')
_ENUM_TOKEN = pyp.CaselessKeyword('enum')
_SET_TOKEN = pyp.CaselessKeyword('set')
_BIT_TOKEN = pyp.CaselessKeyword('bit')
_FIRST_TOKEN = pyp.CaselessKeyword('first')
_BEFORE_TOKEN = pyp.CaselessKeyword('before')
_AFTER_TOKEN = pyp.CaselessKeyword('after')
_CURRENT_TIMESTAMP_TOKEN = pyp.CaselessKeyword('current_timestamp')
_BEGIN_TOKEN = pyp.CaselessKeyword('begin')
_TRANSACTION_TOKEN = pyp.CaselessKeyword('transaction')
_COMMIT_TOKEN = pyp.CaselessKeyword('commit')
_ROLLBACK_TOKEN = pyp.CaselessKeyword('rollback')
_LOCAL_TOKEN = pyp.CaselessKeyword('local')
_SESSION_TOKEN = pyp.CaselessKeyword('session')
_GLOBAL_TOKEN = pyp.CaselessKeyword('global')
## IDENTIFIER
_KEYWORDS = pyp.Forward() # list of keywords, defined by __init__()
_IDENTIFIER = pyp.Group(pyp.Word(pyp.alphas, pyp.alphanums + '_$')
| pyp.QuotedString('`', multiline=True, escChar='\\'))
_CHARSET = '_' + pyp.Word(pyp.alphanums).setResultsName('character_set')
_STRING = (pyp.Optional(_CHARSET)
+ (pyp.QuotedString('\'', multiline=True, escChar='\\')
| pyp.QuotedString('\"', multiline=True, escChar='\\')))
_NUMBER = pyp.Word(pyp.nums)
_ARITH_SIGN = pyp.Word('+-', exact=1)
_E = pyp.CaselessLiteral('E')
_REAL_NUMBER = pyp.Combine(pyp.Optional(_ARITH_SIGN)
+ pyp.Optional(_NUMBER) + '.' + _NUMBER
+ pyp.Optional(_E
+ pyp.Optional(_ARITH_SIGN)
+ _NUMBER))
_INT_NUMBER = pyp.Combine(pyp.Optional(_ARITH_SIGN)
+ _NUMBER
+ pyp.Optional(_E
+ pyp.Optional('+')
+ _NUMBER))
_HEX = ((pyp.CaselessLiteral('0x').suppress()
+ pyp.Word(pyp.hexnums))
| pyp.Regex(r"x'(?:[0-9a-fA-F])+'"))
_VAL = pyp.Group(
_HEX
| pyp.OneOrMore(_STRING)
| _REAL_NUMBER
| _INT_NUMBER
| _NULL_TOKEN
| _TRUE_TOKEN
| _FALSE_TOKEN).setResultsName('val')
## TYPES
_FIELD_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_IDENTIFIER)
+ pyp.Suppress(')')
).setResultsName('fields')
_STRING_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_STRING)
+ pyp.Suppress(')')
).setResultsName('values')
_TYPE_SIZE = (pyp.Suppress('(')
+ _NUMBER.setName('type_size')
+ pyp.Suppress(')'))
_TYPE_PRECISION = (pyp.Suppress('(')
+ _NUMBER.setName('type_precision')
+ pyp.Suppress(',')
+ _NUMBER.setName('type_scale')
+ pyp.Suppress(')'))
# Types that don't take arguments.
_SIMPLE_TYPE = (_DATE_TOKEN
| _DATETIME_TOKEN
| _TIMESTAMP_TOKEN
| _TIME_TOKEN
| _YEAR_TOKEN
| _TINYTEXT_TOKEN
| _TEXT_TOKEN
| _MEDIUMTEXT_TOKEN
| _LONGTEXT_TOKEN
| _TINYBLOB_TOKEN
| _BLOB_TOKEN
| _MEDIUMBLOB_TOKEN
| _LONGBLOB_TOKEN).setResultsName('type_type')
_BIT = (_BIT_TOKEN.setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE))
_ENUM = (_ENUM_TOKEN.setResultsName('type_type')
+ _STRING_LIST)
_SET_TYPE = (_SET_TOKEN.setResultsName('type_type')
+ _STRING_LIST)
_INTS = ((_TINYINT_TOKEN
| _SMALLINT_TOKEN
| _MEDIUMINT_TOKEN
| _INT_TOKEN
| _INTEGER_TOKEN
| _BIGINT_TOKEN).setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE)
+ pyp.Optional(_UNSIGNED_TOKEN))
_REALS = ((_DECIMAL_TOKEN
| _DEC_TOKEN
| _FIXED_TOKEN
| _FLOAT_TOKEN
| _DOUBLE_TOKEN + pyp.Optional(_PRECISION_TOKEN)
).setResultsName('type_type')
+ pyp.Optional(_TYPE_PRECISION))
_CHARS = ((_VARCHAR_TOKEN
| _CHAR_TOKEN
| _BINARY_TOKEN
| _VARBINARY_TOKEN).setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE)
+ pyp.Optional(_BINARY_TOKEN))
_TYPE = pyp.Group(_BIT
| _ENUM
| _SET_TYPE
| _INTS
| _REALS
| _CHARS
| _SIMPLE_TYPE
).setResultsName('type')
## GRAMMAR
# COMMONS
_DB_NAME = _IDENTIFIER.setResultsName('database')
_TABLE_NAME_ONLY = _IDENTIFIER.setResultsName('table')
_TABLE_NAME = pyp.Group((_DB_NAME + '.' + _TABLE_NAME_ONLY)
| _TABLE_NAME_ONLY).setResultsName('table_spec')
_COLUMN_NAME_WILD = (_IDENTIFIER | '*').setResultsName('column')
_COLUMN_NAME = pyp.Group(
(_DB_NAME + '.' + _TABLE_NAME_ONLY + '.' + _COLUMN_NAME_WILD)
| (_TABLE_NAME_ONLY + '.' + _COLUMN_NAME_WILD)
| _COLUMN_NAME_WILD).setResultsName('column_spec')
_INDEX_NAME = _IDENTIFIER.setResultsName('index')
_COLUMN_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_COLUMN_NAME)
+ pyp.Suppress(')')
).setResultsName('columns')
# DATA DEFINITION COMMONS
_DEFAULT_VAL = (_DEFAULT_TOKEN
+ pyp.Group(_NULL_TOKEN
| _VAL
| _CURRENT_TIMESTAMP_TOKEN
).setResultsName('default'))
_COLUMN_CONSTRAINT = pyp.Group(pyp.Optional(_NOT_TOKEN)
+ _NULL_TOKEN
).setResultsName('constraint')
_POSITIONAL = pyp.Group(_FIRST_TOKEN
| ((_BEFORE_TOKEN | _AFTER_TOKEN) + _COLUMN_NAME)
).setResultsName('position')
# Optional column flags:
# - CHARSET <charset>
# - CHARACTER SET <charset>
# - COLLATE <collate name>
# - DEFAULT '<value>'
# - AUTO_INCREMENT
# - NOT NULL
# - ON UPDATE CURRENT_TIMESTAMP
_COLUMN_FLAGS = pyp.Group(
(_CHARSET_TOKEN + _IDENTIFIER.setResultsName('charset'))
| (_CHARACTER_TOKEN + _SET_TOKEN + _IDENTIFIER.setResultsName('charset'))
| (_COLLATE_TOKEN + _IDENTIFIER.setResultsName('collate'))
| _COLUMN_CONSTRAINT
| _DEFAULT_VAL
| _AUTO_INCREMENT_TOKEN.setResultsName('option')
| (_ON_TOKEN + _UPDATE_TOKEN + _CURRENT_TIMESTAMP_TOKEN)
).setResultsName('column_flags')
_COLUMN_DEFINITION = pyp.Group(_TYPE
+ pyp.ZeroOrMore(_COLUMN_FLAGS)
).setResultsName('column_definition')
_KEY_DEFINITION = pyp.Group(
(((pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ (_INDEX_TOKEN | _KEY_TOKEN).setResultsName('key_type'))
| _UNIQUE_TOKEN.setResultsName('key_type'))
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST)
| ((_PRIMARY_TOKEN + _KEY_TOKEN).setResultsName('key_type')
+ _FIELD_LIST)
).setResultsName('key_definition')
# ALTER STATEMENTS
# ADD COLUMN columnname TYPE [BEFORE | AFTER ...]
# ADD COLUMN (columnname TYPE, ...) [BEFORE | AFTER ...]
_ALTER_TABLE_ADD_COLUMN = pyp.Group(
_ADD_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ ((_COLUMN_NAME + _COLUMN_DEFINITION)
| (pyp.Suppress('(')
+ pyp.delimitedList(_COLUMN_NAME + _COLUMN_DEFINITION)
+ pyp.Suppress(')')))
+ pyp.ZeroOrMore(_COLUMN_FLAGS)
+ pyp.Optional(_PRIMARY_TOKEN + _KEY_TOKEN)
+ pyp.Optional(_POSITIONAL)
).setResultsName('add_column')
_REFERENCE_OPTION = pyp.Group(
_RESTRICT_TOKEN
| _CASCADE_TOKEN
| (_SET_TOKEN + _NULL_TOKEN)
| (_NO_TOKEN + _ACTION_TOKEN)
).setResultsName('reference_option')
_CONSTRAINT_DEFINITION = pyp.Group(
pyp.Optional(
_CONSTRAINT_TOKEN
+ pyp.Optional(_IDENTIFIER).setResultsName('constraint_name')
)
+ _FOREIGN_TOKEN + _KEY_TOKEN
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST
+ _REFERENCES_TOKEN
+ _TABLE_NAME
+ _FIELD_LIST
+ pyp.Optional(_ON_TOKEN
+ _DELETE_TOKEN
+ _REFERENCE_OPTION)
+ pyp.Optional(_ON_TOKEN
+ _UPDATE_TOKEN
+ _REFERENCE_OPTION)
)
_ALTER_TABLE_ADD_CONSTRAINT = pyp.Group(
_ADD_TOKEN
+ _CONSTRAINT_DEFINITION
).setResultsName('add_constraint')
_ALTER_TABLE_DROP_FOREIGN_KEY = pyp.Group(
_DROP_TOKEN
+ _FOREIGN_TOKEN
+ _KEY_TOKEN
+ _IDENTIFIER.setResultsName('constraint_name')
).setResultsName('drop_foreign_key')
# ADD [UNIQUE] INDEX | KEY ...
# ADD UNIQUE ...
_ALTER_TABLE_ADD_INDEX = pyp.Group(
_ADD_TOKEN
+ ((pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ (_INDEX_TOKEN | _KEY_TOKEN))
| (_UNIQUE_TOKEN).setResultsName('key_type'))
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST
).setResultsName('add_index')
_ALTER_TABLE_ADD_PRIMARY_KEY = pyp.Group(
_ADD_TOKEN + _PRIMARY_TOKEN + _KEY_TOKEN
+ _FIELD_LIST
).setResultsName('add_primary_key')
_ALTER_TABLE_ALTER = pyp.Group(
_ALTER_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
+ ((_SET_TOKEN + _DEFAULT_VAL)
| (_DROP_TOKEN + _DEFAULT_TOKEN))
).setResultsName('alter_column')
_ALTER_TABLE_MODIFY = pyp.Group(
_MODIFY_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ (_COLUMN_NAME + _COLUMN_DEFINITION)
+ pyp.Optional(_POSITIONAL)
).setResultsName('modify_column')
_ALTER_TABLE_CHANGE = pyp.Group(
_CHANGE_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
+ _COLUMN_NAME.setResultsName('column_spec_new')
+ _COLUMN_DEFINITION
).setResultsName('change_column')
_ALTER_TABLE_DROP_COLUMN = pyp.Group(
_DROP_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
).setResultsName('drop_column')
_ALTER_TABLE_DROP_PRIMARY_KEY = pyp.Group(
_DROP_TOKEN + _PRIMARY_TOKEN + _KEY_TOKEN
).setResultsName('drop_primary_key')
_ALTER_TABLE_DROP_INDEX = pyp.Group(
_DROP_TOKEN + (_INDEX_TOKEN | _KEY_TOKEN)
+ _IDENTIFIER.setResultsName('key_name')
).setResultsName('drop_index')
_ALTER_TABLE_CONVERT = pyp.Group(
_CONVERT_TOKEN + _TO_TOKEN + _CHARACTER_TOKEN + _SET_TOKEN
+ _IDENTIFIER.setResultsName('character_set')
).setResultsName('convert')
_ALTER_CHARACTER_SET = pyp.Group(
_CHARACTER_TOKEN + _SET_TOKEN
+ _IDENTIFIER.setResultsName('character_set')
).setResultsName('alter_charset')
# The various ALTER TABLE operations supported:
# - ADD PRIMARY KEY
# - ADD INDEX
# - ADD COLUMN
# - CHANGE
# - DROP
# - ALTER
_ALTER_TABLE_OPERATIONS = pyp.Group(
_ALTER_TABLE_MODIFY
| _ALTER_TABLE_ADD_PRIMARY_KEY
| _ALTER_TABLE_ADD_CONSTRAINT
| _ALTER_TABLE_DROP_FOREIGN_KEY
| _ALTER_TABLE_ADD_INDEX
| _ALTER_TABLE_ADD_COLUMN
| _ALTER_TABLE_CHANGE
| _ALTER_TABLE_DROP_PRIMARY_KEY
| _ALTER_TABLE_DROP_INDEX
| _ALTER_TABLE_DROP_COLUMN
| _ALTER_TABLE_ALTER
| _ALTER_TABLE_CONVERT
| _ALTER_CHARACTER_SET
).setResultsName('operations')
_ALTER_TABLE_SQL = pyp.Group(_ALTER_TOKEN
+ _TABLE_TOKEN
+ _TABLE_NAME
+ pyp.delimitedList(_ALTER_TABLE_OPERATIONS)
).setResultsName('alter')
_ALTER_DATABASE_OPERATIONS = pyp.Group(
_ALTER_CHARACTER_SET
).setResultsName('operations')
_ALTER_DATABASE_SQL = pyp.Group(
_ALTER_TOKEN
+ _DATABASE_TOKEN
+ _DB_NAME
+ pyp.delimitedList(_ALTER_DATABASE_OPERATIONS)
).setResultsName('alter_db')
# CREATE STATEMENTS
_CREATE_DEFINITION = pyp.Group(_KEY_DEFINITION
| _CONSTRAINT_DEFINITION
| (_COLUMN_NAME
+ _COLUMN_DEFINITION)
).setResultsName('operation')
# Match on IF NOT EXISTS
_CREATE_NO_OVERWRITE = _IF_TOKEN + _NOT_TOKEN + _EXISTS_TOKEN
_CREATE_OPERATIONS = pyp.Group(pyp.delimitedList(_CREATE_DEFINITION)
).setResultsName('operations')
# CREATE TABLE table options can come in any order. There may be
# zero or many of them
_TABLE_FLAGS = pyp.Group(_ENGINE_TOKEN
| (_DEFAULT_TOKEN + _CHARSET_TOKEN)
| _CHARSET_TOKEN
| (_CHARACTER_TOKEN + _SET_TOKEN)
| (_DEFAULT_TOKEN + _CHARACTER_TOKEN + _SET_TOKEN)
| _COLLATE_TOKEN
).setResultsName('table_flags_type')
# CREATE TABLE table options are always of the format: FLAG=VALUE
_TABLE_FLAGS_DEF = pyp.Group(
_TABLE_FLAGS
+ pyp.Optional(pyp.Suppress('='))
+ _IDENTIFIER.setResultsName('table_flags_identifier')
).setResultsName('table_flags_definition')
_CREATE_TABLE_SQL = pyp.Group(
_CREATE_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _TABLE_NAME
+ pyp.Suppress('(')
+ _CREATE_OPERATIONS
+ pyp.Suppress(')')
+ pyp.ZeroOrMore(_TABLE_FLAGS_DEF).setResultsName('table_flags')
).setResultsName('create_table')
_CREATE_TABLE_LIKE_SQL = pyp.Group(
_CREATE_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _TABLE_NAME
+ _LIKE_TOKEN
+ _TABLE_NAME
).setResultsName('create_table_like')
# DROP TABLE [IF EXISTS] table
_DROP_TABLE_SQL = pyp.Group(_DROP_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_IF_TOKEN + _EXISTS_TOKEN)
+ pyp.delimitedList(_TABLE_NAME)
).setResultsName('drop_table')
# CREATE DATABASE dbname
_CREATE_DATABASE_SQL = pyp.Group(_CREATE_TOKEN
+ _DATABASE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _DB_NAME
).setResultsName('create_database')
# DROP DATABASE dbname
_DROP_DATABASE_SQL = pyp.Group(_DROP_TOKEN
+ _DATABASE_TOKEN
+ pyp.Optional(_IF_TOKEN + _EXISTS_TOKEN)
+ _DB_NAME
).setResultsName('drop_database')
# CREATE INDEX idx ON table (column, ...)
_CREATE_INDEX_SQL = (
_CREATE_TOKEN
+ pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ _INDEX_TOKEN
+ _INDEX_NAME.setResultsName('key_name')
+ _ON_TOKEN
+ _TABLE_NAME
+ _COLUMN_LIST)
# EXPRESSIONS
_BINOP1 = pyp.oneOf("* / %")
_BINOP2 = pyp.oneOf("+ - << >> | &")
_BINOP3 = pyp.oneOf(":= = != <> < > >= <=")
_BINOP4 = pyp.oneOf("like between regexp", caseless=True) # optional "NOT"
_BINOP5 = pyp.oneOf("and", caseless=True)
_BINOP6 = pyp.oneOf("or", caseless=True)
_EXPRESSION = pyp.Forward() # _EXPRESSION is recursive
_DATE_FUNCTION_NAME = pyp.oneOf("date_add date_sub", caseless=True
).setResultsName('function_name')
_INTERVAL_UNIT = pyp.oneOf(
"microsecond second minute hour day week month quarter year "
"second_microsecond minute_microsecond minute_second hour_microsecond "
"hour_second hour_minute day_microsecond day_second day_minute "
"day_hour year_month", caseless=True
).setResultsName('interval_unit')
_DATE_FUNCTION = pyp.Group(
_DATE_FUNCTION_NAME
+ pyp.Suppress('(')
+ _EXPRESSION.setResultsName('arg')
+ pyp.Suppress(',')
+ _INTERVAL_TOKEN
+ _EXPRESSION.setResultsName('interval_val')
+ _INTERVAL_UNIT
+ pyp.Suppress(')')
).setResultsName('function')
_FUNCTION_NAME = (_IDENTIFIER
).setResultsName('function_name')
_ARG_LIST = pyp.Group(
pyp.Suppress('(')
+ pyp.Optional(pyp.delimitedList(_EXPRESSION.setResultsName('arg')))
+ pyp.Suppress(')')
).setResultsName('args')
_FUNCTION = pyp.Group(
_FUNCTION_NAME
+ _ARG_LIST
).setResultsName('function')
_VARIABLE = pyp.Group(
pyp.Group(pyp.Literal('@@')
| pyp.Literal('@')
).setResultsName('scope')
+ _IDENTIFIER.setResultsName('variable'))
_LVAL = ((pyp.Suppress('(') + _EXPRESSION + pyp.Suppress(')'))
| _VAL
| _FUNCTION
| _DATE_FUNCTION
| _COLUMN_NAME + pyp.Optional(
_COLLATE_TOKEN + _IDENTIFIER.setResultsName('collate'))
| _VARIABLE)
_IN_EXPRESSION = pyp.Group(
_LVAL
+ pyp.Optional(_NOT_TOKEN)
+ _IN_TOKEN
+ pyp.Suppress('(')
+ pyp.delimitedList(_VAL)
+ pyp.Suppress(')')
).setResultsName('in')
_IS_EXPRESSION = pyp.Group(
_LVAL
+ _IS_TOKEN
+ pyp.Optional(_NOT_TOKEN)
+ (_NULL_TOKEN | _TRUE_TOKEN | _FALSE_TOKEN | _UNKNOWN_TOKEN)
).setResultsName('is')
_CASES_LIST = (
pyp.OneOrMore(_WHEN_TOKEN
+ _EXPRESSION
+ _THEN_TOKEN
+ _EXPRESSION)
+ pyp.Optional(_ELSE_TOKEN
+ _EXPRESSION))
_CASE_EXPRESSION = pyp.Group(
_CASE_TOKEN
+ (_CASES_LIST
| (_EXPRESSION + _CASES_LIST))
+ _END_TOKEN).setResultsName('case')
_UNARY = (
_NOT_TOKEN
| '!'
| '-')
_EXPRESSION0 = (
_IS_EXPRESSION
| _IN_EXPRESSION
| _CASE_EXPRESSION
| (pyp.Optional(_UNARY) + _LVAL))
_EXPRESSION1 = (
pyp.Group(_EXPRESSION0
+ pyp.ZeroOrMore(_BINOP1 + _EXPRESSION0)).setResultsName('ex'))
_EXPRESSION2 = (
pyp.Group(_EXPRESSION1
+ pyp.ZeroOrMore(_BINOP2 + _EXPRESSION1)).setResultsName('ex'))
_EXPRESSION3 = (
pyp.Group(_EXPRESSION2
+ pyp.ZeroOrMore(_BINOP3 + _EXPRESSION2)).setResultsName('ex'))
_EXPRESSION4 = (
pyp.Group(_EXPRESSION3
+ pyp.ZeroOrMore(
pyp.Optional(_NOT_TOKEN) + _BINOP4 + _EXPRESSION3)
).setResultsName('ex'))
_EXPRESSION5 = (
pyp.Group(_EXPRESSION4
+ pyp.ZeroOrMore(_BINOP5 + _EXPRESSION4)).setResultsName('ex'))
_EXPRESSION << (
pyp.Group(_EXPRESSION5
+ pyp.ZeroOrMore(_BINOP6 + _EXPRESSION5)).setResultsName('ex'))
# SET STATEMENT
_SET_VARIABLE = (
pyp.Optional(
_LOCAL_TOKEN
| _SESSION_TOKEN
| _GLOBAL_TOKEN
| pyp.Literal('@@')
| pyp.Literal('@')
).setResultsName('scope')
+ _IDENTIFIER.setResultsName('variable')
+ pyp.Literal('=')
+ _EXPRESSION)
_SET_CHARSET = (
_CHARACTER_TOKEN
+ _SET_TOKEN
+ _EXPRESSION)
_SET_NAMES = (
_NAMES_TOKEN
+ _EXPRESSION)
_SET_SQL = pyp.Group(
_SET_TOKEN
+ pyp.delimitedList(_SET_VARIABLE
| _SET_CHARSET
| _SET_NAMES))
# TABLE REFERENCE
_INDEX_HINT = ((_USE_TOKEN | _IGNORE_TOKEN | _FORCE_TOKEN)
+ (_INDEX_TOKEN | _KEY_TOKEN)
+ pyp.Suppress('(')
+ pyp.delimitedList(_IDENTIFIER)
+ pyp.Suppress(')'))
_ALIAS = (pyp.Optional(_AS_TOKEN)
+ pyp.NotAny(_KEYWORDS)
+ _IDENTIFIER.setResultsName('alias'))
_TABLE = (pyp.Group(_TABLE_NAME
+ pyp.Optional(_ALIAS)).setResultsName('table_alias')
+ pyp.Optional(pyp.delimitedList(_INDEX_HINT)))
_JOIN_CONDITION = ((_ON_TOKEN + _EXPRESSION)
| pyp.Group(_USING_TOKEN
+ _COLUMN_LIST).setResultsName('using'))
_JOIN_LEFT_RIGHT = ((_LEFT_TOKEN | _RIGHT_TOKEN)
+ pyp.Optional(_OUTER_TOKEN))
_JOIN_SIDE = pyp.Group((_INNER_TOKEN | _CROSS_TOKEN)
|(_NATURAL_TOKEN
+ pyp.Optional(_JOIN_LEFT_RIGHT))
| _JOIN_LEFT_RIGHT
).setResultsName('join_side')
_TABLE_JOIN = pyp.Group(
pyp.Optional(_JOIN_SIDE)
+ (_JOIN_TOKEN | _STRAIGHT_JOIN_TOKEN)
+ _TABLE
+ pyp.Optional(_JOIN_CONDITION)).setResultsName('tablejoin')
_TABLE_REFERENCE = _TABLE + pyp.ZeroOrMore(_TABLE_JOIN)
_TABLE_REFERENCES = pyp.Group(pyp.delimitedList(_TABLE_REFERENCE))
# DATA MANIPULATION COMMONS
_EXPRESSION_LIST = pyp.Group(pyp.delimitedList(_EXPRESSION))
_WHERE = (_WHERE_TOKEN
+ _EXPRESSION_LIST.setResultsName('where'))
_ORDER_BY = (_ORDER_TOKEN
+ _BY_TOKEN
+ _EXPRESSION_LIST.setResultsName('order_by'))
_GROUP_BY = (_GROUP_TOKEN
+ _BY_TOKEN
+ _EXPRESSION_LIST.setResultsName('group_by'))
_HAVING = (_HAVING_TOKEN
+ _EXPRESSION_LIST.setResultsName('having'))
_LIMIT = (_LIMIT_TOKEN
+ _NUMBER.setResultsName('limit'))
_SET_VALUE = pyp.Group(_COLUMN_NAME
+ pyp.Suppress('=')
+ _EXPRESSION.setResultsName('set_value')
).setResultsName('set')
_SET_VALUE_LIST = pyp.Group(pyp.delimitedList(_SET_VALUE)
).setResultsName('sets')
_SET = (_SET_TOKEN.suppress()
+ _SET_VALUE_LIST)
# SELECT STATEMENTS
_SELECT_EXPRESSION = (pyp.Group(
_EXPRESSION.setResultsName('select_expression')
+ pyp.Optional(_AS_TOKEN
+ _IDENTIFIER.setResultsName('alias')))
| pyp.Suppress('*'))
_SELECT_FROM = pyp.Group(_FROM_TOKEN
+ _TABLE_REFERENCES).setResultsName('select_from')
_SELECT_SQL_2 = (_SELECT_FROM
+ pyp.Optional(_WHERE)
+ pyp.Optional(_GROUP_BY)
+ pyp.Optional(_HAVING)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT))
_SELECT_OPTIONS = (_ALL_TOKEN
| _DISTINCT_TOKEN
| _DISTINCTROW_TOKEN)
_SELECT_SQL = pyp.Group(_SELECT_TOKEN
+ pyp.Optional(_SELECT_OPTIONS)
+ pyp.delimitedList(_SELECT_EXPRESSION)
.setResultsName('select_expressions')
+ pyp.Optional(_SELECT_SQL_2)
).setResultsName('select')
# UPDATE STATEMENTS
_UPDATE_TABLE = (_TABLE_NAME
+ _SET
+ pyp.Optional(_WHERE)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT))
_UPDATE_TABLE_REFERENCE = (_TABLE_REFERENCES
+ _SET
+ pyp.Optional(_WHERE))
_UPDATE_SQL = pyp.Group(_UPDATE_TOKEN
+ (_UPDATE_TABLE
| _UPDATE_TABLE_REFERENCE)
).setResultsName('update')
# INSERT/REPLACE STATEMENTS
_VALUES = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_EXPRESSION)
+ pyp.Suppress(')')
).setResultsName('vals')
_INSERT_VALUES = (pyp.Optional(_COLUMN_LIST)
+ _VALUES_TOKEN
+ pyp.delimitedList(_VALUES))
_INSERT_SET = _SET
_INSERT_SELECT = (pyp.Optional(_COLUMN_LIST)
+ pyp.Optional(pyp.Suppress('('))
+ pyp.Group(_SELECT_SQL).setResultsName('source_select')
+ pyp.Optional(pyp.Suppress(')')))
_ON_DUPLICATE_KEY_UPDATE = (_ON_TOKEN
+ _DUPLICATE_TOKEN
+ _KEY_TOKEN
+ _UPDATE_TOKEN
+ _SET_VALUE_LIST)
_INSERT_SQL = pyp.Group(_INSERT_TOKEN
+ pyp.Optional(_IGNORE_TOKEN)
+ pyp.Optional(_INTO_TOKEN)
+ _TABLE_NAME
+ (_INSERT_VALUES
| _INSERT_SET
| _INSERT_SELECT)
+ pyp.Optional(_ON_DUPLICATE_KEY_UPDATE)
).setResultsName('insert')
_REPLACE_SQL = pyp.Group(_REPLACE_TOKEN
+ pyp.Optional(_INTO_TOKEN)
+ _TABLE_NAME
+ (_INSERT_VALUES
| _INSERT_SET
| _INSERT_SELECT)
).setResultsName('replace')
# DELETE STATEMENTS
# DELETE FROM table WHERE ... [ORDER BY ...] [LIMIT ...]
# WHERE ... is not optional because sql.par demands its existence
# in this statement type.
_DELETE_SIMPLE_SQL = pyp.Group(_DELETE_TOKEN
+ _FROM_TOKEN
+ _TABLE_NAME
+ pyp.Optional(_WHERE)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT)
).setResultsName('delete')
# DELETE table FROM table_references [WHERE ...]
_DELETE_MULTI_SQL = pyp.Group(_DELETE_TOKEN
+ pyp.delimitedList(_TABLE_NAME
+ pyp.Optional('.*'))
+ _FROM_TOKEN
+ _TABLE_REFERENCES.setResultsName('exclude')
+ (pyp.Group(pyp.Optional(_WHERE))
.setResultsName('exclude'))
).setResultsName('delete')
# DELETE FROM table USING table_references [WHERE ...]
_DELETE_MULTI_SQL2 = pyp.Group(_DELETE_TOKEN
+ _FROM_TOKEN
+ pyp.delimitedList(_TABLE_NAME
+ pyp.Optional('.*'))
+ _USING_TOKEN
+ _TABLE_REFERENCES.setResultsName('exclude')
+ (pyp.Group(pyp.Optional(_WHERE))
.setResultsName('exclude'))
).setResultsName('delete')
# TRANSACTIONS
_START_TRANSACTION_SQL = pyp.Group((_START_TOKEN + _TRANSACTION_TOKEN)
| _BEGIN_TOKEN
).setResultsName('start_transaction')
_END_TRANSACTION_SQL = pyp.Group(_COMMIT_TOKEN
| _ROLLBACK_TOKEN
).setResultsName('end_transaction')
# UNSUPPORTED QUERIES
_RENAME_TABLE_SQL = (pyp.CaselessKeyword('rename') +
pyp.SkipTo(_LINE_DELIMITER).suppress())
_TRUNCATE_SQL = (pyp.CaselessKeyword('truncate')
+ pyp.SkipTo(_LINE_DELIMITER).suppress())
# VERSIONED COMMENTS
_STATEMENT = pyp.Forward()
_VERSIONED_COMMENT = (pyp.Literal('/*!')
+ pyp.Optional(_NUMBER.setResultsName('min_version'))
+ _STATEMENT
+ pyp.Literal('*/'))
# MAIN
_STATEMENT << pyp.Group(_ALTER_TABLE_SQL
| _ALTER_DATABASE_SQL
| _CREATE_TABLE_SQL
| _CREATE_TABLE_LIKE_SQL
| _DROP_TABLE_SQL
| _RENAME_TABLE_SQL
| _SELECT_SQL
| _UPDATE_SQL
| _INSERT_SQL
| _REPLACE_SQL
| _DELETE_MULTI_SQL
| _DELETE_MULTI_SQL2
| _DELETE_SIMPLE_SQL
| _TRUNCATE_SQL
| _START_TRANSACTION_SQL
| _END_TRANSACTION_SQL
| _CREATE_DATABASE_SQL
| _DROP_DATABASE_SQL
| _CREATE_INDEX_SQL
| _SET_SQL
| _VERSIONED_COMMENT
).setResultsName('statement')
_QUERY = pyp.Group(_STATEMENT
+ _LINE_DELIMITER).setResultsName('query')
_QUERY.ignore(_COMMENT_LINE)
_QUERY.ignore(_COMMENT_BLOCK)
class GoogleSQLParser(SQLParser):
  """Extended grammar for SQL within Google.

  Adds the non-standard "ON SHARD <n>[, <n>...]" statement prefix, which
  names the shard(s) a statement should run against, in front of anything
  the base SQLParser grammar accepts.
  """
  # "ON SHARD 1,2,3" -- one or more comma-separated shard numbers, captured
  # under the 'shard' results name.
  _GOOGLE_SQL_ON_SHARD = (
      pyp.CaselessKeyword('on')
      + pyp.CaselessKeyword('shard')
      + pyp.Group(pyp.delimitedList(SQLParser._NUMBER)).setResultsName('shard'))
  _GOOGLE_SQL_EXTENSION = pyp.Group(_GOOGLE_SQL_ON_SHARD
                                    ).setResultsName('running_scheme')
  # Same shape as SQLParser._QUERY, but with the optional ON SHARD prefix.
  _QUERY = pyp.Group(pyp.Optional(_GOOGLE_SQL_EXTENSION)
                     + SQLParser._STATEMENT
                     + SQLParser._LINE_DELIMITER).setResultsName('query')
  _QUERY.ignore(SQLParser._COMMENT_LINE)
  _QUERY.ignore(SQLParser._COMMENT_BLOCK)
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MySQL query parser.
Tools to parse SQL queries into a pyparsing parse tree. The primary method here
is SQLParser.ParseString, which takes a string that you might pipe to the mysql
CLI (containing multiple delimited queries) and parses it. The parsing grammar
is far from complete, and focuses on DDL.
GoogleSQLParser adds ON SHARD support to the grammar.
"""
# based on the work of <NAME> (<EMAIL>)
__author__ = '<EMAIL> (<NAME>)'
import logging
import pyparsing as pyp
import re
try:
from ..pylib import db
except (ImportError, ValueError):
from pylib import db
class Error(Exception):
  """Root of the exception hierarchy for this module."""


class ParseError(Error):
  """Signals that parsing failed at a known character offset.

  Attributes:
    msg: Description of what went wrong.
    loc: Absolute character position in the input where it went wrong.
  """

  def __init__(self, msg, loc):
    self.msg = msg
    self.loc = loc

  def __str__(self):
    template = '%s (at char %d)'
    return template % (self.msg, self.loc)
class SQLParser(object):
"""SQL Parser"""
def _LogStart(self, instring, loc, expr):
  # pyparsing debug hook: invoked when `expr` begins trying to match at
  # relative offset `loc`. `_base_loc` is the absolute offset of the
  # current statement, logged so positions can be related to the full input.
  logging.debug('Start: base_loc: %d, loc: %d, expr: %s',
                self._base_loc, loc, expr.name)
def _LogSuccess(self, instring, start, loc, expr, tokens):
  # pyparsing debug hook: invoked when `expr` matched the span ending at
  # relative offset `loc`.
  logging.debug('Success: base_loc: %d, loc: %d, expr: %s, tokens: %s',
                self._base_loc, loc, expr.name, tokens)
  # Stamp the absolute input position onto the parse results so consumers
  # can locate each parsed construct in the original string.
  tokens['loc'] = self._base_loc + loc
def _LogFailure(self, instring, start, expr, err):
  # pyparsing debug hook: invoked when `expr` fails to match; `err` is the
  # ParseException describing where and why the match failed.
  logging.debug('Failure: base_loc: %d, loc: %d, expr: %s, err: %s',
                self._base_loc, err.loc, expr.name, err)
def __init__(self, progress_callback=None):
  """Constructor.

  Args:
    progress_callback: If specified, called with the character location of
      the end of the last-yielded statement.
  """
  # Get all the class variables that matches _*_TOKEN
  keywords = list(SQLParser.__dict__[k]
                  for k in SQLParser.__dict__
                  if re.match(r'^_([_\w])+_TOKEN$', k))
  # Fill the grammar rule _KEYWORDS with all the keywords possible.
  # _KEYWORDS is a pyp.Forward placeholder declared at class level; the
  # `<<` operator assigns its contents in place (mutation, not rebinding).
  SQLParser.__dict__['_KEYWORDS'] << pyp.MatchFirst(keywords)
  self._loc = 0  # Last yielded line end
  self._base_loc = 0  # Start of this statement
  self._callback = progress_callback
  # Name every grammar rule after its attribute and attach the logging
  # debug actions, both for tracing and so _LogSuccess can stamp absolute
  # locations onto parse results.
  for key in dir(self):
    grammar_rule = getattr(self, key)
    if isinstance(grammar_rule, pyp.ParserElement):
      grammar_rule.setName(key)
      grammar_rule.setDebugActions(
          self._LogStart, self._LogSuccess, self._LogFailure)
def _OnNewLine(self, loc):
  # db.XSplit callback: remembers the offset of the most recent line
  # boundary so ParseString can report progress and compute statement bases.
  self._loc = loc
def ParseString(self, string):
  """Parse a string of one or more delimited SQL statements.

  Yields:
    The pyparsing parse results for each statement, in input order.

  Raises:
    ParseError: If a statement fails to parse, or if trailing input
      remains that does not form a complete statement.
  """
  logging.debug('Parsing: %r', string)
  try:
    # XSplit/XCombineSQL stream the input line by line and reassemble
    # complete statements, tracking our position via the _OnNewLine
    # callback.
    for statement in db.XCombineSQL(db.XSplit(string, '\n',
                                              callback=self._OnNewLine)):
      yield self._QUERY.parseString(statement)[0]
      if self._callback:
        self._callback(self._loc)
      # Advance the absolute offset of the next statement: past this
      # statement plus its delimiter character.
      self._base_loc = self._loc + len(statement) + 1
  except pyp.ParseException as e:
    # Re-raise with an absolute character offset into the original input.
    raise ParseError(e.msg, self._base_loc + e.loc)
  except db.InputRemaining as e:
    raise ParseError('Input remaining: %s' % e, self._base_loc + self._loc)
# DISCARDED
_COMMENT_START = pyp.Keyword(
'--', identChars=pyp.Keyword.DEFAULT_KEYWORD_CHARS + '-')
_COMMENT_LINE = _COMMENT_START + pyp.restOfLine
_COMMENT_BLOCK = pyp.Regex(r'/\*(?=[^!])(?:[^*]*\*+)+?/')
# TERMINALS
_LINE_DELIMITER = pyp.Suppress(';').setName(';')
_ALTER_TOKEN = pyp.CaselessKeyword('alter')
_SELECT_TOKEN = pyp.CaselessKeyword('select')
_CREATE_TOKEN = pyp.CaselessKeyword('create')
_UPDATE_TOKEN = pyp.CaselessKeyword('update')
_INSERT_TOKEN = pyp.CaselessKeyword('insert')
_REPLACE_TOKEN = pyp.CaselessKeyword('replace')
_DELETE_TOKEN = pyp.CaselessKeyword('delete')
_MODIFY_TOKEN = pyp.CaselessKeyword('modify')
_ADD_TOKEN = pyp.CaselessKeyword('add')
_CHANGE_TOKEN = pyp.CaselessKeyword('change')
_DROP_TOKEN = pyp.CaselessKeyword('drop')
_CONVERT_TOKEN = pyp.CaselessKeyword('convert')
_TO_TOKEN = pyp.CaselessKeyword('to')
_ALL_TOKEN = pyp.CaselessKeyword('all')
_DISTINCT_TOKEN = pyp.CaselessKeyword('distinct')
_DISTINCTROW_TOKEN = pyp.CaselessKeyword('distinctrow')
_FROM_TOKEN = pyp.CaselessKeyword('from').suppress()
_WHERE_TOKEN = pyp.CaselessKeyword('where').suppress()
_ORDER_TOKEN = pyp.CaselessKeyword('order').suppress()
_GROUP_TOKEN = pyp.CaselessKeyword('group').suppress()
_HAVING_TOKEN = pyp.CaselessKeyword('having').suppress()
_LIMIT_TOKEN = pyp.CaselessKeyword('limit').suppress()
_BY_TOKEN = pyp.CaselessKeyword('by').suppress()
_AS_TOKEN = pyp.CaselessKeyword('as').suppress()
_INTO_TOKEN = pyp.CaselessKeyword('into').suppress()
_VALUES_TOKEN = pyp.CaselessKeyword('values').suppress()
_IS_TOKEN = pyp.CaselessKeyword('is')
_NOT_TOKEN = pyp.CaselessKeyword('not')
_NULL_TOKEN = pyp.CaselessKeyword('null')
_TRUE_TOKEN = pyp.CaselessKeyword('true')
_FALSE_TOKEN = pyp.CaselessKeyword('false')
_UNKNOWN_TOKEN = pyp.CaselessKeyword('unknown')
_IN_TOKEN = pyp.CaselessKeyword('in')
_CASE_TOKEN = pyp.CaselessKeyword('case')
_WHEN_TOKEN = pyp.CaselessKeyword('when')
_THEN_TOKEN = pyp.CaselessKeyword('then')
_ELSE_TOKEN = pyp.CaselessKeyword('else')
_START_TOKEN = pyp.CaselessKeyword('start')
_END_TOKEN = pyp.CaselessKeyword('end')
_JOIN_TOKEN = pyp.CaselessKeyword('join')
_LEFT_TOKEN = pyp.CaselessKeyword('left')
_RIGHT_TOKEN = pyp.CaselessKeyword('right')
_CROSS_TOKEN = pyp.CaselessKeyword('cross')
_INNER_TOKEN = pyp.CaselessKeyword('inner')
_OUTER_TOKEN = pyp.CaselessKeyword('outer')
_NATURAL_TOKEN = pyp.CaselessKeyword('natural')
_ON_TOKEN = pyp.CaselessKeyword('on')
_USING_TOKEN = pyp.CaselessKeyword('using')
_STRAIGHT_JOIN_TOKEN = pyp.CaselessKeyword('straight_join')
_LIKE_TOKEN = pyp.CaselessKeyword('like')
_ENGINE_TOKEN = pyp.CaselessKeyword('engine')
_IF_TOKEN = pyp.CaselessKeyword('if').suppress()
_EXISTS_TOKEN = pyp.CaselessKeyword('exists').suppress()
_CHARSET_TOKEN = pyp.CaselessKeyword('charset')
_CHARACTER_TOKEN = pyp.CaselessKeyword('character')
_NAMES_TOKEN = pyp.CaselessKeyword('names')
_COLLATE_TOKEN = pyp.CaselessKeyword('collate')
_INTERVAL_TOKEN = pyp.CaselessKeyword('interval')
_DATABASE_TOKEN = pyp.CaselessKeyword('database')
_TABLE_TOKEN = pyp.CaselessKeyword('table').suppress()
_COLUMN_TOKEN = pyp.CaselessKeyword('column').suppress()
_INDEX_TOKEN = pyp.CaselessKeyword('index')
_PRIMARY_TOKEN = pyp.CaselessKeyword('primary')
_KEY_TOKEN = pyp.CaselessKeyword('key')
_UNIQUE_TOKEN = pyp.CaselessKeyword('unique')
_DUPLICATE_TOKEN = pyp.CaselessKeyword('duplicate').suppress()
_AUTO_INCREMENT_TOKEN = pyp.CaselessKeyword('auto_increment')
_DEFAULT_TOKEN = pyp.CaselessKeyword('default').suppress()
_USE_TOKEN = pyp.CaselessKeyword('use')
_IGNORE_TOKEN = pyp.CaselessKeyword('ignore')
_FORCE_TOKEN = pyp.CaselessKeyword('force')
_CONSTRAINT_TOKEN = pyp.CaselessKeyword('constraint')
_FOREIGN_TOKEN = pyp.CaselessKeyword('foreign')
_RESTRICT_TOKEN = pyp.CaselessKeyword('restrict')
_CASCADE_TOKEN = pyp.CaselessKeyword('cascade')
_NO_TOKEN = pyp.CaselessKeyword('no')
_ACTION_TOKEN = pyp.CaselessKeyword('action')
_REFERENCES_TOKEN = pyp.CaselessKeyword('references')
_TINYINT_TOKEN = pyp.CaselessKeyword('tinyint')
_SMALLINT_TOKEN = pyp.CaselessKeyword('smallint')
_MEDIUMINT_TOKEN = pyp.CaselessKeyword('mediumint')
_INT_TOKEN = pyp.CaselessKeyword('int')
_INTEGER_TOKEN = pyp.CaselessKeyword('integer')
_BIGINT_TOKEN = pyp.CaselessKeyword('bigint')
_UNSIGNED_TOKEN = pyp.CaselessKeyword('unsigned')
_DECIMAL_TOKEN = pyp.CaselessKeyword('decimal')
_DEC_TOKEN = pyp.CaselessKeyword('dec')
_FIXED_TOKEN = pyp.CaselessKeyword('fixed')
_FLOAT_TOKEN = pyp.CaselessKeyword('float')
_DOUBLE_TOKEN = pyp.CaselessKeyword('double')
_PRECISION_TOKEN = pyp.CaselessKeyword('precision')
_DATE_TOKEN = pyp.CaselessKeyword('date')
_DATETIME_TOKEN = pyp.CaselessKeyword('datetime')
_TIMESTAMP_TOKEN = pyp.CaselessKeyword('timestamp')
_TIME_TOKEN = pyp.CaselessKeyword('time')
_YEAR_TOKEN = pyp.CaselessKeyword('year')
_CHAR_TOKEN = pyp.CaselessKeyword('char')
_VARCHAR_TOKEN = pyp.CaselessKeyword('varchar')
_BINARY_TOKEN = pyp.CaselessKeyword('binary')
_VARBINARY_TOKEN = pyp.CaselessKeyword('varbinary')
_TINYBLOB_TOKEN = pyp.CaselessKeyword('tinyblob')
_BLOB_TOKEN = pyp.CaselessKeyword('blob')
_MEDIUMBLOB_TOKEN = pyp.CaselessKeyword('mediumblob')
_LONGBLOB_TOKEN = pyp.CaselessKeyword('longblob')
_TINYTEXT_TOKEN = pyp.CaselessKeyword('tinytext')
_TEXT_TOKEN = pyp.CaselessKeyword('text')
_MEDIUMTEXT_TOKEN = pyp.CaselessKeyword('mediumtext')
_LONGTEXT_TOKEN = pyp.CaselessKeyword('longtext')
_ENUM_TOKEN = pyp.CaselessKeyword('enum')
_SET_TOKEN = pyp.CaselessKeyword('set')
_BIT_TOKEN = pyp.CaselessKeyword('bit')
_FIRST_TOKEN = pyp.CaselessKeyword('first')
_BEFORE_TOKEN = pyp.CaselessKeyword('before')
_AFTER_TOKEN = pyp.CaselessKeyword('after')
_CURRENT_TIMESTAMP_TOKEN = pyp.CaselessKeyword('current_timestamp')
_BEGIN_TOKEN = pyp.CaselessKeyword('begin')
_TRANSACTION_TOKEN = pyp.CaselessKeyword('transaction')
_COMMIT_TOKEN = pyp.CaselessKeyword('commit')
_ROLLBACK_TOKEN = pyp.CaselessKeyword('rollback')
_LOCAL_TOKEN = pyp.CaselessKeyword('local')
_SESSION_TOKEN = pyp.CaselessKeyword('session')
_GLOBAL_TOKEN = pyp.CaselessKeyword('global')
## IDENTIFIER
_KEYWORDS = pyp.Forward() # list of keywords, defined by __init__()
_IDENTIFIER = pyp.Group(pyp.Word(pyp.alphas, pyp.alphanums + '_$')
| pyp.QuotedString('`', multiline=True, escChar='\\'))
_CHARSET = '_' + pyp.Word(pyp.alphanums).setResultsName('character_set')
_STRING = (pyp.Optional(_CHARSET)
+ (pyp.QuotedString('\'', multiline=True, escChar='\\')
| pyp.QuotedString('\"', multiline=True, escChar='\\')))
_NUMBER = pyp.Word(pyp.nums)
_ARITH_SIGN = pyp.Word('+-', exact=1)
_E = pyp.CaselessLiteral('E')
_REAL_NUMBER = pyp.Combine(pyp.Optional(_ARITH_SIGN)
+ pyp.Optional(_NUMBER) + '.' + _NUMBER
+ pyp.Optional(_E
+ pyp.Optional(_ARITH_SIGN)
+ _NUMBER))
_INT_NUMBER = pyp.Combine(pyp.Optional(_ARITH_SIGN)
+ _NUMBER
+ pyp.Optional(_E
+ pyp.Optional('+')
+ _NUMBER))
_HEX = ((pyp.CaselessLiteral('0x').suppress()
+ pyp.Word(pyp.hexnums))
| pyp.Regex(r"x'(?:[0-9a-fA-F])+'"))
_VAL = pyp.Group(
_HEX
| pyp.OneOrMore(_STRING)
| _REAL_NUMBER
| _INT_NUMBER
| _NULL_TOKEN
| _TRUE_TOKEN
| _FALSE_TOKEN).setResultsName('val')
## TYPES
_FIELD_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_IDENTIFIER)
+ pyp.Suppress(')')
).setResultsName('fields')
_STRING_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_STRING)
+ pyp.Suppress(')')
).setResultsName('values')
_TYPE_SIZE = (pyp.Suppress('(')
+ _NUMBER.setName('type_size')
+ pyp.Suppress(')'))
_TYPE_PRECISION = (pyp.Suppress('(')
+ _NUMBER.setName('type_precision')
+ pyp.Suppress(',')
+ _NUMBER.setName('type_scale')
+ pyp.Suppress(')'))
# Types that don't take arguments.
_SIMPLE_TYPE = (_DATE_TOKEN
| _DATETIME_TOKEN
| _TIMESTAMP_TOKEN
| _TIME_TOKEN
| _YEAR_TOKEN
| _TINYTEXT_TOKEN
| _TEXT_TOKEN
| _MEDIUMTEXT_TOKEN
| _LONGTEXT_TOKEN
| _TINYBLOB_TOKEN
| _BLOB_TOKEN
| _MEDIUMBLOB_TOKEN
| _LONGBLOB_TOKEN).setResultsName('type_type')
_BIT = (_BIT_TOKEN.setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE))
_ENUM = (_ENUM_TOKEN.setResultsName('type_type')
+ _STRING_LIST)
_SET_TYPE = (_SET_TOKEN.setResultsName('type_type')
+ _STRING_LIST)
_INTS = ((_TINYINT_TOKEN
| _SMALLINT_TOKEN
| _MEDIUMINT_TOKEN
| _INT_TOKEN
| _INTEGER_TOKEN
| _BIGINT_TOKEN).setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE)
+ pyp.Optional(_UNSIGNED_TOKEN))
_REALS = ((_DECIMAL_TOKEN
| _DEC_TOKEN
| _FIXED_TOKEN
| _FLOAT_TOKEN
| _DOUBLE_TOKEN + pyp.Optional(_PRECISION_TOKEN)
).setResultsName('type_type')
+ pyp.Optional(_TYPE_PRECISION))
_CHARS = ((_VARCHAR_TOKEN
| _CHAR_TOKEN
| _BINARY_TOKEN
| _VARBINARY_TOKEN).setResultsName('type_type')
+ pyp.Optional(_TYPE_SIZE)
+ pyp.Optional(_BINARY_TOKEN))
_TYPE = pyp.Group(_BIT
| _ENUM
| _SET_TYPE
| _INTS
| _REALS
| _CHARS
| _SIMPLE_TYPE
).setResultsName('type')
## GRAMMAR
# COMMONS
_DB_NAME = _IDENTIFIER.setResultsName('database')
_TABLE_NAME_ONLY = _IDENTIFIER.setResultsName('table')
_TABLE_NAME = pyp.Group((_DB_NAME + '.' + _TABLE_NAME_ONLY)
| _TABLE_NAME_ONLY).setResultsName('table_spec')
_COLUMN_NAME_WILD = (_IDENTIFIER | '*').setResultsName('column')
_COLUMN_NAME = pyp.Group(
(_DB_NAME + '.' + _TABLE_NAME_ONLY + '.' + _COLUMN_NAME_WILD)
| (_TABLE_NAME_ONLY + '.' + _COLUMN_NAME_WILD)
| _COLUMN_NAME_WILD).setResultsName('column_spec')
_INDEX_NAME = _IDENTIFIER.setResultsName('index')
_COLUMN_LIST = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_COLUMN_NAME)
+ pyp.Suppress(')')
).setResultsName('columns')
# DATA DEFINITION COMMONS
_DEFAULT_VAL = (_DEFAULT_TOKEN
+ pyp.Group(_NULL_TOKEN
| _VAL
| _CURRENT_TIMESTAMP_TOKEN
).setResultsName('default'))
_COLUMN_CONSTRAINT = pyp.Group(pyp.Optional(_NOT_TOKEN)
+ _NULL_TOKEN
).setResultsName('constraint')
_POSITIONAL = pyp.Group(_FIRST_TOKEN
| ((_BEFORE_TOKEN | _AFTER_TOKEN) + _COLUMN_NAME)
).setResultsName('position')
# Optional column flags:
# - CHARSET <charset>
# - CHARACTER SET <charset>
# - COLLATE <collate name>
# - DEFAULT '<value>'
# - AUTO_INCREMENT
# - NOT NULL
# - ON UPDATE CURRENT_TIMESTAMP
_COLUMN_FLAGS = pyp.Group(
(_CHARSET_TOKEN + _IDENTIFIER.setResultsName('charset'))
| (_CHARACTER_TOKEN + _SET_TOKEN + _IDENTIFIER.setResultsName('charset'))
| (_COLLATE_TOKEN + _IDENTIFIER.setResultsName('collate'))
| _COLUMN_CONSTRAINT
| _DEFAULT_VAL
| _AUTO_INCREMENT_TOKEN.setResultsName('option')
| (_ON_TOKEN + _UPDATE_TOKEN + _CURRENT_TIMESTAMP_TOKEN)
).setResultsName('column_flags')
_COLUMN_DEFINITION = pyp.Group(_TYPE
+ pyp.ZeroOrMore(_COLUMN_FLAGS)
).setResultsName('column_definition')
_KEY_DEFINITION = pyp.Group(
(((pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ (_INDEX_TOKEN | _KEY_TOKEN).setResultsName('key_type'))
| _UNIQUE_TOKEN.setResultsName('key_type'))
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST)
| ((_PRIMARY_TOKEN + _KEY_TOKEN).setResultsName('key_type')
+ _FIELD_LIST)
).setResultsName('key_definition')
# ALTER STATEMENTS
# ADD COLUMN columnname TYPE [BEFORE | AFTER ...]
# ADD COLUMN (columnname TYPE, ...) [BEFORE | AFTER ...]
_ALTER_TABLE_ADD_COLUMN = pyp.Group(
_ADD_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ ((_COLUMN_NAME + _COLUMN_DEFINITION)
| (pyp.Suppress('(')
+ pyp.delimitedList(_COLUMN_NAME + _COLUMN_DEFINITION)
+ pyp.Suppress(')')))
+ pyp.ZeroOrMore(_COLUMN_FLAGS)
+ pyp.Optional(_PRIMARY_TOKEN + _KEY_TOKEN)
+ pyp.Optional(_POSITIONAL)
).setResultsName('add_column')
_REFERENCE_OPTION = pyp.Group(
_RESTRICT_TOKEN
| _CASCADE_TOKEN
| (_SET_TOKEN + _NULL_TOKEN)
| (_NO_TOKEN + _ACTION_TOKEN)
).setResultsName('reference_option')
_CONSTRAINT_DEFINITION = pyp.Group(
pyp.Optional(
_CONSTRAINT_TOKEN
+ pyp.Optional(_IDENTIFIER).setResultsName('constraint_name')
)
+ _FOREIGN_TOKEN + _KEY_TOKEN
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST
+ _REFERENCES_TOKEN
+ _TABLE_NAME
+ _FIELD_LIST
+ pyp.Optional(_ON_TOKEN
+ _DELETE_TOKEN
+ _REFERENCE_OPTION)
+ pyp.Optional(_ON_TOKEN
+ _UPDATE_TOKEN
+ _REFERENCE_OPTION)
)
_ALTER_TABLE_ADD_CONSTRAINT = pyp.Group(
_ADD_TOKEN
+ _CONSTRAINT_DEFINITION
).setResultsName('add_constraint')
_ALTER_TABLE_DROP_FOREIGN_KEY = pyp.Group(
_DROP_TOKEN
+ _FOREIGN_TOKEN
+ _KEY_TOKEN
+ _IDENTIFIER.setResultsName('constraint_name')
).setResultsName('drop_foreign_key')
# ADD [UNIQUE] INDEX | KEY ...
# ADD UNIQUE ...
_ALTER_TABLE_ADD_INDEX = pyp.Group(
_ADD_TOKEN
+ ((pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ (_INDEX_TOKEN | _KEY_TOKEN))
| (_UNIQUE_TOKEN).setResultsName('key_type'))
+ pyp.Optional(_IDENTIFIER).setResultsName('key_name')
+ _FIELD_LIST
).setResultsName('add_index')
_ALTER_TABLE_ADD_PRIMARY_KEY = pyp.Group(
_ADD_TOKEN + _PRIMARY_TOKEN + _KEY_TOKEN
+ _FIELD_LIST
).setResultsName('add_primary_key')
_ALTER_TABLE_ALTER = pyp.Group(
_ALTER_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
+ ((_SET_TOKEN + _DEFAULT_VAL)
| (_DROP_TOKEN + _DEFAULT_TOKEN))
).setResultsName('alter_column')
_ALTER_TABLE_MODIFY = pyp.Group(
_MODIFY_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ (_COLUMN_NAME + _COLUMN_DEFINITION)
+ pyp.Optional(_POSITIONAL)
).setResultsName('modify_column')
_ALTER_TABLE_CHANGE = pyp.Group(
_CHANGE_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
+ _COLUMN_NAME.setResultsName('column_spec_new')
+ _COLUMN_DEFINITION
).setResultsName('change_column')
_ALTER_TABLE_DROP_COLUMN = pyp.Group(
_DROP_TOKEN + pyp.Optional(_COLUMN_TOKEN)
+ _COLUMN_NAME
).setResultsName('drop_column')
_ALTER_TABLE_DROP_PRIMARY_KEY = pyp.Group(
_DROP_TOKEN + _PRIMARY_TOKEN + _KEY_TOKEN
).setResultsName('drop_primary_key')
_ALTER_TABLE_DROP_INDEX = pyp.Group(
_DROP_TOKEN + (_INDEX_TOKEN | _KEY_TOKEN)
+ _IDENTIFIER.setResultsName('key_name')
).setResultsName('drop_index')
_ALTER_TABLE_CONVERT = pyp.Group(
_CONVERT_TOKEN + _TO_TOKEN + _CHARACTER_TOKEN + _SET_TOKEN
+ _IDENTIFIER.setResultsName('character_set')
).setResultsName('convert')
_ALTER_CHARACTER_SET = pyp.Group(
_CHARACTER_TOKEN + _SET_TOKEN
+ _IDENTIFIER.setResultsName('character_set')
).setResultsName('alter_charset')
# The various ALTER TABLE operations supported:
# - ADD PRIMARY KEY
# - ADD INDEX
# - ADD COLUMN
# - CHANGE
# - DROP
# - ALTER
_ALTER_TABLE_OPERATIONS = pyp.Group(
_ALTER_TABLE_MODIFY
| _ALTER_TABLE_ADD_PRIMARY_KEY
| _ALTER_TABLE_ADD_CONSTRAINT
| _ALTER_TABLE_DROP_FOREIGN_KEY
| _ALTER_TABLE_ADD_INDEX
| _ALTER_TABLE_ADD_COLUMN
| _ALTER_TABLE_CHANGE
| _ALTER_TABLE_DROP_PRIMARY_KEY
| _ALTER_TABLE_DROP_INDEX
| _ALTER_TABLE_DROP_COLUMN
| _ALTER_TABLE_ALTER
| _ALTER_TABLE_CONVERT
| _ALTER_CHARACTER_SET
).setResultsName('operations')
_ALTER_TABLE_SQL = pyp.Group(_ALTER_TOKEN
+ _TABLE_TOKEN
+ _TABLE_NAME
+ pyp.delimitedList(_ALTER_TABLE_OPERATIONS)
).setResultsName('alter')
_ALTER_DATABASE_OPERATIONS = pyp.Group(
_ALTER_CHARACTER_SET
).setResultsName('operations')
_ALTER_DATABASE_SQL = pyp.Group(
_ALTER_TOKEN
+ _DATABASE_TOKEN
+ _DB_NAME
+ pyp.delimitedList(_ALTER_DATABASE_OPERATIONS)
).setResultsName('alter_db')
# CREATE STATEMENTS
_CREATE_DEFINITION = pyp.Group(_KEY_DEFINITION
| _CONSTRAINT_DEFINITION
| (_COLUMN_NAME
+ _COLUMN_DEFINITION)
).setResultsName('operation')
# Match on IF NOT EXISTS
_CREATE_NO_OVERWRITE = _IF_TOKEN + _NOT_TOKEN + _EXISTS_TOKEN
_CREATE_OPERATIONS = pyp.Group(pyp.delimitedList(_CREATE_DEFINITION)
).setResultsName('operations')
# CREATE TABLE table options can come in any order. There may be
# zero or many of them
_TABLE_FLAGS = pyp.Group(_ENGINE_TOKEN
| (_DEFAULT_TOKEN + _CHARSET_TOKEN)
| _CHARSET_TOKEN
| (_CHARACTER_TOKEN + _SET_TOKEN)
| (_DEFAULT_TOKEN + _CHARACTER_TOKEN + _SET_TOKEN)
| _COLLATE_TOKEN
).setResultsName('table_flags_type')
# CREATE TABLE table options are always of the format: FLAG=VALUE
_TABLE_FLAGS_DEF = pyp.Group(
_TABLE_FLAGS
+ pyp.Optional(pyp.Suppress('='))
+ _IDENTIFIER.setResultsName('table_flags_identifier')
).setResultsName('table_flags_definition')
_CREATE_TABLE_SQL = pyp.Group(
_CREATE_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _TABLE_NAME
+ pyp.Suppress('(')
+ _CREATE_OPERATIONS
+ pyp.Suppress(')')
+ pyp.ZeroOrMore(_TABLE_FLAGS_DEF).setResultsName('table_flags')
).setResultsName('create_table')
_CREATE_TABLE_LIKE_SQL = pyp.Group(
_CREATE_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _TABLE_NAME
+ _LIKE_TOKEN
+ _TABLE_NAME
).setResultsName('create_table_like')
# DROP TABLE [IF EXISTS] table
_DROP_TABLE_SQL = pyp.Group(_DROP_TOKEN
+ _TABLE_TOKEN
+ pyp.Optional(_IF_TOKEN + _EXISTS_TOKEN)
+ pyp.delimitedList(_TABLE_NAME)
).setResultsName('drop_table')
# CREATE DATABASE dbname
_CREATE_DATABASE_SQL = pyp.Group(_CREATE_TOKEN
+ _DATABASE_TOKEN
+ pyp.Optional(_CREATE_NO_OVERWRITE)
+ _DB_NAME
).setResultsName('create_database')
# DROP DATABASE dbname
_DROP_DATABASE_SQL = pyp.Group(_DROP_TOKEN
+ _DATABASE_TOKEN
+ pyp.Optional(_IF_TOKEN + _EXISTS_TOKEN)
+ _DB_NAME
).setResultsName('drop_database')
# CREATE INDEX idx ON table (column, ...)
_CREATE_INDEX_SQL = (
_CREATE_TOKEN
+ pyp.Optional(_UNIQUE_TOKEN).setResultsName('key_option')
+ _INDEX_TOKEN
+ _INDEX_NAME.setResultsName('key_name')
+ _ON_TOKEN
+ _TABLE_NAME
+ _COLUMN_LIST)
# EXPRESSIONS
_BINOP1 = pyp.oneOf("* / %")
_BINOP2 = pyp.oneOf("+ - << >> | &")
_BINOP3 = pyp.oneOf(":= = != <> < > >= <=")
_BINOP4 = pyp.oneOf("like between regexp", caseless=True) # optional "NOT"
_BINOP5 = pyp.oneOf("and", caseless=True)
_BINOP6 = pyp.oneOf("or", caseless=True)
_EXPRESSION = pyp.Forward() # _EXPRESSION is recursive
_DATE_FUNCTION_NAME = pyp.oneOf("date_add date_sub", caseless=True
).setResultsName('function_name')
_INTERVAL_UNIT = pyp.oneOf(
"microsecond second minute hour day week month quarter year "
"second_microsecond minute_microsecond minute_second hour_microsecond "
"hour_second hour_minute day_microsecond day_second day_minute "
"day_hour year_month", caseless=True
).setResultsName('interval_unit')
_DATE_FUNCTION = pyp.Group(
_DATE_FUNCTION_NAME
+ pyp.Suppress('(')
+ _EXPRESSION.setResultsName('arg')
+ pyp.Suppress(',')
+ _INTERVAL_TOKEN
+ _EXPRESSION.setResultsName('interval_val')
+ _INTERVAL_UNIT
+ pyp.Suppress(')')
).setResultsName('function')
_FUNCTION_NAME = (_IDENTIFIER
).setResultsName('function_name')
_ARG_LIST = pyp.Group(
pyp.Suppress('(')
+ pyp.Optional(pyp.delimitedList(_EXPRESSION.setResultsName('arg')))
+ pyp.Suppress(')')
).setResultsName('args')
_FUNCTION = pyp.Group(
_FUNCTION_NAME
+ _ARG_LIST
).setResultsName('function')
_VARIABLE = pyp.Group(
pyp.Group(pyp.Literal('@@')
| pyp.Literal('@')
).setResultsName('scope')
+ _IDENTIFIER.setResultsName('variable'))
_LVAL = ((pyp.Suppress('(') + _EXPRESSION + pyp.Suppress(')'))
| _VAL
| _FUNCTION
| _DATE_FUNCTION
| _COLUMN_NAME + pyp.Optional(
_COLLATE_TOKEN + _IDENTIFIER.setResultsName('collate'))
| _VARIABLE)
_IN_EXPRESSION = pyp.Group(
_LVAL
+ pyp.Optional(_NOT_TOKEN)
+ _IN_TOKEN
+ pyp.Suppress('(')
+ pyp.delimitedList(_VAL)
+ pyp.Suppress(')')
).setResultsName('in')
_IS_EXPRESSION = pyp.Group(
_LVAL
+ _IS_TOKEN
+ pyp.Optional(_NOT_TOKEN)
+ (_NULL_TOKEN | _TRUE_TOKEN | _FALSE_TOKEN | _UNKNOWN_TOKEN)
).setResultsName('is')
_CASES_LIST = (
pyp.OneOrMore(_WHEN_TOKEN
+ _EXPRESSION
+ _THEN_TOKEN
+ _EXPRESSION)
+ pyp.Optional(_ELSE_TOKEN
+ _EXPRESSION))
_CASE_EXPRESSION = pyp.Group(
_CASE_TOKEN
+ (_CASES_LIST
| (_EXPRESSION + _CASES_LIST))
+ _END_TOKEN).setResultsName('case')
_UNARY = (
_NOT_TOKEN
| '!'
| '-')
_EXPRESSION0 = (
_IS_EXPRESSION
| _IN_EXPRESSION
| _CASE_EXPRESSION
| (pyp.Optional(_UNARY) + _LVAL))
_EXPRESSION1 = (
pyp.Group(_EXPRESSION0
+ pyp.ZeroOrMore(_BINOP1 + _EXPRESSION0)).setResultsName('ex'))
_EXPRESSION2 = (
pyp.Group(_EXPRESSION1
+ pyp.ZeroOrMore(_BINOP2 + _EXPRESSION1)).setResultsName('ex'))
_EXPRESSION3 = (
pyp.Group(_EXPRESSION2
+ pyp.ZeroOrMore(_BINOP3 + _EXPRESSION2)).setResultsName('ex'))
_EXPRESSION4 = (
pyp.Group(_EXPRESSION3
+ pyp.ZeroOrMore(
pyp.Optional(_NOT_TOKEN) + _BINOP4 + _EXPRESSION3)
).setResultsName('ex'))
_EXPRESSION5 = (
pyp.Group(_EXPRESSION4
+ pyp.ZeroOrMore(_BINOP5 + _EXPRESSION4)).setResultsName('ex'))
_EXPRESSION << (
pyp.Group(_EXPRESSION5
+ pyp.ZeroOrMore(_BINOP6 + _EXPRESSION5)).setResultsName('ex'))
# SET STATEMENT
_SET_VARIABLE = (
pyp.Optional(
_LOCAL_TOKEN
| _SESSION_TOKEN
| _GLOBAL_TOKEN
| pyp.Literal('@@')
| pyp.Literal('@')
).setResultsName('scope')
+ _IDENTIFIER.setResultsName('variable')
+ pyp.Literal('=')
+ _EXPRESSION)
_SET_CHARSET = (
_CHARACTER_TOKEN
+ _SET_TOKEN
+ _EXPRESSION)
_SET_NAMES = (
_NAMES_TOKEN
+ _EXPRESSION)
_SET_SQL = pyp.Group(
_SET_TOKEN
+ pyp.delimitedList(_SET_VARIABLE
| _SET_CHARSET
| _SET_NAMES))
# TABLE REFERENCE
_INDEX_HINT = ((_USE_TOKEN | _IGNORE_TOKEN | _FORCE_TOKEN)
+ (_INDEX_TOKEN | _KEY_TOKEN)
+ pyp.Suppress('(')
+ pyp.delimitedList(_IDENTIFIER)
+ pyp.Suppress(')'))
_ALIAS = (pyp.Optional(_AS_TOKEN)
+ pyp.NotAny(_KEYWORDS)
+ _IDENTIFIER.setResultsName('alias'))
_TABLE = (pyp.Group(_TABLE_NAME
+ pyp.Optional(_ALIAS)).setResultsName('table_alias')
+ pyp.Optional(pyp.delimitedList(_INDEX_HINT)))
_JOIN_CONDITION = ((_ON_TOKEN + _EXPRESSION)
| pyp.Group(_USING_TOKEN
+ _COLUMN_LIST).setResultsName('using'))
_JOIN_LEFT_RIGHT = ((_LEFT_TOKEN | _RIGHT_TOKEN)
+ pyp.Optional(_OUTER_TOKEN))
_JOIN_SIDE = pyp.Group((_INNER_TOKEN | _CROSS_TOKEN)
|(_NATURAL_TOKEN
+ pyp.Optional(_JOIN_LEFT_RIGHT))
| _JOIN_LEFT_RIGHT
).setResultsName('join_side')
_TABLE_JOIN = pyp.Group(
pyp.Optional(_JOIN_SIDE)
+ (_JOIN_TOKEN | _STRAIGHT_JOIN_TOKEN)
+ _TABLE
+ pyp.Optional(_JOIN_CONDITION)).setResultsName('tablejoin')
_TABLE_REFERENCE = _TABLE + pyp.ZeroOrMore(_TABLE_JOIN)
_TABLE_REFERENCES = pyp.Group(pyp.delimitedList(_TABLE_REFERENCE))
# DATA MANIPULATION COMMONS
_EXPRESSION_LIST = pyp.Group(pyp.delimitedList(_EXPRESSION))
_WHERE = (_WHERE_TOKEN
+ _EXPRESSION_LIST.setResultsName('where'))
_ORDER_BY = (_ORDER_TOKEN
+ _BY_TOKEN
+ _EXPRESSION_LIST.setResultsName('order_by'))
_GROUP_BY = (_GROUP_TOKEN
+ _BY_TOKEN
+ _EXPRESSION_LIST.setResultsName('group_by'))
_HAVING = (_HAVING_TOKEN
+ _EXPRESSION_LIST.setResultsName('having'))
_LIMIT = (_LIMIT_TOKEN
+ _NUMBER.setResultsName('limit'))
_SET_VALUE = pyp.Group(_COLUMN_NAME
+ pyp.Suppress('=')
+ _EXPRESSION.setResultsName('set_value')
).setResultsName('set')
_SET_VALUE_LIST = pyp.Group(pyp.delimitedList(_SET_VALUE)
).setResultsName('sets')
_SET = (_SET_TOKEN.suppress()
+ _SET_VALUE_LIST)
# SELECT STATEMENTS
_SELECT_EXPRESSION = (pyp.Group(
_EXPRESSION.setResultsName('select_expression')
+ pyp.Optional(_AS_TOKEN
+ _IDENTIFIER.setResultsName('alias')))
| pyp.Suppress('*'))
_SELECT_FROM = pyp.Group(_FROM_TOKEN
+ _TABLE_REFERENCES).setResultsName('select_from')
_SELECT_SQL_2 = (_SELECT_FROM
+ pyp.Optional(_WHERE)
+ pyp.Optional(_GROUP_BY)
+ pyp.Optional(_HAVING)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT))
_SELECT_OPTIONS = (_ALL_TOKEN
| _DISTINCT_TOKEN
| _DISTINCTROW_TOKEN)
_SELECT_SQL = pyp.Group(_SELECT_TOKEN
+ pyp.Optional(_SELECT_OPTIONS)
+ pyp.delimitedList(_SELECT_EXPRESSION)
.setResultsName('select_expressions')
+ pyp.Optional(_SELECT_SQL_2)
).setResultsName('select')
# UPDATE STATEMENTS
_UPDATE_TABLE = (_TABLE_NAME
+ _SET
+ pyp.Optional(_WHERE)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT))
_UPDATE_TABLE_REFERENCE = (_TABLE_REFERENCES
+ _SET
+ pyp.Optional(_WHERE))
_UPDATE_SQL = pyp.Group(_UPDATE_TOKEN
+ (_UPDATE_TABLE
| _UPDATE_TABLE_REFERENCE)
).setResultsName('update')
# INSERT/REPLACE STATEMENTS
_VALUES = pyp.Group(pyp.Suppress('(')
+ pyp.delimitedList(_EXPRESSION)
+ pyp.Suppress(')')
).setResultsName('vals')
_INSERT_VALUES = (pyp.Optional(_COLUMN_LIST)
+ _VALUES_TOKEN
+ pyp.delimitedList(_VALUES))
_INSERT_SET = _SET
_INSERT_SELECT = (pyp.Optional(_COLUMN_LIST)
+ pyp.Optional(pyp.Suppress('('))
+ pyp.Group(_SELECT_SQL).setResultsName('source_select')
+ pyp.Optional(pyp.Suppress(')')))
_ON_DUPLICATE_KEY_UPDATE = (_ON_TOKEN
+ _DUPLICATE_TOKEN
+ _KEY_TOKEN
+ _UPDATE_TOKEN
+ _SET_VALUE_LIST)
_INSERT_SQL = pyp.Group(_INSERT_TOKEN
+ pyp.Optional(_IGNORE_TOKEN)
+ pyp.Optional(_INTO_TOKEN)
+ _TABLE_NAME
+ (_INSERT_VALUES
| _INSERT_SET
| _INSERT_SELECT)
+ pyp.Optional(_ON_DUPLICATE_KEY_UPDATE)
).setResultsName('insert')
_REPLACE_SQL = pyp.Group(_REPLACE_TOKEN
+ pyp.Optional(_INTO_TOKEN)
+ _TABLE_NAME
+ (_INSERT_VALUES
| _INSERT_SET
| _INSERT_SELECT)
).setResultsName('replace')
# DELETE STATEMENTS
# DELETE FROM table WHERE ... [ORDER BY ...] [LIMIT ...]
# WHERE ... is not optional because sql.par demands its existence
# in this statement type.
_DELETE_SIMPLE_SQL = pyp.Group(_DELETE_TOKEN
+ _FROM_TOKEN
+ _TABLE_NAME
+ pyp.Optional(_WHERE)
+ pyp.Optional(_ORDER_BY)
+ pyp.Optional(_LIMIT)
).setResultsName('delete')
# DELETE table FROM table_references [WHERE ...]
_DELETE_MULTI_SQL = pyp.Group(_DELETE_TOKEN
+ pyp.delimitedList(_TABLE_NAME
+ pyp.Optional('.*'))
+ _FROM_TOKEN
+ _TABLE_REFERENCES.setResultsName('exclude')
+ (pyp.Group(pyp.Optional(_WHERE))
.setResultsName('exclude'))
).setResultsName('delete')
# DELETE FROM table USING table_references [WHERE ...]
_DELETE_MULTI_SQL2 = pyp.Group(_DELETE_TOKEN
+ _FROM_TOKEN
+ pyp.delimitedList(_TABLE_NAME
+ pyp.Optional('.*'))
+ _USING_TOKEN
+ _TABLE_REFERENCES.setResultsName('exclude')
+ (pyp.Group(pyp.Optional(_WHERE))
.setResultsName('exclude'))
).setResultsName('delete')
# TRANSACTIONS
_START_TRANSACTION_SQL = pyp.Group((_START_TOKEN + _TRANSACTION_TOKEN)
| _BEGIN_TOKEN
).setResultsName('start_transaction')
_END_TRANSACTION_SQL = pyp.Group(_COMMIT_TOKEN
| _ROLLBACK_TOKEN
).setResultsName('end_transaction')
# UNSUPPORTED QUERIES
_RENAME_TABLE_SQL = (pyp.CaselessKeyword('rename') +
pyp.SkipTo(_LINE_DELIMITER).suppress())
_TRUNCATE_SQL = (pyp.CaselessKeyword('truncate')
+ pyp.SkipTo(_LINE_DELIMITER).suppress())
# VERSIONED COMMENTS
_STATEMENT = pyp.Forward()
_VERSIONED_COMMENT = (pyp.Literal('/*!')
+ pyp.Optional(_NUMBER.setResultsName('min_version'))
+ _STATEMENT
+ pyp.Literal('*/'))
# MAIN
_STATEMENT << pyp.Group(_ALTER_TABLE_SQL
| _ALTER_DATABASE_SQL
| _CREATE_TABLE_SQL
| _CREATE_TABLE_LIKE_SQL
| _DROP_TABLE_SQL
| _RENAME_TABLE_SQL
| _SELECT_SQL
| _UPDATE_SQL
| _INSERT_SQL
| _REPLACE_SQL
| _DELETE_MULTI_SQL
| _DELETE_MULTI_SQL2
| _DELETE_SIMPLE_SQL
| _TRUNCATE_SQL
| _START_TRANSACTION_SQL
| _END_TRANSACTION_SQL
| _CREATE_DATABASE_SQL
| _DROP_DATABASE_SQL
| _CREATE_INDEX_SQL
| _SET_SQL
| _VERSIONED_COMMENT
).setResultsName('statement')
_QUERY = pyp.Group(_STATEMENT
+ _LINE_DELIMITER).setResultsName('query')
_QUERY.ignore(_COMMENT_LINE)
_QUERY.ignore(_COMMENT_BLOCK)
class GoogleSQLParser(SQLParser):
  """Extended grammar for SQL within Google.

  Adds the non-standard "ON SHARD <n>[, <n>...]" statement prefix, which
  names the shard(s) a statement should run against, in front of anything
  the base SQLParser grammar accepts.
  """
  # "ON SHARD 1,2,3" -- one or more comma-separated shard numbers, captured
  # under the 'shard' results name.
  _GOOGLE_SQL_ON_SHARD = (
      pyp.CaselessKeyword('on')
      + pyp.CaselessKeyword('shard')
      + pyp.Group(pyp.delimitedList(SQLParser._NUMBER)).setResultsName('shard'))
  _GOOGLE_SQL_EXTENSION = pyp.Group(_GOOGLE_SQL_ON_SHARD
                                    ).setResultsName('running_scheme')
  # Same shape as SQLParser._QUERY, but with the optional ON SHARD prefix.
  _QUERY = pyp.Group(pyp.Optional(_GOOGLE_SQL_EXTENSION)
                     + SQLParser._STATEMENT
                     + SQLParser._LINE_DELIMITER).setResultsName('query')
  _QUERY.ignore(SQLParser._COMMENT_LINE)
  _QUERY.ignore(SQLParser._COMMENT_BLOCK)
|
en
| 0.687009
|
# Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MySQL query parser. Tools to parse SQL queries into a pyparsing parse tree. The primary method here is SQLParser.ParseString, which takes a string that you might pipe to the mysql CLI (containing multiple delimited queries) and parses it. The parsing grammar is far from complete, and focuses on DDL. GoogleSQLParser adds ON SHARD support to the grammar. # based on the work of <NAME> (<EMAIL>) SQL Parser Constructor. Args: progress_callback: If specified, called with the character location of the end of the last-yielded statement. # Get all the class variables that matches _*_TOKEN # Fill the grammar rule _KEYWORDS with all the keywords possible # Last yielded line end # Start of this statement # DISCARDED # TERMINALS ## IDENTIFIER # list of keywords, defined by __init__() ## TYPES # Types that don't take arguments. ## GRAMMAR # COMMONS # DATA DEFINITION COMMONS # Optional column flags: # - CHARSET <charset> # - CHARACTER SET <charset> # - COLLATE <collate name> # - DEFAULT '<value>' # - AUTO_INCREMENT # - NOT NULL # - ON UPDATE CURRENT_TIMESTAMP # ALTER STATEMENTS # ADD COLUMN columnname TYPE [BEFORE | AFTER ...] # ADD COLUMN (columnname TYPE, ...) [BEFORE | AFTER ...] # ADD [UNIQUE] INDEX | KEY ... # ADD UNIQUE ... 
# The various ALTER TABLE operations supported: # - ADD PRIMARY KEY # - ADD INDEX # - ADD COLUMN # - CHANGE # - DROP # - ALTER # CREATE STATEMENTS # Match on IF NOT EXISTS # CREATE TABLE table options can come in any order. There may be # zero or many of them # CREATE TABLE table options are always of the format: FLAG=VALUE # DROP TABLE [IF EXISTS] table # CREATE DATABASE dbname # DROP DATABASE dbname # CREATE INDEX idx ON table (column, ...) # EXPRESSIONS # optional "NOT" # _EXPRESSION is recursive # SET STATEMENT # TABLE REFERENCE # DATA MANIPULATION COMMONS # SELECT STATEMENTS # UPDATE STATEMENTS # INSERT/REPLACE STATEMENTS # DELETE STATEMENTS # DELETE FROM table WHERE ... [ORDER BY ...] [LIMIT ...] # WHERE ... is not optional because sql.par demands its existence # in this statement type. # DELETE table FROM table_references [WHERE ...] # DELETE FROM table USING table_references [WHERE ...] # TRANSACTIONS # UNSUPPORTED QUERIES # VERSIONED COMMENTS # MAIN Extended grammar for SQL within Google
| 2.535264
| 3
|
src/pybel/struct/mutation/induction/neighborhood.py
|
aman527/pybel
| 103
|
6628848
|
# -*- coding: utf-8 -*-
"""Functions for selecting by the neighborhoods of nodes."""
import itertools as itt
from typing import Iterable, Optional
from ...graph import BELGraph
from ...pipeline import transformation
from ...utils import update_metadata
from ....dsl import BaseEntity
__all__ = [
'get_subgraph_by_neighborhood',
]
@transformation
def get_subgraph_by_neighborhood(graph: BELGraph, nodes: Iterable[BaseEntity]) -> Optional[BELGraph]:
    """Get a BEL graph around the neighborhoods of the given nodes.

    Returns none if no nodes are in the graph.

    :param graph: A BEL graph
    :param nodes: An iterable of BEL nodes
    :return: A BEL graph induced around the neighborhoods of the given nodes
    """
    # Materialize once: ``nodes`` may be a one-shot generator, and it is
    # needed both for the membership check and for the edge queries below.
    # Reusing the raw iterable after ``set()`` consumed it would yield an
    # empty induction.
    node_set = set(nodes)
    if not any(node in graph for node in node_set):
        return None

    rv = graph.child()
    # Induce over both the incoming and outgoing edges of every seed node.
    rv.add_edges_from(
        itt.chain(
            graph.in_edges(node_set, keys=True, data=True),
            graph.out_edges(node_set, keys=True, data=True),
        ),
    )
    return rv
|
# -*- coding: utf-8 -*-
"""Functions for selecting by the neighborhoods of nodes."""
import itertools as itt
from typing import Iterable, Optional
from ...graph import BELGraph
from ...pipeline import transformation
from ...utils import update_metadata
from ....dsl import BaseEntity
__all__ = [
'get_subgraph_by_neighborhood',
]
@transformation
def get_subgraph_by_neighborhood(graph: BELGraph, nodes: Iterable[BaseEntity]) -> Optional[BELGraph]:
    """Get a BEL graph around the neighborhoods of the given nodes.

    Returns none if no nodes are in the graph.

    :param graph: A BEL graph
    :param nodes: An iterable of BEL nodes
    :return: A BEL graph induced around the neighborhoods of the given nodes
    """
    # Materialize once: ``nodes`` may be a one-shot generator, and it is
    # needed both for the membership check and for the edge queries below.
    # Reusing the raw iterable after ``set()`` consumed it would yield an
    # empty induction.
    node_set = set(nodes)
    if not any(node in graph for node in node_set):
        return None

    rv = graph.child()
    # Induce over both the incoming and outgoing edges of every seed node.
    rv.add_edges_from(
        itt.chain(
            graph.in_edges(node_set, keys=True, data=True),
            graph.out_edges(node_set, keys=True, data=True),
        ),
    )
    return rv
|
en
| 0.901093
|
# -*- coding: utf-8 -*- Functions for selecting by the neighborhoods of nodes. Get a BEL graph around the neighborhoods of the given nodes. Returns none if no nodes are in the graph. :param graph: A BEL graph :param nodes: An iterable of BEL nodes :return: A BEL graph induced around the neighborhoods of the given nodes
| 3.1113
| 3
|
cl/nets/q_nets.py
|
tarod13/DRIM
| 3
|
6628849
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from nets.custom_layers import Linear_noisy, parallel_Linear
from nets.vision_nets import vision_Net
from nets.net_utils import weights_init_rnd
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Continuous action space
#-------------------------------------------------
class q_Net(nn.Module):
    """State-action value network Q(s, a) for continuous action spaces."""

    def __init__(self, s_dim, a_dim, noisy=False, lr=3e-4):
        super().__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim

        # The noisy variant swaps every layer for a parameter-noise linear.
        layer_cls = Linear_noisy if noisy else nn.Linear
        self.l1 = layer_cls(s_dim + a_dim, 256)
        self.l2 = layer_cls(256, 256)
        self.lQ = layer_cls(256, 1)

        # Small orthogonal init on the output layer; the non-noisy network
        # additionally gets the randomized init applied everywhere.
        if noisy:
            torch.nn.init.orthogonal_(self.lQ.mean_weight, 0.01)
            self.lQ.mean_bias.data.zero_()
        else:
            self.apply(weights_init_rnd)
            torch.nn.init.orthogonal_(self.lQ.weight, 0.01)
            self.lQ.bias.data.zero_()

        self.optimizer = Adam(self.parameters(), lr=lr)

    def forward(self, s, a):
        """Return Q(s, a) for a batch of state-action pairs."""
        features = F.relu(self.l1(torch.cat([s, a], dim=1)))
        features = F.relu(self.l2(features))
        return self.lQ(features)
# Discrete action space
#-------------------------------------------------
class multihead_dueling_q_Net(nn.Module):
    """Dueling Q-network with several parallel heads for discrete actions."""

    def __init__(self, s_dim, n_actions, n_heads):
        super().__init__()
        self.s_dim = s_dim
        self.n_actions = n_actions

        # Per-head parallel linear layers sharing one forward pass.
        self.l1 = parallel_Linear(n_heads, s_dim, 256)
        self.l2 = parallel_Linear(n_heads, 256, 256)
        self.lV = parallel_Linear(n_heads, 256, 1)          # value stream
        self.lA = parallel_Linear(n_heads, 256, n_actions)  # advantage stream

        self.apply(weights_init_rnd)
        # Small orthogonal init on both output streams.
        for out_layer in (self.lV, self.lA):
            torch.nn.init.orthogonal_(out_layer.weight, 0.01)
            out_layer.bias.data.zero_()

    def forward(self, s):
        """Return per-head Q-values for state batch ``s``."""
        hidden = F.relu(self.l2(F.relu(self.l1(s))))
        value = self.lV(hidden)
        advantage = self.lA(hidden)
        # Dueling aggregation: center advantages so V/A are identifiable.
        return value + advantage - advantage.mean(2, keepdim=True)
class vision_multihead_dueling_q_Net(multihead_dueling_q_Net):
    """Multihead dueling Q-network whose state joins an inner feature
    vector with per-head visual features extracted from pixels."""

    def __init__(self, s_dim, latent_dim, n_actions, n_heads, lr=1e-4):
        super().__init__(s_dim + latent_dim, n_actions, n_heads)
        # One independent vision encoder per head.
        self.vision_nets = nn.ModuleList(
            [vision_Net(latent_dim=latent_dim, noisy=False)
             for _ in range(n_heads)]
        )
        self._n_heads = n_heads
        self.optimizer = Adam(self.parameters(), lr=lr)

    def forward(self, inner_state, outer_state):
        """Return per-head Q-values from inner (vector) and outer (image) state."""
        per_head = [
            torch.cat([inner_state, encoder(outer_state)], dim=1)
            for encoder in self.vision_nets
        ]
        state = torch.stack(per_head, dim=1)
        # The dueling computation is identical to the parent's; delegate.
        return super().forward(state)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from nets.custom_layers import Linear_noisy, parallel_Linear
from nets.vision_nets import vision_Net
from nets.net_utils import weights_init_rnd
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Continuous action space
#-------------------------------------------------
class q_Net(nn.Module):
    """State-action value network Q(s, a) for continuous action spaces."""

    def __init__(self, s_dim, a_dim, noisy=False, lr=3e-4):
        super().__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim

        # The noisy variant swaps every layer for a parameter-noise linear.
        layer_cls = Linear_noisy if noisy else nn.Linear
        self.l1 = layer_cls(s_dim + a_dim, 256)
        self.l2 = layer_cls(256, 256)
        self.lQ = layer_cls(256, 1)

        # Small orthogonal init on the output layer; the non-noisy network
        # additionally gets the randomized init applied everywhere.
        if noisy:
            torch.nn.init.orthogonal_(self.lQ.mean_weight, 0.01)
            self.lQ.mean_bias.data.zero_()
        else:
            self.apply(weights_init_rnd)
            torch.nn.init.orthogonal_(self.lQ.weight, 0.01)
            self.lQ.bias.data.zero_()

        self.optimizer = Adam(self.parameters(), lr=lr)

    def forward(self, s, a):
        """Return Q(s, a) for a batch of state-action pairs."""
        features = F.relu(self.l1(torch.cat([s, a], dim=1)))
        features = F.relu(self.l2(features))
        return self.lQ(features)
# Discrete action space
#-------------------------------------------------
class multihead_dueling_q_Net(nn.Module):
    """Dueling Q-network with several parallel heads for discrete actions."""

    def __init__(self, s_dim, n_actions, n_heads):
        super().__init__()
        self.s_dim = s_dim
        self.n_actions = n_actions

        # Per-head parallel linear layers sharing one forward pass.
        self.l1 = parallel_Linear(n_heads, s_dim, 256)
        self.l2 = parallel_Linear(n_heads, 256, 256)
        self.lV = parallel_Linear(n_heads, 256, 1)          # value stream
        self.lA = parallel_Linear(n_heads, 256, n_actions)  # advantage stream

        self.apply(weights_init_rnd)
        # Small orthogonal init on both output streams.
        for out_layer in (self.lV, self.lA):
            torch.nn.init.orthogonal_(out_layer.weight, 0.01)
            out_layer.bias.data.zero_()

    def forward(self, s):
        """Return per-head Q-values for state batch ``s``."""
        hidden = F.relu(self.l2(F.relu(self.l1(s))))
        value = self.lV(hidden)
        advantage = self.lA(hidden)
        # Dueling aggregation: center advantages so V/A are identifiable.
        return value + advantage - advantage.mean(2, keepdim=True)
class vision_multihead_dueling_q_Net(multihead_dueling_q_Net):
    """Multihead dueling Q-network whose state joins an inner feature
    vector with per-head visual features extracted from pixels."""

    def __init__(self, s_dim, latent_dim, n_actions, n_heads, lr=1e-4):
        super().__init__(s_dim + latent_dim, n_actions, n_heads)
        # One independent vision encoder per head.
        self.vision_nets = nn.ModuleList(
            [vision_Net(latent_dim=latent_dim, noisy=False)
             for _ in range(n_heads)]
        )
        self._n_heads = n_heads
        self.optimizer = Adam(self.parameters(), lr=lr)

    def forward(self, inner_state, outer_state):
        """Return per-head Q-values from inner (vector) and outer (image) state."""
        per_head = [
            torch.cat([inner_state, encoder(outer_state)], dim=1)
            for encoder in self.vision_nets
        ]
        state = torch.stack(per_head, dim=1)
        # The dueling computation is identical to the parent's; delegate.
        return super().forward(state)
|
pt
| 0.13232
|
# Continuous action space #------------------------------------------------- # Discrete action space #-------------------------------------------------
| 2.604451
| 3
|
office365.py
|
ahmadfaizalbh/MS-Office-365-Mailer
| 1
|
6628850
|
<reponame>ahmadfaizalbh/MS-Office-365-Mailer<gh_stars>1-10
import datetime
import json
import mimetypes
import os
import time
import urllib
import urllib2
class MSOFileHandler:
    '''Attachment file handler for Office 365 mail.

    Converts local files to/from the Outlook REST ``FileAttachment``
    wire format (base64 content).  Python 2 code: uses the ``base64``
    string codec.
    '''
    def __init__(self, def_read_dir="", def_write_dir=""):
        '''Store default read/write directories, normalized to end in "/".'''
        self.default_read_dir = def_read_dir + (
            "" if def_read_dir.endswith("/") else "/")
        self.default_write_dir = def_write_dir + (
            "" if def_write_dir.endswith("/") else "/")
    def create_file(self, MSO_dict, Dir=None):
        '''
        Attachment name and content is read and create's new file in local
        system.  ``MSO_dict`` must carry "Name" and base64 "ContentBytes";
        the file is written under ``Dir`` (or the default write directory).
        '''
        if ("Name" in MSO_dict) and ("ContentBytes" in MSO_dict):
            if Dir:
                target_dir = Dir + ("" if Dir.endswith("/") else "/")
            else:
                target_dir = self.default_write_dir
            # ``with`` guarantees the handle is closed even if the base64
            # decode or the write raises (the original leaked the handle).
            with open(target_dir + MSO_dict["Name"], "wb") as f:
                f.write(MSO_dict["ContentBytes"].decode('base64'))
    def Create_Attachment(self, File_name):
        '''
        Takes File name (with absolute or complete path) and returns
        a dictionary of attributes needed for attachment of file in Microsoft
        mail.  Relative names are resolved against the default read
        directory.  Requires ``mimetypes`` to be imported at module level.
        '''
        MSO_dict = {"@odata.type": "#Microsoft.OutlookServices.FileAttachment"}
        F_path = ("" if File_name.startswith("/") else self.default_read_dir) \
                 + File_name
        # ``with`` closes the handle even when the MIME lookup raises a
        # KeyError for an unknown extension.
        with open(F_path, "rb") as f:
            MSO_dict["Name"] = f.name.split("/")[-1]
            MSO_dict["ContentBytes"] = f.read().encode('base64')
            MSO_dict["ContentType"] = mimetypes.types_map[
                '.' + MSO_dict["Name"].split(".")[-1]]
            mtime = os.stat(f.name).st_mtime
        MSO_dict["DateTimeLastModified"] = time.strftime(
            "%Y-%m-%dT%H:%M:%SZ", time.gmtime(mtime))
        return MSO_dict
class MSOffice365:
'''
A microsoft 365 Outlook access class
'''
def __init__(self,username,password,mail_box,sourceDir=".",destinationDir="."):
'''
Default Initializer function
MSOffice365('<EMAIL>','password','<EMAIL>',sourceDir="Uploads",destinationDir="Downloads")
'''
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
self.MailBox_id = username if not mail_box else mail_box
self.top_level_url = \
"https://outlook.office365.com/api/v1.0/Users('%s')" % self.MailBox_id
password_mgr.add_password(None,
self.top_level_url,
username,
password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
self.opener = urllib2.build_opener(handler)
self.FileHandler = MSOFileHandler(def_read_dir=sourceDir,
def_write_dir=destinationDir)
@property
def DisplayName(self):
"""
"""
if "profile" not in self.__dict__:
try:
self.profile = self.open("/")
except Exception, e:
self.profile = {}
try:
return self.profile["DisplayName"]
except Exception, e:
return self.MailBox_id
@property
def Alias(self):
"""
"""
if "profile" not in self.__dict__:
try:
self.profile = self.open("/")
except Exception, e:
self.profile = {}
try:
return self.profile["Alias"]
except Exception, e:
return self.MailBox_id.split("@")[0]
def open(self,url):
"""
"""
response=json.load(self.opener.open(self.top_level_url + url))
self.next_url = response['@odata.nextLink'] if '@odata.nextLink' in \
response else ""
return response
def next(self):
"""
"""
if self.next_url:
response = json.load(self.opener.open(self.next_url))
self.next_url = response['@odata.nextLink'] if '@odata.nextLink' \
in response else ""
return response
return {"error":{
"code":"ErrorInvalidUrlfield",
"message":"Invalid Url."
}
}
def buildQuery(self, url="", q=None):
"""
"""
if q:
fieldSep = "?"
if type(q) == dict:
for i in q:
url += fieldSep + "$" + i + "="
if type(q[i]) == list:
sep=""
for j in q[i]:
url += sep + urllib.quote_plus(unicode(j).\
encode('utf8'), safe='/')
sep = ","
elif type(q[i]) == str:
url += urllib.quote_plus(unicode(q[i]).encode('utf8'),
safe='/')
else:
raise ValueError("Invalid Argument Syntax")
fieldSep = "&"
elif type(q) == str:
url += fieldSep + q
else:
raise ValueError("Invalid Argument Syntax")
return url
def Messages(self,q=None,mail_id=None, folder_id=None):
"""
Messages(q=query,mail_id=mailId,folder_id=folderId)
"""
url = (("/Folders('" + folder_id + "')") if folder_id else "") \
+ "/Messages" + (("""('""" + mail_id + """')/""") if mail_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Post(self, url, json_data, fullurl=False):
"""
"""
headers = { 'X_REQUESTED_WITH' :'XMLHttpRequest',
'ACCEPT': 'application/json, text/javascript, */*; q=0.01',
'Contentlength':len(json_data)}
request = urllib2.Request(url if fullurl else (self.top_level_url + \
url),
data=json_data,
headers=headers)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: "POST"
try:
connection = self.opener.open(request)
except urllib2.HTTPError, e:
connection = e
return connection.read()
def Sendmail(self, Subject="Have you seen this new Mail REST API?",
Importance="High", Body=None, ToRecipients=None, Attachments=[],
SaveToSentItems=True):
''' sendmail(
Subject="Have you seen this new Mail REST API?",
Importance="High",
Body={"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail" },
ToRecipients=[{ "EmailAddress": {
"Name": "<NAME>",
"Address": "<EMAIL>"
}
}],
Attachments=[list of file names],
SaveToSentItems=True
)
'''
message_data={
"Subject": Subject,
"Importance": Importance,
"Body": Body if Body else {
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
"ToRecipients": ToRecipients if ToRecipients else [{
"EmailAddress": {
"Name": self.DisplayName,
"Address": self.MailBox_id
}
}]
}
if Attachments:
message_data["Attachments"] = []
for File_name in Attachments:
message_data["Attachments"].append(self.FileHandler.\
Create_Attachment(File_name))
json_data = json.dumps({
"Message": message_data,
"SaveToSentItems": SaveToSentItems
})
return self.Post("/sendmail", json_data)
def CreateDraftMessage(self, folder_id='inbox',
Subject="Have you seen this new Mail REST API?",
Importance="High", Body=None, ToRecipients=None,
Attachments=[]):
"""
CreateDraftMessage(folder_id='inbox',
Subject="Have you seen this new Mail REST API?",
Importance="High",
Body={
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
ToRecipients=[{
"EmailAddress": {
"Name": ToDisplayName,
"Address": ToEmailAdress
}
}],
Attachments=['test1.docx','test2.docx'])
"""
url = "/folders('" + folder_id + "')/messages"
message_data = {
"Subject": Subject,
"Importance": Importance,
"Body": Body if Body else {
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
"ToRecipients":ToRecipients if ToRecipients else [{
"EmailAddress": {
"Name": self.DisplayName,
"Address": self.MailBox_id
}
}]
}
if Attachments:
message_data["Attachments"] = []
for File_name in Attachments:
message_data["Attachments"].append(self.FileHandler.\
Create_Attachment(File_name))
json_data=json.dumps(message_data)
return self.Post(url, json_data)
def CreateFolder(self, folder_id, DisplayName):
"""
CreateFolder(folder_id, DisplayName)
"""
url = "/Folders('" + folder_id + "')/childfolders"
json_data = json.dumps({
"DisplayName": DisplayName
})
return self.Post(url, json_data)
def CreateContact(self, GivenName="<NAME>", EmailAddresses=[],
BusinessPhones=[]):
"""
CreateContact(
GivenName = "<NAME>",
EmailAddresses = [{
"Address":"<EMAIL>",
"Name":"<NAME>"
}],
BusinessPhones = ["123-456-7890"])
"""
json_data = json.dumps({
"GivenName": GivenName,
"EmailAddresses": EmailAddresses ,
"BusinessPhones": BusinessPhones
})
return self.Post("/Contacts", json_data)
def Folders(self, folder_id=None, q=None):
"""
Folders(folder_id=folderId, q=query)
"""
url = "/Folders" + (("""('""" + folder_id + """')/""") if folder_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Calendars(self, Calender_id=None, q=None):
"""
Calendars(Calender_id=CalenderId, q=query)
"""
url = "/Calendars" + (("""('""" + Calender_id + """')/""") \
if Calender_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def CalendarGroups(self, CalGroup_id=None, q=None):
"""
CalendarGroups(CalGroup_id=CalGroupId, q=query)
"""
url = "/CalendarGroups" + (("""('""" + CalGroup_id + """')/""") \
if CalGroup_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Events(self, Event_id=None, q=None):
"""
Events(Event_id=EventId, q=query)
"""
url = "/Events"+(("""('""" + Event_id + """')/""") if Event_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Contacts(self, Contact_id=None, folder_id=None, q=None):
"""
Contacts(self, Contact_id=ContactId, folder_id=folderId, q=query)
"""
url = (("/Contactfolders('" + folder_id + "')") if folder_id else "") \
+ "/Contacts" + (("""('""" + Contact_id + """')/""") \
if Contact_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def ContactFolders(self, Contact_id=None, q=None):
"""
ContactFolders(Contact_id=ContactId, q=query)
"""
url = "/Contactfolders" + (("""('""" + Contact_id + """')/""") \
if Contact_id else "/")
url = buildQuery(url, q)
return self.open(url)
def Attachments(self, mail_id, Attachment_id=None, q=None, Dir=None,
Create_file=False):
"""
"""
url = "/Messages('" + mail_id + "')/attachments" + (("('" + \
Attachment_id + "')/") if Attachment_id else "/" )
url = self.buildQuery(url,q)
MSO_dict = self.open(url)
if Create_file:
if Attachment_id:
self.FileHandler.create_file(MSO_dict, Dir=Dir)
else:
for i in MSO_dict["value"]:
self.FileHandler.create_file(i, Dir=Dir)
return MSO_dict
|
import datetime
import urllib2
import urllib
import json
import time
import os
class MSOFileHandler:
'''Attachment File handler'''
def __init__(self, def_read_dir="", def_write_dir=""):
'''Default Initializer function'''
self.default_read_dir = def_read_dir + ("" if def_read_dir.\
endswith("/") else "/")
self.default_write_dir = def_write_dir + ("" if def_write_dir.\
endswith("/") else "/")
def create_file(self, MSO_dict, Dir=None):
'''
Attachment name and content is read and create's new file in local
system
'''
if ("Name" in MSO_dict) and ("ContentBytes" in MSO_dict):
f = open(( (Dir + ("" if Dir.endswith("/") else "/")) if Dir else \
self.default_write_dir) +MSO_dict["Name"],"wb")
f.write(MSO_dict["ContentBytes"].decode('base64'))
f.close()
def Create_Attachment(self,File_name):
'''
Takes File name (with absolute or complete path) and returns
a dictionary of attributes needed for attachment of file in Microsoft
mail
'''
MSO_dict = {"@odata.type": "#Microsoft.OutlookServices.FileAttachment"}
F_path = ("" if File_name.startswith("/") else self.default_read_dir)\
+ File_name
f = open(F_path,"rb")
MSO_dict["Name"] = f.name.split("/")[-1]
MSO_dict["ContentBytes"] = f.read().encode('base64')
MSO_dict["ContentType"] = mimetypes.types_map['.' \
+ MSO_dict["Name"].split(".")[-1]]
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = \
os.stat(f.name)
f.close()
MSO_dict["DateTimeLastModified"] = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(mtime))
return MSO_dict
class MSOffice365:
'''
A microsoft 365 Outlook access class
'''
def __init__(self,username,password,mail_box,sourceDir=".",destinationDir="."):
'''
Default Initializer function
MSOffice365('<EMAIL>','password','<EMAIL>',sourceDir="Uploads",destinationDir="Downloads")
'''
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
self.MailBox_id = username if not mail_box else mail_box
self.top_level_url = \
"https://outlook.office365.com/api/v1.0/Users('%s')" % self.MailBox_id
password_mgr.add_password(None,
self.top_level_url,
username,
password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
self.opener = urllib2.build_opener(handler)
self.FileHandler = MSOFileHandler(def_read_dir=sourceDir,
def_write_dir=destinationDir)
@property
def DisplayName(self):
"""
"""
if "profile" not in self.__dict__:
try:
self.profile = self.open("/")
except Exception, e:
self.profile = {}
try:
return self.profile["DisplayName"]
except Exception, e:
return self.MailBox_id
@property
def Alias(self):
"""
"""
if "profile" not in self.__dict__:
try:
self.profile = self.open("/")
except Exception, e:
self.profile = {}
try:
return self.profile["Alias"]
except Exception, e:
return self.MailBox_id.split("@")[0]
def open(self,url):
"""
"""
response=json.load(self.opener.open(self.top_level_url + url))
self.next_url = response['@odata.nextLink'] if '@odata.nextLink' in \
response else ""
return response
def next(self):
"""
"""
if self.next_url:
response = json.load(self.opener.open(self.next_url))
self.next_url = response['@odata.nextLink'] if '@odata.nextLink' \
in response else ""
return response
return {"error":{
"code":"ErrorInvalidUrlfield",
"message":"Invalid Url."
}
}
def buildQuery(self, url="", q=None):
"""
"""
if q:
fieldSep = "?"
if type(q) == dict:
for i in q:
url += fieldSep + "$" + i + "="
if type(q[i]) == list:
sep=""
for j in q[i]:
url += sep + urllib.quote_plus(unicode(j).\
encode('utf8'), safe='/')
sep = ","
elif type(q[i]) == str:
url += urllib.quote_plus(unicode(q[i]).encode('utf8'),
safe='/')
else:
raise ValueError("Invalid Argument Syntax")
fieldSep = "&"
elif type(q) == str:
url += fieldSep + q
else:
raise ValueError("Invalid Argument Syntax")
return url
def Messages(self,q=None,mail_id=None, folder_id=None):
"""
Messages(q=query,mail_id=mailId,folder_id=folderId)
"""
url = (("/Folders('" + folder_id + "')") if folder_id else "") \
+ "/Messages" + (("""('""" + mail_id + """')/""") if mail_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Post(self, url, json_data, fullurl=False):
"""
"""
headers = { 'X_REQUESTED_WITH' :'XMLHttpRequest',
'ACCEPT': 'application/json, text/javascript, */*; q=0.01',
'Contentlength':len(json_data)}
request = urllib2.Request(url if fullurl else (self.top_level_url + \
url),
data=json_data,
headers=headers)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: "POST"
try:
connection = self.opener.open(request)
except urllib2.HTTPError, e:
connection = e
return connection.read()
def Sendmail(self, Subject="Have you seen this new Mail REST API?",
Importance="High", Body=None, ToRecipients=None, Attachments=[],
SaveToSentItems=True):
''' sendmail(
Subject="Have you seen this new Mail REST API?",
Importance="High",
Body={"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail" },
ToRecipients=[{ "EmailAddress": {
"Name": "<NAME>",
"Address": "<EMAIL>"
}
}],
Attachments=[list of file names],
SaveToSentItems=True
)
'''
message_data={
"Subject": Subject,
"Importance": Importance,
"Body": Body if Body else {
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
"ToRecipients": ToRecipients if ToRecipients else [{
"EmailAddress": {
"Name": self.DisplayName,
"Address": self.MailBox_id
}
}]
}
if Attachments:
message_data["Attachments"] = []
for File_name in Attachments:
message_data["Attachments"].append(self.FileHandler.\
Create_Attachment(File_name))
json_data = json.dumps({
"Message": message_data,
"SaveToSentItems": SaveToSentItems
})
return self.Post("/sendmail", json_data)
def CreateDraftMessage(self, folder_id='inbox',
Subject="Have you seen this new Mail REST API?",
Importance="High", Body=None, ToRecipients=None,
Attachments=[]):
"""
CreateDraftMessage(folder_id='inbox',
Subject="Have you seen this new Mail REST API?",
Importance="High",
Body={
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
ToRecipients=[{
"EmailAddress": {
"Name": ToDisplayName,
"Address": ToEmailAdress
}
}],
Attachments=['test1.docx','test2.docx'])
"""
url = "/folders('" + folder_id + "')/messages"
message_data = {
"Subject": Subject,
"Importance": Importance,
"Body": Body if Body else {
"ContentType": "HTML",
"Content": "It looks awesome!<br/> This is test mail"
},
"ToRecipients":ToRecipients if ToRecipients else [{
"EmailAddress": {
"Name": self.DisplayName,
"Address": self.MailBox_id
}
}]
}
if Attachments:
message_data["Attachments"] = []
for File_name in Attachments:
message_data["Attachments"].append(self.FileHandler.\
Create_Attachment(File_name))
json_data=json.dumps(message_data)
return self.Post(url, json_data)
def CreateFolder(self, folder_id, DisplayName):
"""
CreateFolder(folder_id, DisplayName)
"""
url = "/Folders('" + folder_id + "')/childfolders"
json_data = json.dumps({
"DisplayName": DisplayName
})
return self.Post(url, json_data)
def CreateContact(self, GivenName="<NAME>", EmailAddresses=[],
BusinessPhones=[]):
"""
CreateContact(
GivenName = "<NAME>",
EmailAddresses = [{
"Address":"<EMAIL>",
"Name":"<NAME>"
}],
BusinessPhones = ["123-456-7890"])
"""
json_data = json.dumps({
"GivenName": GivenName,
"EmailAddresses": EmailAddresses ,
"BusinessPhones": BusinessPhones
})
return self.Post("/Contacts", json_data)
def Folders(self, folder_id=None, q=None):
"""
Folders(folder_id=folderId, q=query)
"""
url = "/Folders" + (("""('""" + folder_id + """')/""") if folder_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Calendars(self, Calender_id=None, q=None):
"""
Calendars(Calender_id=CalenderId, q=query)
"""
url = "/Calendars" + (("""('""" + Calender_id + """')/""") \
if Calender_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def CalendarGroups(self, CalGroup_id=None, q=None):
"""
CalendarGroups(CalGroup_id=CalGroupId, q=query)
"""
url = "/CalendarGroups" + (("""('""" + CalGroup_id + """')/""") \
if CalGroup_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Events(self, Event_id=None, q=None):
"""
Events(Event_id=EventId, q=query)
"""
url = "/Events"+(("""('""" + Event_id + """')/""") if Event_id \
else "/")
url = self.buildQuery(url, q)
return self.open(url)
def Contacts(self, Contact_id=None, folder_id=None, q=None):
"""
Contacts(self, Contact_id=ContactId, folder_id=folderId, q=query)
"""
url = (("/Contactfolders('" + folder_id + "')") if folder_id else "") \
+ "/Contacts" + (("""('""" + Contact_id + """')/""") \
if Contact_id else "/")
url = self.buildQuery(url, q)
return self.open(url)
def ContactFolders(self, Contact_id=None, q=None):
"""
ContactFolders(Contact_id=ContactId, q=query)
"""
url = "/Contactfolders" + (("""('""" + Contact_id + """')/""") \
if Contact_id else "/")
url = buildQuery(url, q)
return self.open(url)
def Attachments(self, mail_id, Attachment_id=None, q=None, Dir=None,
Create_file=False):
"""
"""
url = "/Messages('" + mail_id + "')/attachments" + (("('" + \
Attachment_id + "')/") if Attachment_id else "/" )
url = self.buildQuery(url,q)
MSO_dict = self.open(url)
if Create_file:
if Attachment_id:
self.FileHandler.create_file(MSO_dict, Dir=Dir)
else:
for i in MSO_dict["value"]:
self.FileHandler.create_file(i, Dir=Dir)
return MSO_dict
|
en
| 0.520385
|
Attachment File handler Default Initializer function Attachment name and content is read and create's new file in local system Takes File name (with absolute or complete path) and returns a dictionary of attributes needed for attachment of file in Microsoft mail A microsoft 365 Outlook access class Default Initializer function MSOffice365('<EMAIL>','password','<EMAIL>',sourceDir="Uploads",destinationDir="Downloads") Messages(q=query,mail_id=mailId,folder_id=folderId) (' ')/ sendmail( Subject="Have you seen this new Mail REST API?", Importance="High", Body={"ContentType": "HTML", "Content": "It looks awesome!<br/> This is test mail" }, ToRecipients=[{ "EmailAddress": { "Name": "<NAME>", "Address": "<EMAIL>" } }], Attachments=[list of file names], SaveToSentItems=True ) CreateDraftMessage(folder_id='inbox', Subject="Have you seen this new Mail REST API?", Importance="High", Body={ "ContentType": "HTML", "Content": "It looks awesome!<br/> This is test mail" }, ToRecipients=[{ "EmailAddress": { "Name": ToDisplayName, "Address": ToEmailAdress } }], Attachments=['test1.docx','test2.docx']) CreateFolder(folder_id, DisplayName) CreateContact( GivenName = "<NAME>", EmailAddresses = [{ "Address":"<EMAIL>", "Name":"<NAME>" }], BusinessPhones = ["123-456-7890"]) Folders(folder_id=folderId, q=query) (' ')/ Calendars(Calender_id=CalenderId, q=query) (' ')/ CalendarGroups(CalGroup_id=CalGroupId, q=query) (' ')/ Events(Event_id=EventId, q=query) (' ')/ Contacts(self, Contact_id=ContactId, folder_id=folderId, q=query) (' ')/ ContactFolders(Contact_id=ContactId, q=query) (' ')/
| 3.189893
| 3
|