| content stringlengths 0 1.05M | origin stringclasses 2 values | type stringclasses 2 values |
|---|---|---|
"""
A coordinate transformation module, kept as a separate chunk of code so that newer/better reference-frame translation methods are easier to drop in.
Generally used to project a trajectory given in geographic coordinates (e.g. lat/lon) into a projected reference system.
Note: just getting started!
"""
#collect dependencies
import numpy as np
import sys
import pyproj as prj
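# Minimal usage sketch (assumed workflow; the EPSG codes and coordinates are
# illustrative): project WGS84 lon/lat into UTM zone 18N with pyproj.
if __name__ == "__main__":
    transformer = prj.Transformer.from_crs("EPSG:4326", "EPSG:32618", always_xy=True)
    easting, northing = transformer.transform(-73.97, 40.78)  # lon, lat order
    print(easting, northing)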
| nilq/baby-python | python |
from __future__ import absolute_import
from six.moves.urllib.parse import urlparse
from django.utils.translation import ugettext_lazy as _
from django import forms
from sentry import http
from sentry.web.helpers import render_to_response
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.identity.gitlab import get_user_info
from sentry.identity.gitlab.provider import GitlabIdentityProvider
from sentry.integrations import IntegrationInstallation, IntegrationFeatures, IntegrationProvider, IntegrationMetadata
from sentry.pipeline import NestedPipelineView, PipelineView
from sentry.utils.http import absolute_uri
from .client import GitLabApiClient, GitLabApiClientPath
DESCRIPTION = """
Fill me out
"""
FEATURES = []
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
features=FEATURES,
author='The Sentry Team',
noun=_('Installation'),
issue_url='https://github.com/getsentry/sentry/issues/',
source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/gitlab',
aspects={},
)
class GitlabIntegration(IntegrationInstallation):
def __init__(self, *args, **kwargs):
super(GitlabIntegration, self).__init__(*args, **kwargs)
self.default_identity = None
def get_client(self):
if self.default_identity is None:
self.default_identity = self.get_default_identity()
return GitLabApiClient(self)
class InstallationForm(forms.Form):
url = forms.CharField(
label=_("Installation Url"),
help_text=_('The "base URL" for your gitlab instance, '
'includes the host and protocol.'),
widget=forms.TextInput(
attrs={'placeholder': 'https://github.example.com'}
),
)
name = forms.CharField(
label=_("Gitlab App Name"),
help_text=_('The name of your OAuth Application in Gitlab. '
'This can be found on the apps configuration '
'page. (/profile/applications)'),
widget=forms.TextInput(
attrs={'placeholder': _('Sentry App')}
)
)
group = forms.CharField(
label=_("Gitlab Group Name"),
widget=forms.TextInput(
attrs={'placeholder': _('my-awesome-group')}
)
)
verify_ssl = forms.BooleanField(
label=_("Verify SSL"),
help_text=_('By default, we verify SSL certificates '
'when delivering payloads to your Gitlab instance'),
widget=forms.CheckboxInput(),
required=False
)
client_id = forms.CharField(
label=_("Gitlab Application ID"),
widget=forms.TextInput(
attrs={'placeholder': _(
'5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737')}
)
)
client_secret = forms.CharField(
label=_("Gitlab Application Secret"),
widget=forms.TextInput(
attrs={'placeholder': _('XXXXXXXXXXXXXXXXXXXXXXXXXXX')}
)
)
def __init__(self, *args, **kwargs):
super(InstallationForm, self).__init__(*args, **kwargs)
self.fields['verify_ssl'].initial = True
class InstallationConfigView(PipelineView):
def dispatch(self, request, pipeline):
form = InstallationForm(request.POST)
if form.is_valid():
form_data = form.cleaned_data
pipeline.bind_state('installation_data', form_data)
pipeline.bind_state('oauth_config_information', {
"access_token_url": u"{}/oauth/token".format(form_data.get('url')),
"authorize_url": u"{}/oauth/authorize".format(form_data.get('url')),
"client_id": form_data.get('client_id'),
"client_secret": form_data.get('client_secret'),
"verify_ssl": form_data.get('verify_ssl')
})
return pipeline.next_step()
project_form = InstallationForm()
return render_to_response(
template='sentry/integrations/gitlab-config.html',
context={
'form': project_form,
},
request=request,
)
class GitlabIntegrationProvider(IntegrationProvider):
key = 'gitlab'
name = 'Gitlab'
metadata = metadata
integration_cls = GitlabIntegration
needs_default_identity = True
features = frozenset([
IntegrationFeatures.ISSUE_BASIC,
])
setup_dialog_config = {
'width': 1030,
'height': 1000,
}
def _make_identity_pipeline_view(self):
"""
Make the nested identity provider view. It is important that this view is
not constructed until we reach this step and the
``oauth_config_information`` is available in the pipeline state. This
        method should be late bound into the pipeline views.
"""
identity_pipeline_config = dict(
oauth_scopes=(
'api',
'sudo',
),
redirect_url=absolute_uri('/extensions/gitlab/setup/'),
**self.pipeline.fetch_state('oauth_config_information')
)
return NestedPipelineView(
bind_key='identity',
provider_key='gitlab',
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
def get_oauth_data(self, payload):
data = {'access_token': payload['access_token']}
# https://docs.gitlab.com/ee/api/oauth2.html#2-requesting-access-token
# doesn't seem to be correct, format we actually get:
# {
# "access_token": "123432sfh29uhs29347",
# "token_type": "bearer",
# "refresh_token": "29f43sdfsk22fsj929",
# "created_at": 1536798907,
# "scope": "api sudo"
# }
if 'refresh_token' in payload:
data['refresh_token'] = payload['refresh_token']
if 'token_type' in payload:
data['token_type'] = payload['token_type']
return data
def get_group_info(self, access_token, installation_data):
session = http.build_session()
resp = session.get(
GitLabApiClientPath.build_api_url(
base_url=installation_data['url'],
path=GitLabApiClientPath.group.format(
group=installation_data['group'],
)
),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % access_token,
},
verify=installation_data['verify_ssl']
)
resp.raise_for_status()
return resp.json()
def get_pipeline_views(self):
return [InstallationConfigView(), lambda: self._make_identity_pipeline_view()]
def build_integration(self, state):
data = state['identity']['data']
oauth_data = self.get_oauth_data(data)
user = get_user_info(data['access_token'], state['installation_data'])
group = self.get_group_info(data['access_token'], state['installation_data'])
scopes = sorted(GitlabIdentityProvider.oauth_scopes)
base_url = state['installation_data']['url']
integration = {
'name': group['name'],
'external_id': u'{}:{}'.format(urlparse(base_url).netloc, group['id']),
'metadata': {
'icon': group['avatar_url'],
'domain_name': group['web_url'].replace('https://', ''),
'scopes': scopes,
'verify_ssl': state['installation_data']['verify_ssl'],
'base_url': base_url,
},
'user_identity': {
'type': 'gitlab',
'external_id': u'{}:{}'.format(urlparse(base_url).netloc, user['id']),
'scopes': scopes,
'data': oauth_data,
},
}
return integration
| nilq/baby-python | python |
'''
Created by Sidhant Nagpal
Feb 1, 2018
'''
from matplotlib import pyplot as plt
from random import shuffle
import numpy as np
import json
plt.figure(figsize=(12,6))
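# The script assumes data.json maps category names to solved-problem counts,
# e.g. {"Dynamic Programming": 12, "Graphs": 7} (structure assumed here).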
data = json.load(open('data.json'))
a = [(k, v) for k, v in data.items()]
for i in range(2, len(a)):
if a[i-2]>a[i] and a[i-2]>a[i-1]:
a[i-2], a[i] = a[i], a[i-2]
elif a[i]>a[i-2] and a[i]>a[i-1]:
a[i-1], a[i] = a[i], a[i-1]
values = [y for x, y in a]
probs = sum(values)
labels = ['{} ({}) ({:.1f}%)'.format(x,y,100.*y/probs) for x, y in a]
colors = ['crimson','lightcoral','darkcyan','green','coral','orange','seagreen','purple','gold','mediumvioletred','darkturquoise','greenyellow','indigo','limegreen']
shuffle(colors)
colors = colors[:len(a)]
patches, texts = plt.pie(values, colors=colors, frame=True, shadow=True, startangle=100)
plt.axis('equal')
plt.title('Total Solved = {}'.format(probs), loc='left')
plt.legend(patches, labels, loc='lower right')
plt.tight_layout()
plt.show()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Simple ClaSP test."""
__author__ = ["patrickzib"]
__all__ = []
import numpy as np
from sktime.annotation.clasp import ClaSPSegmentation
from sktime.datasets import load_gun_point_segmentation
def test_clasp_sparse():
"""Test ClaSP sparse segmentation.
Check if the predicted change points match.
"""
# load the test dataset
ts, period_size, cps = load_gun_point_segmentation()
# compute a ClaSP segmentation
clasp = ClaSPSegmentation(period_size, n_cps=1)
clasp.fit(ts)
found_cps = clasp.predict(ts)
scores = clasp.predict_scores(ts)
assert len(found_cps) == 1 and found_cps[0] == 893
assert len(scores) == 1 and scores[0] > 0.74
def test_clasp_dense():
"""Tests ClaSP dense segmentation.
Check if the predicted segmentation matches.
"""
# load the test dataset
ts, period_size, cps = load_gun_point_segmentation()
# compute a ClaSP segmentation
clasp = ClaSPSegmentation(period_size, n_cps=1, fmt="dense")
clasp.fit(ts)
segmentation = clasp.predict(ts)
scores = clasp.predict_scores(ts)
assert len(segmentation) == 2 and segmentation[0].right == 893
assert np.argmax(scores) == 893
| nilq/baby-python | python |
from string import ascii_uppercase
from tkinter import *
from analyst import BoardAnalyst
from board import Board, Color
class MainMenuWindow:
"""
A class that represents a Main Menu. Can branch to a NameWindow, to an AboutWindow or to a GoodByeWindow
On button 1: Branch to a NameWindow, which will eventually start a new game.
On button 2: Branch to an AboutWindow, which can only return to a MainMenuWindow.
On button 3: If any player has given a name, branch to a GoodByeWindow and pass it the player name. Else, kill app.
"""
def __init__(self, player_name=None):
"""
Layout of MainMenuWindow is as follows:
root
|
+--frame
|
+--Button (Nuevo juego)
+--Button (Acerca de)
+--Button (Salir)
"""
self.name = player_name
self.root = Tk()
self.root.focus_force()
self.root.geometry("+100+100")
Grid.columnconfigure(self.root, 0, weight=1)
Grid.rowconfigure(self.root, 0, weight=1)
frame = Frame(self.root, borderwidth=10)
frame.grid(row=0, column=0, sticky=N + S + E + W)
Button(frame, text="Nuevo juego", command=lambda: self.start_new_game()).grid(row=0, column=0, sticky=E + W)
Button(frame, text="Acerca de...", command=lambda: self.show_about()).grid(row=1, column=0, sticky=E + W)
Button(frame, text="Salir", command=lambda: self.exit()).grid(row=2, column=0, sticky=E + W)
Grid.columnconfigure(frame, 0, weight=1)
for i in range(3):
Grid.rowconfigure(frame, i, weight=1)
def show(self):
self.root.mainloop()
def start_new_game(self):
new_window = NameWindow(self.name)
self.root.destroy()
new_window.show()
def show_about(self):
new_window = AboutWindow(self.name)
self.root.destroy()
new_window.show()
    def exit(self):
        if self.name is not None:
            new_window = GoodByeWindow(self.name)
            self.root.destroy()
            new_window.show()
        else:
            self.root.destroy()
class NameWindow:
"""
A class that represents a Window that asks the user for his/her name. Will only branch to a GameWindow.
"""
def __init__(self, last_player_name):
"""
Layout of NameWindow is as follows:
root
|
+--Label
+--TextField
+--Button (OK)
"""
self.root = Tk()
self.root.geometry("+100+100")
Grid.columnconfigure(self.root, 0, weight=1)
Grid.rowconfigure(self.root, 0, weight=1)
Label(self.root, text="Nombre").grid(row=0, column=0, padx=5, pady=5)
self.text_field = Entry(self.root, justify=CENTER)
if last_player_name is not None:
self.text_field.insert(END, last_player_name)
self.text_field.bind("<Return>", self.start_game)
self.text_field.focus_force()
self.text_field.select_range(0, END)
self.text_field.grid(row=1, column=0, sticky=E + W, padx=5, pady=5)
Button(self.root, text="OK", command=lambda: self.start_game(None)).grid(row=2, column=0, padx=5, pady=5)
def show(self):
self.root.mainloop()
def start_game(self, _):
board = Board()
board.random_fill()
analyst = BoardAnalyst(board)
new_window = GameWindow(self.text_field.get() if len(self.text_field.get()) > 0 else "Sin nombre", board,
analyst)
self.root.destroy()
new_window.show()
class AboutWindow:
"""
    A class that represents a Window that shows information about the program. Can only branch to a MainMenuWindow
"""
def __init__(self, player_name):
"""
        Layout of AboutWindow is as follows:
root
|
+--frame
| |
| +--Text
|
+--second_frame
|
+--Button (OK)
"""
self.name = player_name
self.root = Tk()
self.root.geometry("700x300+100+100")
Grid.rowconfigure(self.root, 0, weight=1)
Grid.columnconfigure(self.root, 0, weight=1)
frame = Frame(self.root, borderwidth=10)
text = Text(frame)
text.pack(fill=BOTH, expand=1)
text.insert(END, "Acerca del juego\n\n")
text.insert(END, "El juego consiste en eliminar los cuadros adyacentes del mismo color de un tablero.\n")
text.insert(END, "Los cuadros están colocados de manera aleatoria.\n")
text.insert(END, "Cuando se eliminan cuadros, los demás se desplazan hacia abajo.\n\n")
text.insert(END, "Diseñado para Fundamentos de Programación, ESPOL\n")
text.insert(END, "Anthony Adachi (KimCordero213)\nJosé Reyes (jreyesr, 0xC0FFEE)\n\n")
import datetime
text.insert(END, datetime.date.today().strftime("%A, %d/%m/%Y"))
frame.grid(row=0, column=0, sticky=N + S + E + W)
second_frame = Frame(self.root, borderwidth=10)
second_frame.grid(row=1, column=0)
ok_button = Button(second_frame, text="OK", command=lambda: self.close(None))
ok_button.grid(row=0, column=0)
ok_button.focus_force()
ok_button.bind("<Return>", self.close)
def show(self):
self.root.mainloop()
def close(self, _):
new_window = MainMenuWindow(self.name)
self.root.destroy()
new_window.show()
class GameWindow:
"""
A class that represents a Game Window, where most of the processing happens. Can only branch to a GameOverWindow
"""
def __init__(self, player_name, board, analyst):
"""
Layout of GameWindow is as follows:
root
|
+--upper_frame
| |
| +--Labels (in row (Board.SIZE+1) and column 1), total Board.SIZE*2
| +--Buttons (in rows 1 to Board.SIZE and columns 2 to Board.SIZE+1, total Board.SIZE^2
|
+--lower_frame
|
+--Label (Puntos...)
+--Button (Terminar juego)
"""
self.player_name = player_name
self.score = 0
self.board = board
self.analyst = analyst
self.buttons = [[0 for _ in range(self.board.SIZE + 1)] for _ in range(self.board.SIZE + 1)]
self.root = Tk()
self.root.focus_force()
self.root.geometry("500x500+100+100")
Grid.rowconfigure(self.root, 0, weight=1)
Grid.columnconfigure(self.root, 0, weight=1)
upper_frame = Frame(self.root, borderwidth=10)
upper_frame.grid(row=0, column=0, sticky=N + S + E + W)
for row_index in range(self.board.SIZE):
Grid.rowconfigure(upper_frame, row_index, weight=1)
for col_index in range(self.board.SIZE):
Grid.columnconfigure(upper_frame, col_index + 1, weight=1)
btn = Button(upper_frame, command=lambda x=row_index, y=col_index: self.button_clicked(x, y))
btn.configure(bg=self.get_color(row_index, col_index))
self.buttons[row_index][col_index] = btn
btn.grid(row=row_index, column=col_index + 1, sticky=N + S + E + W, padx=2, pady=2)
# Set labels
for i in range(self.board.SIZE):
Label(upper_frame, text=ascii_uppercase[i]).grid(row=i, column=0, sticky=N + S + E + W)
for j in range(self.board.SIZE):
Label(upper_frame, text=str(j + 1)).grid(row=self.board.SIZE, column=j + 1, sticky=N + S + E + W)
# Set additional info (score, exit button)
lower_frame = Frame(self.root)
lower_frame.grid(row=1, column=0, sticky=N + S + E + W)
Grid.rowconfigure(lower_frame, 0, weight=1)
Grid.columnconfigure(lower_frame, 0, weight=1)
Grid.columnconfigure(lower_frame, 1, weight=1)
# Score label
lbl = Label(lower_frame, text="Puntos: 0")
self._score_label = lbl
lbl.grid(row=0, column=0, sticky=N + S + E + W, padx=5, pady=5)
# Exit game button
Button(lower_frame, text="Terminar juego", command=lambda: self.end_game()).grid(row=0, column=1,
sticky=N + S + E + W, padx=20,
pady=5)
def show(self):
self.root.mainloop()
def end_game(self):
new_window = GameOverWindow(self.player_name, self.score)
self.root.destroy()
new_window.show()
def button_clicked(self, i, j):
"""
        To be called when a button on the button grid is clicked. If the item at that position in the board is not
        Blank and has friends, remove all friends and update the score, board, and grid accordingly. If no button
        has any friends left, end the game automatically.
"""
if not self.analyst.has_friends(i, j) or self.board.item(i, j) == Color.Blank:
return
to_clear = self.analyst.all_friends(i, j)
self.score += self.analyst.score(to_clear)
self.board.clear_items(to_clear)
self.board.compact_all()
self._score_label.configure(text="Puntos: {}".format(self.score))
if not self.analyst.any_friends():
self.end_game()
self.update_button_colors()
def update_button_colors(self):
"""
Updates the button grid with the new colors. To be called after changing the Board.
"""
for i in range(self.board.SIZE):
for j in range(self.board.SIZE):
try:
self.buttons[i][j].configure(bg=self.get_color(i, j))
except TclError:
pass
def get_color(self, i, j):
"""
Return a string representation for the color in position (i, j) in the Board
:param i: The row of the item
:param j: The column of the item
:return: A string to be used in bg
"""
if self.board.item(i, j) == Color.A:
return 'red'
elif self.board.item(i, j) == Color.B:
return 'green'
elif self.board.item(i, j) == Color.C:
return 'blue'
elif self.board.item(i, j) == Color.D:
return 'yellow'
else:
return 'gray'
class GameOverWindow:
"""
A class representing a 'Game Over' window. Can only branch to a MainMenuWindow.
"""
def __init__(self, player_name, score):
"""
Layout of GameOverWindow is as follows:
root
|
+--frame
|
+--Label (player name)
+--Label (score)
+--Button (OK)
"""
self.player_name = player_name
self.score = score
self.root = Tk()
self.root.geometry("+100+100")
Grid.columnconfigure(self.root, 0, weight=1)
Grid.rowconfigure(self.root, 0, weight=1)
frame = Frame(self.root, borderwidth=10)
frame.grid(row=0, column=0, sticky=N + S + E + W)
Label(frame, text=player_name).grid(row=0, column=0)
Label(frame, text="{} puntos".format(score)).grid(row=1, column=0)
ok_button = Button(frame, text="OK", command=lambda: self.close(None))
ok_button.grid(row=2, column=0)
ok_button.focus_force()
ok_button.bind("<Return>", self.close)
Grid.columnconfigure(frame, 0, weight=1)
Grid.rowconfigure(frame, 0, weight=1)
Grid.rowconfigure(frame, 1, weight=1)
def close(self, _):
new_window = MainMenuWindow(self.player_name if self.player_name != "Sin nombre" else None)
self.root.destroy()
new_window.show()
def show(self):
self.root.mainloop()
class GoodByeWindow:
"""
A class representing a 'Goodbye' window. Will only branch to nothingness...
Is only called when MainMenuWindow has a player name stored
"""
def __init__(self, player_name):
"""
Layout of GoodByeWindow is as follows:
root
|
+--frame
|
+--Label (player name, goodbye message)
+--Button (OK)
"""
self.player_name = player_name
self.root = Tk()
self.root.geometry("+100+100")
Grid.columnconfigure(self.root, 0, weight=1)
Grid.rowconfigure(self.root, 0, weight=1)
frame = Frame(self.root, borderwidth=10)
frame.grid(row=0, column=0, sticky=N + S + E + W)
Label(frame, text="Hasta luego, {}".format(player_name)).grid(row=0, column=0, pady=5)
ok_button = Button(frame, text="OK", command=lambda: self.close(None))
ok_button.grid(row=1, column=0)
ok_button.focus_force()
ok_button.bind("<Return>", self.close)
Grid.columnconfigure(frame, 0, weight=1)
Grid.rowconfigure(frame, 0, weight=1)
Grid.rowconfigure(frame, 1, weight=1)
def close(self, _):
self.root.destroy()
def show(self):
self.root.mainloop()
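# Entry-point sketch (an assumption; how the app is actually launched is not
# shown here): start at the main menu.
if __name__ == "__main__":
    MainMenuWindow().show()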
| nilq/baby-python | python |
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
carry = (digits[-1] + 1) > 9
digits[-1] = (digits[-1] + 1) % 10
for i in reversed(range(len(digits) - 1)):
temp = carry
carry = (digits[i] + carry > 9)
digits[i] = (digits[i] + temp) % 10
if carry == 1:
return [1, *digits]
return digits
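# Quick sanity checks (illustrative):
# Solution().plusOne([1, 2, 9]) -> [1, 3, 0]
# Solution().plusOne([9, 9]) -> [1, 0, 0]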
| nilq/baby-python | python |
# Import libraries
from collections import Counter, OrderedDict
from itertools import chain
from more_itertools import unique_everseen
import numpy as np
import pandas as pd
import random
import tensorflow as tf
from keras import models
from matplotlib import pyplot as plt  # used by find_similar's plotting branches
import warnings
import functools
import operator
warnings.filterwarnings("ignore")
def get_df():
""" Returns main dataframe used in the project """
# Path to file
hotels_path = "../data/clean_hotels_scraped_v2.csv"
# Dataframe
hotels_df = pd.read_csv(hotels_path, usecols = ["city", "country", "hotel_name", "rating",
"address", "popularity_rating", "locality", "price",
"landmark", "URL"])
return hotels_df
def get_model():
""" Return model architecture and weights """
# Import embeddings model and weights
model = models.load_model("../models/nn_scraped_hotels.h5")
model.load_weights("../models/nn_scraped_hotels_weights.h5")
return model
def get_int_mapping(dataframe, column):
""" Returns index, reverse_index, and list of unique items in a pandas datframe """
# Convert series to list
column_to_list = dataframe[column].tolist()
# Find set of unique items and convert to a list
unique_items_list = list(unique_everseen(column_to_list))
# Create indexes for each item
item_index = {item: idx for idx, item in enumerate(unique_items_list)}
index_item = {idx: item for item, idx in item_index.items()}
return item_index, index_item, unique_items_list
def get_embeddings(layer_name):
""" Given a model and a layer name, this function returns the
normalized embedding [weights] for said layer """
# Get model
model = get_model()
# Get layer
item_layer = model.get_layer(layer_name)
# Get weights
item_weights = item_layer.get_weights()[0]
# Normalize the embeddings so that we can calculate cosine similarity
item_weights = item_weights / np.linalg.norm(item_weights, axis = 1).reshape((-1, 1))
return item_weights
def find_similar(name, weights, index_name = "hotel_name", n = 10, plot = True, filtering = False, filter_name = None):
""" Return most similar items """
index = hotel_index
rindex = index_hotel
# Select index and reverse index
if index_name == "city":
index = city_index
rindex = index_city
if index_name == "country":
index = country_index
rindex = index_country
if index_name == "rating":
index = rating_index
rindex = index_rating
if index_name == "popularity_rating":
index = popularity_index
rindex = index_popularity
if index_name == "locality":
index = locality_index
rindex = index_locality
if index_name == "price":
index = price_index
rindex = index_price
if index_name == "landmark":
index = landmark_index
rindex = index_landmark
# Check name is in index
try:
# Calculate dot product between item/property and all others
distances = np.dot(weights, weights[index[name]])
except KeyError:
print(" {} Not Found.".format(name))
return
# Sort distances from smallest to largest
sorted_distances = np.argsort(distances)
# Find the most similar
closest = sorted_distances[-n:]
    # Limit results by filtering
    filter_ = None
filtered_results = []
if filtering:
for idxs, rows in hotels_df.iterrows():
if hotels_df.at[idxs, index_name] == name:
filter_ = hotels_df.at[idxs, filter_name]
break
match_df = hotels_df[hotels_df[filter_name].str.match(filter_)]
match_df = match_df.reset_index(drop = True)
match_df["distance"] = None
for idxs, rows in match_df.iterrows():
item = match_df.at[idxs, index_name]
distance = np.dot(weights[index[item]], weights[index[name]])
match_df.loc[match_df.index[idxs], "distance"] = distance
match_df = match_df.sort_values(by = ["distance"], axis = 0, ascending = False)
list_of_filtered_items = match_df[index_name].to_list()
list_of_filtered_distances = match_df["distance"].to_list()
list_of_filtered_results = list(zip(list_of_filtered_items, list_of_filtered_distances))
for item in list_of_filtered_results[1:]:
if item not in filtered_results:
filtered_results.append(item)
if plot:
# Find closest and most far away item
closest = filtered_results[:n // 2]
far_away = filtered_results[-n-1: len(filtered_results) - 1]
to_plot = [c[0] for c in closest]
to_plot.extend(c[0] for c in far_away)
# Find distances
dist = [c[1] for c in closest]
dist.extend(c[1] for c in far_away)
# Colors
colors = ["darkturquoise" for _ in range(n)]
colors.extend("hotpink" for _ in range(n // 2))
# Data in DataFrame
data = pd.DataFrame({"distance": dist}, index = to_plot)
# Bar chart
data["distance"].plot.barh(color = colors, figsize = (10, 8), edgecolor = "k", linewidth = 2)
plt.xlabel("Cosine Similarity");
plt.axvline(x = 0, color = "k");
# Title
name_str = "Most and Least Similar to {}".format(name)
plt.title(name_str, x = 0.2, size = 28, y = 1.05)
return None
return None
# Plot results
if plot:
# Find closest and most far away item
far_away = sorted_distances[:n // 2]
closest = sorted_distances[-n-1: len(distances) - 1]
to_plot = [rindex[c] for c in far_away]
to_plot.extend(rindex[c] for c in closest)
# Find distances
dist = [distances[c] for c in far_away]
dist.extend(distances[c] for c in closest)
# Colors
colors = ["hotpink" for _ in range(n // 2)]
colors.extend("darkturquoise" for _ in range(n))
# Data in DataFrame
data = pd.DataFrame({"distance": dist}, index = to_plot)
# Bar chart
data["distance"].plot.barh(color = colors, figsize = (10, 8), edgecolor = "k", linewidth = 2)
plt.xlabel("Cosine Similarity");
plt.axvline(x = 0, color = "k");
# Title
name_str = "Most and Least Similar to {}".format(name)
plt.title(name_str, x = 0.2, size = 28, y = 1.05)
return None
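# Minimal usage sketch (assumed wiring: find_similar expects these module-level
# lookups to exist; the embedding layer name and hotel name are hypothetical):
if __name__ == "__main__":
    hotels_df = get_df()
    hotel_index, index_hotel, _ = get_int_mapping(hotels_df, "hotel_name")
    hotel_weights = get_embeddings("hotel_embedding")  # hypothetical layer name
    find_similar("Hotel Astoria", hotel_weights, index_name="hotel_name", n=10)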
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define linear function approximator.
Dependencies:
- `pyrobolearn.models`
- `pyrobolearn.states`
- `pyrobolearn.actions`
"""
from pyrobolearn.approximators.approximator import Approximator
from pyrobolearn.models.basics.polynomial import Polynomial, PolynomialFunction
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class PolynomialApproximator(Approximator):
r"""Polynomial Function Approximator
The polynomial function approximator is a discriminative deterministic model expressed mathematically as
:math:`y = f(x) = W \phi(x)`, where :math:`x` is the input vector, :math:`y` is the output vector, :math:`W`
is the weight matrix, and :math:`\phi` is the polynomial function which returns the transformed input vector.
This transformed input vector is often of higher dimension, based on the idea that if it is not linear with
respect to the parameters in the current space, it might be in a higher dimensional space.
"""
def __init__(self, inputs, outputs, degree=1, preprocessors=None, postprocessors=None):
"""
Initialize the polynomial approximator.
Args:
inputs (State, Action, np.array, torch.Tensor): inputs of the inner models (instance of Action/State)
outputs (State, Action, np.array, torch.Tensor): outputs of the inner models (instance of Action/State)
degree (int, list of int, np.array[D]): degree(s) of the polynomial. Setting `degree=3`, will apply
`[1,x,x^2,x^3]` to the inputs, while setting `degree=[1,3]` will apply `[x,x^3]` to the inputs.
preprocessors (None, Processor, list of Processor): the inputs are first given to the preprocessors then
to the model.
postprocessors (None, Processor, list of Processor): the predicted outputs by the model are given to the
processors before being returned.
"""
# create inner model
polynomial_fct = PolynomialFunction(degree=degree)
model = Polynomial(num_inputs=self._size(inputs), num_outputs=self._size(outputs),
polynomial_fct=polynomial_fct)
# call parent class
super(PolynomialApproximator, self).__init__(inputs, outputs, model=model, preprocessors=preprocessors,
postprocessors=postprocessors)
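# Illustrative sketch (assumed pyrobolearn wiring; `state` and `action` stand in
# for State/Action instances constructed elsewhere):
# approximator = PolynomialApproximator(inputs=state, outputs=action, degree=3)
# With degree=3, each input x is expanded to [1, x, x^2, x^3] before the linear
# map W is applied.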
| nilq/baby-python | python |
import requests
from bs4 import BeautifulSoup
server_address = 'http://127.0.0.1:5000'
def getElementById(html, theId):
soup = BeautifulSoup(html, 'html.parser')
r = soup.find(id=theId)
return r
def register(uname, pword, twofa, session=None):
url = server_address + '/register'
if session is None:
session = requests.session()
session.close()
credentials = {'uname': uname, 'pword': pword, '2fa': twofa}
r = session.post(url, data=credentials)
result = getElementById(r.text, 'success')
if result is None:
        print('Unable to find id=success')
return {'result': False, 'session': session}
if 'success' in result.text:
# Server response = successful
return {'result': True, 'session': session}
elif 'failure' in result.text:
# Server response = failed
return {'result': False, 'explicit_failure': True, 'session': session}
else:
# No response from server
return {'result': False, 'explicit_failure': False, 'session': session}
def login(uname, pword, twofa, session=None):
url = server_address + '/login'
if session is None:
session = requests.session()
session.close() # close any previous session if exist
creds = {'uname': uname, 'pword': pword, '2fa': twofa}
r = session.post(url, data=creds)
result = getElementById(r.text, 'result')
if result is None:
print('Cannot find id=result in response')
return {'result': False, 'session': session}
if 'success' in result.text:
return {'result': True, 'session': session}
else:
return {'result': False, 'session': session}
def index_page_exists():
req = requests.get(server_address + '/')
assert req.status_code == 200, "Status code not 200"
def login_page_exists():
req = requests.get(server_address + '/login')
assert req.status_code == 200, "Status code not 200"
def register_page_exists():
req = requests.get(server_address + '/register')
assert req.status_code == 200, "Status code not 200"
def spell_page_exists():
req = requests.get(server_address + '/spell_check')
assert req.status_code == 200, "Status code not 200"
def logout_page_exists():
req = requests.get(server_address + '/logout')
assert req.status_code == 200, "Status code not 200"
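# Smoke-test runner sketch (an assumption; the target server must already be
# running on 127.0.0.1:5000 for these checks to pass):
if __name__ == '__main__':
    index_page_exists()
    login_page_exists()
    register_page_exists()
    spell_page_exists()
    logout_page_exists()
    print('All smoke tests passed.')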
| nilq/baby-python | python |
import random
import gym
import numpy as np
M = 5.0
T = 1.0
GOAL = 0.001
class WeightEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
super(WeightEnv, self).__init__()
self.reward_range = (-float('inf'), 0.0)
self.state = np.array([0, 0, 0]) # position, velocity, acceleration
# action: force[-10, 10]
self.action_space = gym.spaces.Box(low=-10, high=10, shape=(1,), dtype=np.float32)
# observation: position[-10,10], velocity[-10,10], acceleration[-10,10], jerk[-10,10]
        self.observation_space = gym.spaces.Box(
            np.array([-10, -10, -10, -10], dtype=np.float32),
            np.array([10, 10, 10, 10], dtype=np.float32))
        self.steps = 0
        self.seed()  # initialize self.np_random, which reset() relies on
def step(self, action):
prev_position = self.state[0]
prev_velocity = self.state[1]
prev_acceleration = self.state[2]
action_force = min(max(action[0], -10.0), 10.0)
next_acceleration = action_force / M
next_jerk = next_acceleration - prev_acceleration
next_velocity = prev_velocity + next_acceleration * T
next_position = prev_position + next_velocity * T
self.steps += 1
done = ((abs(next_position) < GOAL) and (abs(next_velocity) < GOAL)) or (self.steps > 100)
self.state = np.array([next_position, next_velocity, next_acceleration])
reward = 0.0 - (abs(next_position)**2) - (abs(next_velocity)**2) - (abs(next_acceleration)**2) - (abs(next_jerk)**2)
return np.array([next_position, next_velocity, next_acceleration, next_jerk]), reward, done, {}
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
def reset(self):
self.steps = 0
self.state = np.array([self.np_random.uniform(low=-10.0, high=10.0), 0, 0]) # position, velocity, accel
return np.array([self.state[0], self.state[1], self.state[2], 0])
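# Minimal rollout sketch (illustrative): apply random forces until the episode
# terminates, i.e. the weight settles near the origin or 100 steps elapse.
if __name__ == '__main__':
    env = WeightEnv()
    env.seed(0)
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, _ = env.step(env.action_space.sample())
    print('episode finished after', env.steps, 'steps; last reward:', reward)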
| nilq/baby-python | python |
PATTERN = r"(doge|shib)"
TRANSFORMER_MODEL = 'cardiffnlp/twitter-xlm-roberta-base-sentiment'
SENTIMENT_MAPPING = {
'Positive' : 1,
'Neutral' : 0,
'Negative' : -1
}
| nilq/baby-python | python |
"""Two Number Sum
Write a function that takes in a non-empty array of distinct integers and
an integer representing a target sum. If any two numbers in the input
array sum up to the target sum, the function should return them in an array,
in any order. If no two numbers sum up to the target sum, the function should
return an empty array.
Note that the target sum has to be obtained by summing two different integers
in the array; you can't add a single integer to itself in order to obtain the
target sum.
You can assume that there will be at most one pair of numbers summing up to the
target sum.
Sample Input:
array = [3, 5, -4, 8, 11, 1, -1, 6]
targetSum = 10
Sample Output:
[-1, 11] // the numbers could be in reversed order
"""
def twoNumberSum(array: list, targetSum: int) -> list:
    """Finds the two numbers in the array needed to get targetSum
    This solution has O(n) time complexity | O(n) space complexity
    Args:
        array: A list containing all the candidate numbers
        targetSum: The target number we want to get by adding two numbers from the array
    Returns:
        A list containing the two numbers that added give targetSum as a result
    """
    # A set gives O(1) membership checks, keeping the single pass O(n).
    seen = set()
    for e in array:
        complement = targetSum - e
        if complement in seen:
            return [complement, e]
        seen.add(e)
    return []
| nilq/baby-python | python |
from .bmp180 import bmp180
| nilq/baby-python | python |
"""Pull git repos and update the local schemes and templates files """
import os
import sys
import shutil
import asyncio
from .shared import get_yaml_dict, rel_to_cwd, verb_msg, compat_event_loop
def write_sources_file():
"""Write a sources.yaml file to current working dir."""
file_content = (
"schemes: "
"https://github.com/Base24/base24-schemes-source.git\n"
"templates: "
"https://github.com/Base24/base24-templates-source.git"
)
file_path = rel_to_cwd("sources.yaml")
with open(file_path, "w") as file_:
file_.write(file_content)
async def git_clone(git_url, path, verbose=False):
"""Clone git repository at $git_url to $path. Return True if successful,
otherwise False."""
if verbose:
print("Cloning {}...".format(git_url))
if os.path.exists(os.path.join(path, ".git")):
# get rid of local repo if it already exists
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
proc_env = os.environ.copy()
proc_env["GIT_TERMINAL_PROMPT"] = "0"
git_proc = await asyncio.create_subprocess_exec(
"git", "clone", git_url, path, stderr=asyncio.subprocess.PIPE, env=proc_env
)
_stdout, stderr = await git_proc.communicate()
if git_proc.returncode != 0:
# remove created directory if it's empty
try:
os.rmdir(path)
except OSError:
pass
verb_msg("{}:\n{}".format(git_url, stderr.decode("utf-8")))
return False
if verbose:
print("Cloned {}".format(git_url))
return True
async def git_clone_scheduler(yaml_file, base_dir, verbose=False):
"""Create task list for clone jobs and run them asynchronously."""
jobs = generate_jobs_from_yaml(yaml_file, base_dir)
task_list = [git_clone(*args_, verbose=verbose) for args_ in jobs]
return await asyncio.gather(*task_list)
def generate_jobs_from_yaml(yaml_file, base_dir):
"""Get a set of jobs from a yaml file """
yaml_dict = get_yaml_dict(yaml_file)
for key, value in yaml_dict.items():
yield (value, rel_to_cwd(base_dir, key))
def update(custom_sources=False, verbose=False):
"""Update function to be called from cli.py"""
if not shutil.which("git"):
print("Git executable not found in $PATH.")
sys.exit(1)
results = []
with compat_event_loop() as event_loop:
if not custom_sources:
print("Creating sources.yaml…")
write_sources_file()
print("Cloning sources…")
r = event_loop.run_until_complete(
git_clone_scheduler(
rel_to_cwd("sources.yaml"),
rel_to_cwd("sources"), verbose=verbose
)
)
results.append(r)
print("Cloning templates…")
r = event_loop.run_until_complete(
git_clone_scheduler(
rel_to_cwd("sources", "templates", "list.yaml"),
rel_to_cwd("templates"),
verbose=verbose,
)
)
results.append(r)
print("Cloning schemes…")
r = event_loop.run_until_complete(
git_clone_scheduler(
rel_to_cwd("sources", "schemes", "list.yaml"),
rel_to_cwd("schemes"),
verbose=verbose,
)
)
results.append(r)
return all(results)
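# Assumed CLI hookup sketch (update() is documented as "to be called from
# cli.py"; the exact import path is an assumption):
# from .update import update
# success = update(custom_sources=False, verbose=True)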
| nilq/baby-python | python |
# This module is derived (with modifications) from
# https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-rl-pong/trainer/task.py
# Special thanks to:
# Yu-Han Liu https://github.com/dizcology
# Martin Görner https://github.com/martin-gorner
# Copyright 2019 Leigh Johnson
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Python
import logging
import argparse
import os
from collections import deque
# Lib
import tensorflow as tf
import numpy as np
import gym
from trainer.helpers import discount_rewards, preprocess_frame
from agents.tools.wrappers import AutoReset, FrameHistory
# Legal moves in space invaders are FIRE, RIGHT, LEFT, and DO NOTHING (NOOP or "No operation")
ACTIONS = {
0: "NOOP",
1: "FIRE",
# 2: "UP",
2: "RIGHT",
3: "LEFT",
# 5: "DOWN",
# 6: "UPRIGHT",
# 7: "UPLEFT",
# 8: "DOWNRIGHT",
# 9: "DOWNLEFT",
# 10: "UPFIRE",
# 11: "RIGHTFIRE",
# 12: "LEFTFIRE",
# 13: "DOWNFIRE",
# 14: "UPRIGHTFIRE",
# 15: "UPLEFTFIRE",
# 16: "DOWNRIGHTFIRE",
# 17: "DOWNLEFTFIRE",
}
MAX_MEMORY_LEN = 100000
ROLLOUT_SIZE = 10000
# We'll be pre-processing inputs into a 105 x 80 image diff (downsampled by a factor of 2) of currentframe - previousframe
OBSERVATION_DIM = 105 * 80
# MEMORY stores tuples:
# (observation, label, reward)
MEMORY = deque([], maxlen=MAX_MEMORY_LEN)
def gen():
for m in list(MEMORY):
yield m
def build_graph(observations):
"""Calculates logits from the input observations tensor.
This function will be called twice: rollout and train.
The weights will be shared.
"""
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
hidden = tf.layers.dense(
observations, args.hidden_dim, use_bias=False, activation=tf.nn.relu)
logits = tf.layers.dense(hidden, len(ACTIONS), use_bias=False)
return logits
def main(args):
args_dict = vars(args)
logging.info('args: {}'.format(args_dict))
with tf.Graph().as_default() as g:
# rollout subgraph
with tf.name_scope('rollout'):
observations = tf.placeholder(
shape=(None, OBSERVATION_DIM), dtype=tf.float32)
logits = build_graph(observations)
logits_for_sampling = tf.reshape(
logits, shape=(1, len(ACTIONS)))
# Sample the action to be played during rollout.
sample_action = tf.squeeze(tf.multinomial(
logits=logits_for_sampling, num_samples=1))
optimizer = tf.train.RMSPropOptimizer(
learning_rate=args.learning_rate,
decay=args.rmsprop_decay
)
# dataset subgraph for experience replay
with tf.name_scope('dataset'):
# the dataset reads from MEMORY
ds = tf.data.Dataset.from_generator(
gen, output_types=(tf.float32, tf.int32, tf.float32))
ds = ds.shuffle(MAX_MEMORY_LEN).repeat().batch(args.batch_size)
iterator = ds.make_one_shot_iterator()
# training subgraph
with tf.name_scope('train'):
# the train_op includes getting a batch of data from the dataset, so we do not need to use a feed_dict when running the train_op.
next_batch = iterator.get_next()
train_observations, labels, processed_rewards = next_batch
# This reuses the same weights in the rollout phase.
train_observations.set_shape((args.batch_size, OBSERVATION_DIM))
train_logits = build_graph(train_observations)
cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=train_logits,
labels=labels
)
            # Extra loss when the agent moves, to encourage more natural play.
probs = tf.nn.softmax(logits=train_logits)
move_cost = args.move_penalty * \
tf.reduce_sum(probs * [0, 1.0, 1.0, 1.0], axis=1)
loss = tf.reduce_sum(processed_rewards *
cross_entropies + move_cost)
global_step = tf.train.get_or_create_global_step()
train_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=args.max_to_keep)
with tf.name_scope('summaries'):
rollout_reward = tf.placeholder(
shape=(),
dtype=tf.float32
)
# the weights to the hidden layer can be visualized
hidden_weights = tf.trainable_variables()[0]
for h in range(args.hidden_dim):
slice_ = tf.slice(hidden_weights, [0, h], [-1, 1])
image = tf.reshape(slice_, [1, 105, 80, 1])
tf.summary.image('hidden_{:04d}'.format(h), image)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
tf.summary.scalar('{}_max'.format(
var.op.name), tf.reduce_max(var))
tf.summary.scalar('{}_min'.format(
var.op.name), tf.reduce_min(var))
tf.summary.scalar('rollout_reward', rollout_reward)
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
logging.info('Number of trainable variables: {}'.format(
len(tf.trainable_variables())))
inner_env = gym.make('SpaceInvaders-v0')
# tf.agents helper to more easily track consecutive pairs of frames
env = FrameHistory(inner_env, past_indices=[0, 1], flatten=False)
# tf.agents helper to automatically reset the environment
env = AutoReset(env)
with tf.Session(graph=g) as sess:
if args.restore:
restore_path = tf.train.latest_checkpoint(args.output_dir)
logging.info('Restoring from {}'.format(restore_path))
saver.restore(sess, restore_path)
else:
sess.run(init)
summary_path = os.path.join(args.output_dir, 'summary')
summary_writer = tf.summary.FileWriter(summary_path, sess.graph)
        # Starting value of the running reward; -21.0 is the lowest possible
        # episode score in Pong, carried over from the original Pong trainer.
        _rollout_reward = -21.0
for i in range(args.n_epoch):
logging.info('>>>>>>> epoch {}'.format(i+1))
logging.info('>>> Rollout phase')
epoch_memory = []
episode_memory = []
# The loop for actions/steps
_observation = np.zeros(OBSERVATION_DIM)
while True:
# sample one action with the given probability distribution
_action = sess.run(sample_action, feed_dict={
observations: [_observation]})
_label = ACTIONS[_action]
_pair_state, _reward, _done, _ = env.step(_action)
if args.render:
env.render()
# record experience
episode_memory.append((_observation, _action, _reward))
# Get processed frame delta for the next step
pair_state = _pair_state
current_state, previous_state = pair_state
current_x = preprocess_frame(current_state)
previous_x = preprocess_frame(previous_state)
_observation = current_x - previous_x
if _done:
obs, lbl, rwd = zip(*episode_memory)
# processed rewards
prwd = discount_rewards(rwd, args.reward_decay)
prwd -= np.mean(prwd)
prwd /= np.std(prwd)
# store the processed experience to memory
epoch_memory.extend(zip(obs, lbl, prwd))
# calculate the running rollout reward
_rollout_reward = 0.9 * _rollout_reward + 0.1 * sum(rwd)
episode_memory = []
# if args.render:
# _ = input('episode done, press Enter to replay')
# epoch_memory = []
# continue
if len(epoch_memory) >= ROLLOUT_SIZE:
break
# add to the global memory
MEMORY.extend(epoch_memory)
logging.info('>>> Train phase')
logging.info('rollout reward: {}'.format(_rollout_reward))
# Here we train only once.
_, _global_step = sess.run([train_op, global_step])
if _global_step % args.save_checkpoint_steps == 0:
logging.info('Writing summary')
feed_dict = {rollout_reward: _rollout_reward}
summary = sess.run(merged, feed_dict=feed_dict)
summary_writer.add_summary(summary, _global_step)
save_path = os.path.join(args.output_dir, 'model.ckpt')
save_path = saver.save(
sess, save_path, global_step=_global_step)
logging.info('Model checkpoint saved: {}'.format(save_path))
def parse_args():
parser = argparse.ArgumentParser('')
parser.add_argument(
'--loglevel',
type=str,
default='INFO',
choices=['debug', 'info', 'error', 'warning',
'DEBUG', 'INFO', 'ERROR', 'WARNING']
)
parser.add_argument(
'--n-epoch',
type=int,
default=5000,
help='Number of iterations (training rounds) to run'
)
parser.add_argument(
'--batch-size',
type=int,
default=10000,
        help='Number of examples drawn from replay memory per training batch'
)
parser.add_argument(
'--output-dir',
type=str,
default='tmp/training-output',
help='Directory where Tensorflow checkpoints will be written'
)
parser.add_argument(
'--restore',
default=False,
action='store_true',
help='Restore from latest checkpoint in --output-dir'
)
parser.add_argument(
'--video-dir',
default='tmp/training-videos',
type=str,
help='Directory where mp4s of each training epoch will be stored'
)
parser.add_argument(
'--learning-rate',
type=float,
default=0.001,
help='learning_rate used by tf.train.RMSPropOptimizer'
)
parser.add_argument(
'--rmsprop-decay',
type=float,
default=0.99,
help='decay (gamma) used by tf.train.RMSPropOptimizer'
)
parser.add_argument(
'--reward-decay',
type=float,
default=0.99,
help='decay (gamma) used as a reward discount factor'
)
parser.add_argument(
'--move-penalty',
type=float,
default=0.01,
help='additional penalty (loss function multipler) applied when actor is moved, which discourages super-human bursts of movement'
)
parser.add_argument(
'--hidden-dim',
type=int,
default=200
)
parser.add_argument(
'--render',
type=bool,
default=True,
        help='Render gameplay visually (and record to --video-dir)'
)
parser.add_argument(
'--save-checkpoint-steps',
type=int,
default=1
)
args = parser.parse_args()
# save all checkpoints
args.max_to_keep = args.n_epoch // args.save_checkpoint_steps
return args
if __name__ == '__main__':
args = parse_args()
logging.basicConfig(level=args.loglevel)
main(args)
| nilq/baby-python | python |
# coding: utf-8
""" Project Euler problem #40. """
def problem():
u""" Solve the problem.
An irrational decimal fraction is created by concatenating the positive
integers:
0.12345678910(1)112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of
the following expression.
d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
Answer: 210
"""
stops = [1, 10, 100, 1000, 10000, 10**5, 10**6]
length, x, prod = 0, 0, 1
while stops:
x += 1
length += len(str(x))
if length >= stops[0]:
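            # stops[0] - length - 1 is negative, so it indexes str(x) from the
            # right, picking out the digit at overall position stops[0].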
prod *= int(str(x)[stops[0] - length - 1])
stops.pop(0)
return prod
if __name__ == '__main__':
    print(problem())
| nilq/baby-python | python |
# Author: Mathurin Massias <mathurin.massias@gmail.com>
# License: BSD 3 clause
import os
from pathlib import Path
from bz2 import BZ2Decompressor
import numpy as np
from scipy import sparse
from download import download
from sklearn import preprocessing
from sklearn.datasets import load_svmlight_file
NAMES = {
'aloi': 'multiclass/aloi.bz2',
'bodyfat': 'regression/bodyfat',
'connect-4': 'multiclass/connect-4',
'dna': 'multiclass/dna.scale',
'eunite2001': 'regression/eunite2001',
'finance': 'regression/log1p.E2006.train.bz2',
'glass': 'multiclass/glass.scale',
'housing': 'regression/housing',
'iris': 'multiclass/iris.scale',
'kdda_train': 'binary/kdda.bz2',
'letter': 'multiclass/letter.scale',
'mnist': 'multiclass/mnist.bz2',
'news20': 'binary/news20.binary.bz2',
'news20_multiclass': 'multiclass/news20.bz2',
# 'protein': 'multiclass/protein.bz2',
'rcv1_multiclass': 'multiclass/rcv1_train.multiclass.bz2',
'rcv1_topics_test': 'multilabel/rcv1_topics_test_2.svm.bz2',
'rcv1_train': 'binary/rcv1_train.binary.bz2',
'real-sim': 'binary/real-sim.bz2',
'sector_train': 'multiclass/sector/sector.bz2',
'sector_test': 'multiclass/sector/sector.t.bz2',
'smallNORB': 'multiclass/smallNORB.bz2',
'url': 'binary/url_combined.bz2',
'webspam': 'binary/webspam_wc_normalized_trigram.svm.bz2',
}
N_FEATURES = {
'aloi': 128,
'bodyfat': 14,
'connect-4': 126,
'dna': 180,
'eunite2001': 16,
'finance': 4_272_227,
'glass': 9,
'housing': 13,
'iris': 4,
'kdda_train': 20_216_830,
'letter': 16,
'mnist': 780,
'news20': 1_355_191,
'news20_multiclass': 62_061,
# 'protein': 357,
'rcv1_multiclass': 47_236,
'rcv1_topics_test': 47_236,
'rcv1_train': 47_236,
'real-sim': 20_958,
'sector_train': 55_197,
'sector_test': 55_197,
'smallNORB': 18_432,
'url': 3_231_961,
'webspam': 16_609_143,
}
# DATA_HOME is determined using environment variables.
# The top priority is the environment variable $LIBSVMDATA_HOME which is
# specific to this package.
# Else, it falls back on XDG_DATA_HOME if it is set.
# Finally, it defaults to $HOME/data.
# The data will be put in a subfolder 'libsvm'
def get_data_home():
data_home = os.environ.get(
'LIBSVMDATA_HOME', os.environ.get('XDG_DATA_HOME', None)
)
if data_home is None:
data_home = Path.home() / 'data'
return Path(data_home) / 'libsvm'
DATA_HOME = get_data_home()
def download_libsvm(dataset, destination, replace=False):
"""Download a dataset from LIBSVM website."""
url = ("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/" +
NAMES[dataset])
path = download(url, destination, replace=replace)
return path
def _get_X_y(dataset, multilabel, replace=False):
"""Load a LIBSVM dataset as sparse X and observation y/Y.
If X and y already exists as npz and npy, they are not redownloaded unless
replace=True."""
# some files are compressed, some are not:
if NAMES[dataset].endswith('.bz2'):
stripped_name = NAMES[dataset][:-4]
else:
stripped_name = NAMES[dataset]
ext = '.npz' if multilabel else '.npy'
y_path = DATA_HOME / f"{stripped_name}_target{ext}"
X_path = DATA_HOME / f"{stripped_name}_data.npz"
if replace or not y_path.exists() or not X_path.exists():
tmp_path = DATA_HOME / stripped_name
# Download the dataset
source_path = DATA_HOME / NAMES[dataset]
if not source_path.parent.exists():
source_path.parent.mkdir(parents=True)
download_libsvm(dataset, source_path, replace=replace)
# decompress file only if it is compressed
if NAMES[dataset].endswith('.bz2'):
decompressor = BZ2Decompressor()
print("Decompressing...")
with open(tmp_path, "wb") as f, open(source_path, "rb") as g:
for data in iter(lambda: g.read(100 * 1024), b''):
f.write(decompressor.decompress(data))
source_path.unlink()
n_features_total = N_FEATURES[dataset]
print("Loading svmlight file...")
with open(tmp_path, 'rb') as f:
X, y = load_svmlight_file(
f, n_features=n_features_total, multilabel=multilabel)
tmp_path.unlink()
X = sparse.csc_matrix(X)
X.sort_indices()
sparse.save_npz(X_path, X)
if multilabel:
indices = np.array([lab for labels in y for lab in labels])
indptr = np.cumsum([0] + [len(labels) for labels in y])
data = np.ones_like(indices)
Y = sparse.csr_matrix((data, indices, indptr))
sparse.save_npz(y_path, Y)
return X, Y
else:
np.save(y_path, y)
else:
X = sparse.load_npz(X_path)
if multilabel:
y = sparse.load_npz(y_path)
else:
y = np.load(y_path)
return X, y
def fetch_libsvm(dataset, replace=False, normalize=False, min_nnz=3):
"""
Download a dataset from LIBSVM website.
Parameters
----------
dataset : string
        Dataset name. Must be in NAMES.keys().
replace : bool, default=False
Whether to force download of dataset if already downloaded.
normalize : bool, default=False
If True, columns of X are set to unit norm. This may make little sense
for a sparse matrix since centering is not performed.
y is centered and set to unit norm if the dataset is a regression one.
min_nnz: int, default=3
Columns of X with strictly less than min_nnz non-zero entries are
discarded.
Returns
-------
X : scipy.sparse.csc_matrix
Design matrix, in column sparse format.
y : 1D or 2D np.array
Design vector or matrix (in multiclass setting)
References
----------
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/
"""
if dataset not in NAMES:
raise ValueError("Unsupported dataset %s" % dataset)
multilabel = NAMES[dataset].split('/')[0] == 'multilabel'
is_regression = NAMES[dataset].split('/')[0] == 'regression'
print("Dataset: %s" % dataset)
X, y = _get_X_y(dataset, multilabel, replace=replace)
# preprocessing
if min_nnz != 0:
X = X[:, np.diff(X.indptr) >= min_nnz]
if normalize:
X = preprocessing.normalize(X, axis=0)
if is_regression:
y -= np.mean(y)
y /= np.std(y)
return X, y
if __name__ == "__main__":
for dataset in NAMES:
if not dataset.startswith("sector") and not dataset == "webspam":
fetch_libsvm(dataset, replace=False)
| nilq/baby-python | python |
# Create class for weather module
# Imports
import requests
import json
import datetime
import time
import os
import sys
from dotenv import load_dotenv
# Class
class WeatherModule:
"""
Weather module class
"""
# Initialize
def __init__(self, city):
"""
Initialize WeatherModule class
"""
        # Store the city this instance will query
self.city = city
# Method
def get_weather(self):
"""
Get weather data
"""
# Set up request
load_dotenv()
url = (
"http://api.openweathermap.org/data/2.5/weather?q="
+ self.city
+ "&units=metric"
+ "&lang=sp"
+ "&APPID="
+ os.getenv("OPENWEATHERMAP_API_KEY")
)
        # Get data
        data = requests.get(url).json()
        # Return the human-readable weather description
        description = data.get("weather")[0].get("description")
        return description
def get_temperature(self):
load_dotenv()
url = (
"http://api.openweathermap.org/data/2.5/weather?q="
+ self.city
+ "&units=metric"
+ "&lang=sp"
+ "&APPID="
+ os.getenv("OPENWEATHERMAP_API_KEY")
)
# Get data
data = requests.get(url).json()
# Return data
temp = data.get("main").get("temp_max")
return temp
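# Minimal usage sketch (requires OPENWEATHERMAP_API_KEY in a local .env file;
# the city name is illustrative):
if __name__ == "__main__":
    weather = WeatherModule("Madrid")
    print(weather.get_weather())
    print(weather.get_temperature())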
| nilq/baby-python | python |
"""
AWS Lambda entrypoint and Intent router
"""
from __future__ import print_function
import json
import logging
import strings
from manage_data import get_player_info
from utility import (
get_household_and_person_ids,
determine_welcome_message
)
from play_new_game import play_new_game
from handle_answer_request import (
handle_answer_request,
next_clue_request,
repeat_clue_request
)
from alexa_responses import play_end_message, speech
from session_attributes import SessionAttributes
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def lambda_handler(event, _context):
""" AWS Lambda entry point """
logger.debug('=====lambda handler started...')
logger.debug(json.dumps(event))
household_id, person_id = get_household_and_person_ids(event)
# If a one-shot was used to start a new game treat it like a LaunchRequest.
if event['session']['new'] and event['request']['type'] == "IntentRequest":
return launch_request(household_id, person_id)
if event['request']['type'] == "LaunchRequest":
return launch_request(household_id, person_id)
if event['request']['type'] == "IntentRequest":
return on_intent(event['request']['intent'], event['session'])
if event['request']['type'] == "SessionEndedRequest":
return play_end_message()
def launch_request(household_id, person_id):
""" Handles LaunchRequests """
player = get_player_info(household_id, person_id)
logger.debug("=====Player Info: %s", player)
tts = determine_welcome_message(household_id, person_id, player)
session_attributes = {
"game_status": "not_yet_started",
"player_info": player
}
return speech(tts=tts,
attributes=session_attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
def on_intent(intent, session):
""" Router for IntentRequest """
intent_name = intent['name']
logger.debug("=====IntentRequest: %s", intent_name)
this_game = SessionAttributes(session['attributes'])
if intent_name == "AnswerIntent":
return answer_intent(intent, this_game)
if intent_name == "NextClueIntent":
return next_clue_intent(this_game)
if intent_name == "NotSureIntent":
return not_sure_intent(intent, this_game)
if intent_name == "RepeatIntent":
return repeat_intent(this_game)
if intent_name == "AMAZON.StartOverIntent":
return start_over_intent(this_game)
if intent_name == "AMAZON.YesIntent":
return yes_intent(intent, this_game)
if intent_name == "AMAZON.NoIntent":
return no_intent(intent, this_game)
if intent_name in ("AMAZON.StopIntent", "AMAZON.CancelIntent"):
return play_end_message()
if intent_name == 'AMAZON.HelpIntent':
return help_intent(this_game)
def answer_intent(intent, this_game):
""" Handles AnswerIntent """
logger.debug("=====answer_intent fired...")
game_status = this_game.game_status
if game_status == "in_progress":
return handle_answer_request(intent, this_game)
# If the game hasn't started yet, the player may have
# interrupted Alexa during the rules being read to them.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
# We probably got here because the player said something other than
# yes or no after asking if they wanted to play the game again.
logger.debug("=====No attributes, ending game!")
return play_end_message()
def next_clue_intent(this_game):
""" Handle NextClueIntent """
logger.debug("=====next_clue_intent fired...")
game_status = this_game.game_status
if game_status == "in_progress":
return next_clue_request(this_game)
    # If the game hasn't started yet, the player might have interrupted
    # Alexa while the rules were being read, so we repeat them.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
    # The player probably said something other than yes or no
    # after being asked if they wanted to play again.
logger.debug("=====No attributes ending game...")
return play_end_message()
def not_sure_intent(intent, this_game):
""" Handle NotSureIntent """
logger.debug("=====not_sure_intent fired...")
game_status = this_game.game_status
if game_status == "in_progress":
# If we're on the last clue then count this as an answer.
if this_game.current_clue_index == 4:
return handle_answer_request(intent, this_game)
# Otherwise we go to the next clue.
return next_clue_request(this_game)
    # If the game hasn't started yet, the player might have interrupted
    # Alexa while the rules were being read, so we repeat them.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
    # The player probably said something other than yes or no
    # after being asked if they wanted to play again.
logger.debug("=====No attributes ending game...")
return play_end_message()
def repeat_intent(this_game):
""" Handle RepeatIntent """
logger.debug("=====repeat_intent fired...")
game_status = this_game.game_status
if game_status == "in_progress":
return repeat_clue_request(this_game)
    # If the game hasn't started yet, the player might have interrupted
    # Alexa while the rules were being read, so we repeat them.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
    # The player probably said something other than yes or no
    # after being asked if they wanted to play again.
logger.debug("=====no attributes ending game")
return play_end_message()
def start_over_intent(this_game):
""" Handle StartOverIntent """
logger.debug("=====start_over_intent fired...")
game_status = this_game.game_status
if game_status == "in_progress":
return play_new_game(this_game, replay=True)
    # If the game hasn't started yet, the player might have interrupted
    # Alexa while the rules were being read, so we repeat them.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
# If the game is over start a new one.
if game_status == "ended":
return play_new_game(this_game, replay=True)
def yes_intent(intent, this_game):
""" Handle YesIntent """
logger.debug("=====yes_intent fired...")
game_status = this_game.game_status
# If there is a game in progress we treat this as a wrong answer.
if game_status == "in_progress":
return handle_answer_request(intent, this_game)
# If it's not started yet the player wants to hear the rules.
if game_status == "not_yet_started":
return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME,
attributes=this_game.attributes,
should_end_session=False,
reprompt=strings.WELCOME_REPROMPT)
# Otherwise they're trying to play the game again after finishing a game.
return play_new_game(this_game, replay=True)
def no_intent(intent, this_game):
""" Handle NoIntent """
logger.debug("=====no_intent fired...")
game_status = this_game.game_status
# If there is a game in progress we treat this as a wrong answer.
if game_status == "in_progress":
return handle_answer_request(intent, this_game)
# If it's not started yet the player does not want the rules.
if game_status == "not_yet_started":
return play_new_game(this_game, replay=False)
# Otherwise end the game.
return play_end_message()
def help_intent(this_game):
""" Handle HelpIntent """
logger.debug("=====help_intent fired...")
tts = strings.HELP_MESSAGE_BEFORE_GAME
if this_game.game_status == "in_progress":
tts = strings.HELP_MESSAGE_DURING_GAME + this_game.current_clue
return speech(tts=tts,
attributes=this_game.attributes,
should_end_session=False)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.7 on 2021-03-12 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recruiter', '0023_auto_20210312_2144'),
]
operations = [
migrations.AddField(
model_name='recruiter',
name='overall_rating',
field=models.FloatField(default=0),
),
]
|
nilq/baby-python
|
python
|
"""Search views init."""
from src.views.index import show_index
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mar 31, 2018
@ Author: Frederich River
'''
import atexit
import os
import signal
import sys
import time
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from env import LOG_FILE, PID_FILE, TASK_FILE, MANUAL
from libmysql8 import mysqlHeader, mysqlBase
from libtask import taskManager
from message import (DM_MSG, DM_START, DM_ALIVE, DM_STOP,
DM_NOT_RUN)
from sqlalchemy.ext.declarative import declarative_base
from threading import Thread
__version__ = '1.4.8'
def neutrino(pid_file, log_file):
    # This is a daemon program, which starts after
    # the system has booted.
    #
    # It is configured to be started by rc.local.
    #
    # Fork a child process from the parent.
if os.path.exists(pid_file):
raise RuntimeError('Neutrino is already running')
try:
if os.fork() > 0:
raise SystemExit(0)
except OSError:
raise RuntimeError('Fork #1 failed.')
os.chdir('/')
os.umask(0)
os.setsid()
# Second fork
try:
if os.fork() > 0:
raise SystemExit(0)
except OSError:
raise RuntimeError('Fork #2 failed.')
# Flush I/O buffers
sys.stdout.flush()
sys.stderr.flush()
# with open(log_file, 'rb', 0) as read_null:
# os.dup2(read_null.fileno(), sys.stdin.fileno())
with open(log_file, 'a') as write_null:
# Redirect to 1 which means stdout
os.dup2(write_null.fileno(), 1)
with open(log_file, 'a') as error_null:
# Redirect to 2 which means stderr
os.dup2(error_null.fileno(), 2)
if pid_file:
with open(pid_file, 'w+') as f:
f.write(str(os.getpid()))
atexit.register(os.remove, pid_file)
def sigterm_handler(signo, frame):
raise SystemExit(1)
signal.signal(signal.SIGTERM, sigterm_handler)
def _logMonitor(log_file):
    # A parallel thread which monitors the log file.
    # If the log file does not exist, it creates a new one and
    # re-attaches stdout and stderr to it.
while True:
if os.path.exists(log_file):
time.sleep(10)
else:
create_file = open(log_file, 'a')
create_file.close()
with open(log_file, 'a') as write_null:
os.dup2(write_null.fileno(), 1)
with open(log_file, 'a') as error_null:
os.dup2(error_null.fileno(), 2)
print(
f"{time.ctime()}: Log file is missing. Recreate it.\n"
f"{time.ctime()}: Neutrino started with pid {os.getpid()}\n")
def main_function(taskfile=None):
    # Start the task scheduler and periodically re-check the task file.
print(
f"{time.ctime()}: "
f"Neutrino started with pid {os.getpid()}\n")
Base = declarative_base()
header = mysqlHeader('root', '6414939', 'test')
mysql = mysqlBase(header)
jobstores = {
'default': SQLAlchemyJobStore(
engine=mysql.engine, metadata=Base.metadata)
}
executor = {'default': ThreadPoolExecutor(20)}
Neptune = taskManager(taskfile=taskfile,
jobstores=jobstores,
executors=executor)
Neptune.start()
print(f"{time.ctime()}: Neptune start.\n")
while True:
print(DM_ALIVE.format(time.ctime()))
Neptune.check_task_file()
time.sleep(1800)
return 1
def print_info(info_file):
infotext = ''
with open(info_file) as r:
infotext = r.read()
print(infotext)
if __name__ == '__main__':
    # This is the main entry point.
    # Argument format is 'neutrino <command>'.
    # Neutrino accepts commands such as start, stop, and others.
if len(sys.argv) != 2:
print(DM_MSG.format(sys.argv[0]))
raise SystemExit(1)
if sys.argv[1] == 'start':
try:
neutrino(PID_FILE, LOG_FILE)
sys.stdout.write(DM_START.format(t=time.ctime(),
pid=os.getpid()))
sys.stdout.flush()
# Here we start a thread which monitoring the log
# file. If log file is missing, it will create one.
lm = Thread(target=_logMonitor,
args=(LOG_FILE,),
name='lm',
daemon=True)
lm.start()
main_function(TASK_FILE)
# ending of working code.
except Exception:
raise SystemExit(1)
elif sys.argv[1] == 'stop':
if os.path.exists(PID_FILE):
sys.stdout.flush()
with open(LOG_FILE, 'a') as write_null:
os.dup2(write_null.fileno(), 1)
write_null.write(DM_STOP.format(time.ctime()))
with open(PID_FILE) as f:
os.kill(int(f.read()), signal.SIGTERM)
else:
print(DM_NOT_RUN)
raise SystemExit(1)
elif sys.argv[1] == 'reboot':
if os.path.exists(PID_FILE):
sys.stdout.flush()
with open(LOG_FILE, 'a') as write_null:
os.dup2(write_null.fileno(), 1)
write_null.write(DM_STOP.format(time.ctime()))
with open(PID_FILE) as f:
os.kill(int(f.read()), signal.SIGTERM)
else:
print(DM_NOT_RUN)
# raise SystemExit(1)
try:
neutrino(PID_FILE, LOG_FILE)
sys.stdout.write(DM_START.format(t=time.ctime(),
pid=os.getpid()))
sys.stdout.flush()
# Here we start a thread which monitoring the log
# file. If log file is missing, it will create one.
lm = Thread(target=_logMonitor,
args=(LOG_FILE,),
name='lm',
daemon=True)
lm.start()
main_function(TASK_FILE)
# ending of working code.
except Exception:
raise SystemExit(1)
    elif sys.argv[1] == 'clear':
        # Truncate the log file.
        with open(LOG_FILE, 'w'):
            pass
elif sys.argv[1] == 'help':
print_info(MANUAL)
elif sys.argv[1] == 'log':
print_info(LOG_FILE)
elif sys.argv[1] == 'version':
print(__version__)
else:
print('Unknown command {!r}'.format(sys.argv[1]))
raise SystemExit(1)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
from argparse import ArgumentParser
import pandas as pd
import pyprojroot
LOSS_FUNC_ML_TASK_MAP = {
'CE-largest': 'single-label, largest',
'CE-random': 'single-label, random',
'BCE': 'multi-label',
}
def main(source_data_root,
rm_corr_csv_path,
test_results_csv_path,
test_acc_v_r_coeff_csv_filename
):
"""generate source data for figure
that plots accuracy on test set v. r coefficent
from repeated measures correlations
Parameters
----------
source_data_root : str, Path
path to root of directory where "source data" csv files
that are generated should be saved
rm_corr_csv_path : str
path to csv with repeated measures correlations results,
output of generate_source_data_acc_vsd_corr.py.
Path should be written relative to source_data_root
test_results_csv_path : str
path to csv with results of measuring accuracy on test set,
output of generate_source_data_test_results.py.
Path should be written relative to source_data_root
test_acc_v_r_coeff_csv_filename : str
filename for .csv that should be saved
with accuracies and r coefficients combined.
This is the actual source data used for plotting.
Saved in source_data_root.
"""
rm_corr_df = pd.read_csv(
source_data_root.joinpath(rm_corr_csv_path)
)
# get just acc/f1 scores on test set for models trained with transfer learning
test_results_df = pd.read_csv(source_data_root.joinpath(test_results_csv_path))
    # copy because we're going to slice-and-dice
    # to get the DataFrame we use for the 'x-y' plot comparing test accuracy to r coeff size
    xy_df = rm_corr_df.copy()
    # add a column mapping each loss function to its machine-learning task
    xy_df['task (M.L.)'] = xy_df['loss_func'].map(LOSS_FUNC_ML_TASK_MAP)
# just keep transfer results, now will be same len as test_results_df
xy_df = xy_df[xy_df.method == 'transfer']
xy_df['DNN architecture'] = xy_df.net_name.str.replace('_', ' ', regex=False)
# keep only the columns we need
COLUMNS_XY = [
'task (M.L.)', 'DNN architecture', 'loss_func', 'r', 'CI95%', 'dof', 'power', 'pval',
]
xy_df = xy_df[COLUMNS_XY]
    # use test_results_df's index for xy_df, so we can add columns from test_results_df
xy_df = xy_df.set_index(['task (M.L.)', 'DNN architecture'])
test_results_df = test_results_df.set_index(['task (M.L.)', 'DNN architecture'])
xy_df = xy_df.reindex(index=test_results_df.index)
for col in ['acc-largest-mean', 'acc-random-mean', 'f1-mean']:
xy_df[col] = test_results_df[col]
# finally reset index so we don't lose columns when we convert xy_df to 'long-form'
xy_df = xy_df.reset_index()
# make 'long form' so we can use seaborn relplot
value_vars = ['acc-largest-mean', 'acc-random-mean', 'f1-mean']
id_vars = [id_var
for id_var in xy_df.columns.tolist()
if id_var not in value_vars]
var_name = 'metric_name'
value_name = 'metric_val'
long_test_results_df = pd.melt(xy_df,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name)
pairs = [
('single-label, largest', 'acc-largest-mean'),
('single-label, random', 'acc-random-mean'),
('multi-label', 'f1-mean'),
]
long_test_results_df = pd.concat(
[long_test_results_df[
(long_test_results_df['task (M.L.)'] == pair[0]) &
(long_test_results_df['metric_name'] == pair[1])
]
for pair in pairs
]
)
long_test_results_df.to_csv(source_data_root.joinpath(test_acc_v_r_coeff_csv_filename))
long_test_results_df.to_excel(source_data_root.joinpath(
test_acc_v_r_coeff_csv_filename.replace('.csv', '.xlsx')
))
SOURCE_DATA_ROOT = pyprojroot.here().joinpath('results/VSD/source_data')
RM_CORR_CSV_PATH = '8-bins-quantile-strategy/rm_corr.csv'
TEST_RESULTS_CSV_PATH = 'test_results_table_transfer.csv'
def get_parser():
parser = ArgumentParser()
parser.add_argument('--source_data_root',
help=('path to root of directory where "source data" csv files '
'that are generated should be saved'),
default=SOURCE_DATA_ROOT)
parser.add_argument('--rm_corr_csv_path',
help=('path to csv with repeated measures correlations results, '
'output of generate_source_data_acc_vsd_corr.py. '
'Path should be written relative to source_data_root'),
default=RM_CORR_CSV_PATH)
parser.add_argument('--test_results_csv_path',
help=('path to csv with results of measuring accuracy on test set, '
'output of generate_source_data_test_results.py. '
'Path should be written relative to source_data_root'),
default=TEST_RESULTS_CSV_PATH)
parser.add_argument('--test_acc_v_r_coeff_csv_filename', default='acc_v_r_coeff.csv',
help=('filename for .csv that should be saved '
'with accuracies and r coefficients combined. '
'This is the actual source data used for plotting. '
'Saved in source_data_root.'))
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(source_data_root=args.source_data_root,
rm_corr_csv_path=args.rm_corr_csv_path,
test_results_csv_path=args.test_results_csv_path,
test_acc_v_r_coeff_csv_filename=args.test_acc_v_r_coeff_csv_filename
)
|
nilq/baby-python
|
python
|
name = 'controllers'
from .constant_controller import ConstantController
from .controller import Controller
from .energy_controller import EnergyController
from .fb_lin_controller import FBLinController
from .linear_controller import LinearController
from .lqr_controller import LQRController
from .pd_controller import PDController
from .qp_controller import QPController
from .mpc_controller import MPCController
from .mpc_controller_dense import MPCControllerDense
from .robust_mpc_controller_dense import RobustMpcDense
from .mpc_controller_lift_fp import MPCControllerFast
from .aggregated_mpc_controller import AggregatedMpcController
from .random_controller import RandomController
from .openloop_controller import OpenLoopController
|
nilq/baby-python
|
python
|
from message_bot.database.engines.base import BaseEngine
from message_bot.database.engines.gsheet import GsheetEngine
from message_bot.database.engines.json import JSONEngine
|
nilq/baby-python
|
python
|
array = input("Enter the string here: ").split()
array.sort(key=len)
print(array)
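# Example run (illustrative input): the words are sorted by length, shortest first.
#     Enter the string here: banana kiwi apple
#     ['kiwi', 'apple', 'banana']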
|
nilq/baby-python
|
python
|
import unittest
from oletools.common.clsid import KNOWN_CLSIDS
class TestCommonClsid(unittest.TestCase):
def test_known_clsids_uppercase(self):
for k, v in KNOWN_CLSIDS.items():
k_upper = k.upper()
self.assertEqual(k, k_upper)
|
nilq/baby-python
|
python
|
import logging
import os
def setup_logger(log_directory='', log_filename="astronomaly.log"):
"""
Ensures the system logger is set up correctly. If a FileHandler logger has
already been attached to the current logger, nothing new is done.
Parameters
----------
log_directory : str, optional
Location of log file, by default ''
log_filename : str, optional
Log file name, by default "astronomaly.log"
Returns
-------
Logger
The Logger object
"""
root_logger = logging.getLogger()
reset = False
if len(root_logger.handlers) != 0:
for h in root_logger.handlers:
try:
flname = h.baseFilename
if flname != os.path.join(log_directory, log_filename):
print('Warning: logger already attached to log file:')
print(flname)
print('Now switching to new log file:')
print(os.path.join(log_directory, log_filename))
reset = True
except AttributeError:
pass
if reset:
root_logger.handlers = []
if len(root_logger.handlers) == 0:
log_formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s")
root_logger.setLevel(logging.INFO)
if not os.path.exists(log_directory):
os.makedirs(log_directory)
file_handler = logging.FileHandler(
os.path.join(log_directory, log_filename))
file_handler.setFormatter(log_formatter)
file_handler.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
console_handler.setLevel(logging.WARNING)
root_logger.addHandler(file_handler)
root_logger.addHandler(console_handler)
return root_logger
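# A minimal usage sketch (the directory and messages below are illustrative):
#     setup_logger(log_directory='logs', log_filename='astronomaly.log')
#     log('Pipeline started')             # written to the log file at INFO level
#     log('Low disk space', 'WARNING')    # also echoed to the console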
def format_function_call(func_name, *args, **kwargs):
"""
Formats a function of a PipelineStage or Dataset object to ensure proper
recording of the function and its arguments. args and kwargs should be
exactly those passed to the function.
Parameters
----------
func_name : str
Name of the stage
Returns
-------
str
Formatted function call
"""
out_str = func_name + '('
    if len(args) != 0:
        for a in args:
            out_str += str(a) + ', '
    if len(kwargs.keys()) != 0:
        for k in kwargs.keys():
            out_str += (str(k) + '=' + str(kwargs[k]) + ', ')
if out_str[-2] == ',':
out_str = out_str[:-2]
out_str += ')'
return out_str
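# For example (illustrative arguments):
#     format_function_call('ImageDataset', 'data.fits', window_size=128)
# returns the string 'ImageDataset(data.fits, window_size=128)'.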
def log(msg, level='INFO'):
"""
Actually logs a message. Ensures the logger has been set up first.
Parameters
----------
msg : str
Log message
level : str, optional
DEBUG, INFO, WARNING or ERROR, by default 'INFO'
"""
root_logger = logging.getLogger()
if len(root_logger.handlers) == 0:
setup_logger()
if level == 'ERROR':
root_logger.error(msg)
elif level == 'WARNING':
root_logger.warning(msg)
elif level == 'DEBUG':
root_logger.debug(msg)
else:
root_logger.info(msg)
def check_if_inputs_same(class_name, local_variables):
"""
Reads the log to check if this function has already been called with the
same arguments (this may still result in the function being rerun if the
input data has changed).
Parameters
----------
class_name : str
Name of PipelineStage
local_variables : dict
List of all local variables.
Returns
-------
args_same, bool
True if the function was last called with the same arguments.
checksum, int
Reads the checksum stored in the log file and returns it.
"""
hdlrs = logging.getLogger().handlers
# Try to be somewhat generic allowing for other handlers but this will
# only return the filename of the first FileHandler object it finds.
# This should be ok except for weird logging edge cases.
flname = ''
checksum = 0
for h in hdlrs:
try:
flname = h.baseFilename
break
except AttributeError:
pass
    if len(flname) == 0 or not os.path.exists(flname):
        # Log file doesn't exist yet, so the arguments can't match anything.
        # Return a tuple to match the documented (args_same, checksum) contract.
        return False, checksum
else:
fl = open(flname)
func_args = {}
args_same = False
for ln in fl.readlines()[::-1]:
if class_name + '(' in ln:
# To be completely general, the string manipulation has to
# be a little complicated
stripped_ln = ln.split('-')[-2].split(')')[0].split('(')[-1]
the_list = stripped_ln.split('=')
kwarg_list = []
if len(the_list) > 1:
for l in the_list:
if ',' not in l:
kwarg_list.append(l)
else:
s = l.split(',')
if len(s) > 2:
kwarg_list.append(','.join(s[:-1]))
else:
kwarg_list.append(s[0])
kwarg_list.append(s[-1])
if len(kwarg_list) != 0:
for k in range(0, len(kwarg_list), 2):
try:
key = kwarg_list[k]
value = kwarg_list[k + 1]
func_args[key.strip()] = value.strip()
except ValueError:
# This happens when there are no arguments
pass
checksum_ln = ln.split('checksum:')
if len(checksum_ln) > 1:
checksum = int(checksum_ln[-1])
else:
checksum = 0
args_same = True
for k in func_args.keys():
if k not in local_variables.keys():
args_same = False
break
else:
if k != "force_rerun" and \
func_args[k] != (str)(local_variables[k]):
args_same = False
break
break
return args_same, checksum
|
nilq/baby-python
|
python
|
from unittest.mock import patch
import pytest
from peerscout.utils.bq_data_service import (
load_file_into_bq,
)
import peerscout.utils.bq_data_service \
as bq_data_service_module
@pytest.fixture(name="mock_bigquery")
def _bigquery():
with patch.object(bq_data_service_module, "bigquery") as mock:
yield mock
@pytest.fixture(name="mock_bq_client_class")
def _bq_client():
with patch.object(bq_data_service_module, "Client") as mock:
yield mock
@pytest.fixture(name="mock_load_job_config")
def _load_job_config():
with patch.object(bq_data_service_module, "LoadJobConfig") as mock:
yield mock
@pytest.fixture(name="mock_open", autouse=True)
def _open():
with patch.object(bq_data_service_module, "open") as mock:
yield mock
@pytest.fixture(name="mock_path")
def _getsize():
with patch.object(bq_data_service_module.os, "path") as mock:
mock.getsize.return_value = 1
mock.isfile.return_value = True
yield mock
def test_load_file_into_bq(
mock_load_job_config,
mock_open,
mock_bq_client_class):
file_name = "file_name"
dataset_name = "dataset_name"
table_name = "table_name"
load_file_into_bq(
filename=file_name,
dataset_name=dataset_name,
table_name=table_name)
mock_open.assert_called_with(file_name, "rb")
source_file = mock_open.return_value.__enter__.return_value
mock_bq_client_class.assert_called_once()
mock_bq_client = mock_bq_client_class.return_value
mock_bq_client.dataset.assert_called_with(dataset_name)
mock_bq_client.dataset(
dataset_name).table.assert_called_with(table_name)
table_ref = mock_bq_client.dataset(
dataset_name).table(table_name)
mock_bq_client.load_table_from_file.assert_called_with(
source_file, destination=table_ref,
job_config=mock_load_job_config.return_value)
|
nilq/baby-python
|
python
|
import click
from jinja2 import PackageLoader
from dgen import jinja
env = jinja.create_env(PackageLoader(package_name=__package__))
TEXT_FIELD = """
%s = models.TextField(
verbose_name=_('%s')
)"""
INTEGER_FIELD = """
%s = models.IntegerField(
verbose_name=_('%s')
)"""
BOOLEAN_FIELD = """
%s = models.BooleanField(
default=False,
verbose_name=_('%s')
)"""
DATE_FIELD = """
%s = models.DateField(
verbose_name=_('%s')
)"""
DATETIME_FIELD = """
%s = models.DateTimeField(
verbose_name=_('%s')
)"""
TIME_FIELD = """
%s = models.TimeField(
verbose_name=_('%s')
)"""
EMAIL_FIELD = """
%s = models.EmailField(
verbose_name=_('%s')
)"""
SLUG_FIELD = """
%s = models.SlugField(
allow_unicode=True,
verbose_name=_('%s')
)"""
URL_FIELD = """
%s = models.URLField(
verbose_name=_('%s')
)"""
UUID_FIELD = """
%s = models.UUIDField(
unique=True,
default=uuid.uuid4,
editable=False,
verbose_name=_('%s')
)"""
FIELDS = {
't': TEXT_FIELD,
'i': INTEGER_FIELD,
'b': BOOLEAN_FIELD,
'd': DATE_FIELD,
'dt': DATETIME_FIELD,
'time': TIME_FIELD,
'e': EMAIL_FIELD,
's': SLUG_FIELD,
'url': URL_FIELD,
'uuid': UUID_FIELD,
}
def get_field(ftype, name):
verbose_name = name.replace('_', ' ').capitalize()
return FIELDS[ftype] % (name, verbose_name)
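# For example (illustrative field), get_field('e', 'contact_email') renders roughly:
#     contact_email = models.EmailField(
#         verbose_name=_('Contact email')
#     )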
def parse_fields(fields):
parsed_fields = []
for field in fields:
parsed_field = get_field(ftype=field[0], name=field[1])
parsed_fields.append(parsed_field)
return parsed_fields
def model(name, fields):
template = env.get_template('model.py')
fields = parse_fields(fields)
context = {'name': name, 'fields': fields}
click.echo(template.render(context))
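# A minimal usage sketch (illustrative model and fields):
#     model('Article', [('t', 'title'), ('d', 'published_on')])
# renders the 'model.py' template with a TextField and a DateField.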
|
nilq/baby-python
|
python
|
from cocos.layer import Layer, director
from cocos.menu import Menu, CENTER, ToggleMenuItem, MenuItem
from cocos.scene import Scene
from app import gVariables
import sceneGenerator
class CustomPauseScene(Scene):
def __init__(self, gScene):
super(CustomPauseScene, self).__init__()
#ADD ALL TO MAIN LAYER
self.add(_MenuBackground(gScene))
self.add(_Menu(gScene))
#MENU LAYERS
class _MenuBackground(Layer):
def __init__(self, gScene):
super(_MenuBackground, self).__init__()
self.R = gScene.R
        self.menuBackgroundSprite = self.R.BACKGROUND[0]
self.menuBackgroundSprite.position = (director._window_virtual_width/2, director._window_virtual_height/2)
self.add(self.menuBackgroundSprite)
class _Menu(Menu):
def __init__(self, gScene):
super(_Menu, self).__init__()
self.gScene = gScene
self.menu_valign = CENTER
self.menu_halign = CENTER
self.menu_hmargin = 4
self.font_item['color'] = (189,216,178,255)
self.font_item_selected['color'] = (140,161,132,255)
self.create_menu([
ToggleMenuItem("Sound Effect ", self.onToggleFX, gVariables.g_IS_FX),
ToggleMenuItem("Music ", self.onToggleMusic, gVariables.g_IS_BACKMUSIC),
MenuItem("Resume", self.onBack)
])
def onToggleFX(self, value):
gVariables.g_IS_FX = value
def onToggleMusic(self, value):
if value:
sceneGenerator.PLAYMUSIC.Play()
else:
sceneGenerator.PLAYMUSIC.Stop()
gVariables.g_IS_BACKMUSIC = value
def onBack(self):
director.replace(Scene(self.gScene))
self.gScene.PLAYER.is_playing = False
|
nilq/baby-python
|
python
|
#%%
import numpy as np
from scipy import sparse
from scipy.linalg import block_diag
#%%
def sdp_ymat(lines, Ybus):
nbus = Ybus.shape[0]
nline = len(lines)
# busset = np.arange(0, nbus)
# lineset = np.arange(0, nline)
#%%
def e(k): return np.eye(nbus)[:, k][np.newaxis] # size of e(k): (1, nbus)
def Yk_small(k): return (e(k).T @ e(k)) @ Ybus
def Yk(k): return (1/2) * \
np.block([
[np.real(Yk_small(k) + Yk_small(k).T), np.imag(Yk_small(k).T - Yk_small(k))],
[np.imag(Yk_small(k) - Yk_small(k).T), np.real(Yk_small(k) + Yk_small(k).T)]
])
def Yk_(k): return -(1/2) * \
np.block([
[np.imag(Yk_small(k) + Yk_small(k).T), np.real(Yk_small(k) - Yk_small(k).T)],
[np.real(Yk_small(k).T - Yk_small(k)), np.imag(Yk_small(k) + Yk_small(k).T)]
])
def Mk(k): return block_diag(e(k).T @ e(k), e(k).T @ e(k))
# Real part of line admittance
def gl(l): return np.real(1 / (lines[l].r+1j*lines[l].x))
# Imaginary part of line admittance
def bl(l): return np.imag(1 / (lines[l].r+1j*lines[l].x))
def tau(l): return 1 if lines[l].tap == 0 else lines[l].tap
def theta(l): return lines[l].shft
def gbcosft(l): return gl(l)*np.cos(theta(l)) + bl(l)*np.cos(theta(l)+np.pi/2)
def gbsinft(l): return gl(l)*np.sin(theta(l)) + bl(l)*np.sin(theta(l)+np.pi/2)
def gbcostf(l): return gl(l)*np.cos(-theta(l)) + bl(l)*np.cos(-theta(l)+np.pi/2)
def gbsintf(l): return gl(l)*np.sin(-theta(l)) + bl(l)*np.sin(-theta(l)+np.pi/2)
#%%
def Ylineft(l): return 0.5*(
sparse.coo_matrix((
[gl(l)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l),
gl(l)/(tau(l)**2), -gbsinft(l)/tau(l), -gbcosft(l)/tau(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus +
nbus, lines[l].fbus+nbus, lines[l].fbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus))
+
sparse.coo_matrix((
[gl(l)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l),
gl(l)/(tau(l)**2), -gbsinft(l)/tau(l), -gbcosft(l)/tau(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus +
nbus, lines[l].fbus+nbus, lines[l].fbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape=(2*nbus, 2*nbus)).T
)
def Y_lineft(l): return 0.5*(
sparse.coo_matrix((
[-(bl(l)+lines[l].b/2)/(tau(l)**2), gbsinft(l)/tau(l), gbcosft(l)/tau(l), -
(bl(l)+lines[l].b/2)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus +
nbus, lines[l].fbus+nbus, lines[l].fbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape=(2*nbus, 2*nbus))
+
sparse.coo_matrix((
[-(bl(l)+lines[l].b/2)/(tau(l)**2), gbsinft(l)/tau(l), gbcosft(l)/tau(l), -
(bl(l)+lines[l].b/2)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus +
nbus, lines[l].fbus+nbus, lines[l].fbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape=(2*nbus, 2*nbus)).T
)
def Ylinetf(l): return 0.5*(
sparse.coo_matrix((
[-gbcostf(l)/tau(l), -gbsintf(l)/tau(l), gbsintf(l) /
tau(l), -gbcostf(l)/tau(l), gl(l), gl(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus],
[lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus,
lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus))
+
sparse.coo_matrix((
[-gbcostf(l)/tau(l), -gbsintf(l)/tau(l), gbsintf(l) /
tau(l), -gbcostf(l)/tau(l), gl(l), gl(l)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus],
[lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus,
lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus)).T
)
def Y_linetf(l): return 0.5*(
sparse.coo_matrix((
[gbsintf(l)/tau(l), -gbcostf(l)/tau(l), gbcostf(l)/tau(l),
gbsintf(l)/tau(l), -(bl(l)+lines[l].b/2), -(bl(l)+lines[l].b/2)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus],
[lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus,
lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape=(2*nbus, 2*nbus))
+
sparse.coo_matrix((
[gbsintf(l)/tau(l), -gbcostf(l)/tau(l), gbcostf(l)/tau(l),
gbsintf(l)/tau(l), -(bl(l)+lines[l].b/2), -(bl(l)+lines[l].b/2)],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus,
lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus],
[lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus,
lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape=(2*nbus, 2*nbus)).T
)
def YL(l): return sparse.coo_matrix((
[1, -1, 1, -1, -1, 1, -1, 1],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus,
lines[l].tbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus,
lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus)) * lines[l].r * (gl(l)**2 + bl(l)**2)
def YL_(l): return (sparse.coo_matrix((
[1, -1, 1, -1, -1, 1, -1, 1],
([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus,
lines[l].tbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus+nbus],
[lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus,
lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus)) * lines[l].x * (gl(l)**2 + bl(l)**2)
-
sparse.coo_matrix((
[1, 1, 1, 1],
([lines[l].fbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus],
[lines[l].fbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus])
), shape = (2*nbus, 2*nbus)) * lines[l].b / 2)
return Yk, Yk_, Mk, Ylineft, Ylinetf, Y_lineft, Y_linetf, YL, YL_
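#%%
# A minimal usage sketch (illustrative; assumes each entry of `lines` has the
# attributes fbus, tbus, r, x, b, tap, shft, and that `Ybus` is the complex
# bus admittance matrix):
#     Yk, Yk_, Mk, Ylineft, Ylinetf, Y_lineft, Y_linetf, YL, YL_ = sdp_ymat(lines, Ybus)
#     P_bus0 = Yk(0)   # (2*nbus, 2*nbus) matrix for active power injection at bus 0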
|
nilq/baby-python
|
python
|
while True:
    try:
        height = int(input("Height: "))
        # Only accept heights between 1 and 8.
        if 9 > height > 0:
            i = 1
            while i <= height:
                a = height - i
                print(a * ' ' + "#" * i)
                i = i + 1
            break
        print("Please try a number between 1 and 8")
    except ValueError:
        print("Invalid number, please try again")
|
nilq/baby-python
|
python
|
from typing import Dict, List, Tuple
import pygame
import pygame_gui
from pygame.constants import TEXTINPUT
from pygame.event import EventType
from pygame_gui.core import UIContainer
from pygame_gui.elements import UIButton
from pygame_gui.elements.ui_label import UILabel
from pygame_gui.ui_manager import UIManager
import pysimgame
from pysimgame.utils.abstract_managers import GameComponentManager
class SpeedManager(GameComponentManager):
"""Manager of the model speed."""
speed: float
available_speeds: List[float]
container: UIContainer
play_button: UIButton
faster_button: UIButton
slower_button: UIButton
speed_label: UILabel
settings: Dict
def _resize_ui(self):
"""Recreate the ui to the size"""
x, y = self.GAME_MANAGER.MAIN_DISPLAY.get_size()
rect: pygame.Rect = self.settings["container_rect"]
rect.x = (x - rect.width) / 2
rect.y = y - rect.height
self.speed = 1
self.container = UIContainer(
relative_rect=self.settings["container_rect"],
manager=self.ui_manager,
)
self.play_button = UIButton(
relative_rect=self.settings["play_rect"],
text=">",
manager=self.ui_manager,
container=self.container,
)
self.faster_button = UIButton(
relative_rect=self.settings["faster_rect"],
text="+",
manager=self.ui_manager,
container=self.container,
)
self.slower_button = UIButton(
relative_rect=self.settings["slower_rect"],
text="-",
manager=self.ui_manager,
container=self.container,
)
self.speed_label = UILabel(
relative_rect=self.settings["text_rect"],
text=f"{self.speed} X",
manager=self.ui_manager,
container=self.container,
)
def prepare(self):
self.settings = {
"available_speeds": [1 / 4, 1 / 2, 1, 2, 4, 10],
"container_rect": pygame.Rect(-1, 500, 200, 50),
"play_rect": pygame.Rect(0, 0, 50, 50),
"faster_rect": pygame.Rect(175, 0, 25, 25),
"slower_rect": pygame.Rect(175, 25, 25, 25),
"text_rect": pygame.Rect(50, 0, 125, 50),
}
# Uses the game manager ui
self.ui_manager = self.GAME_MANAGER.UI_MANAGER
self.available_speeds = sorted(self.settings["available_speeds"])
self._resize_ui()
def connect(self):
self.MODEL_MANAGER = self.GAME_MANAGER.MODEL_MANAGER
self._base_fps = self.MODEL_MANAGER.fps
def increase_speed(self):
"""Increase the speed.
1 step in the available speeds.
"""
# Gets the current speed
ind = self.available_speeds.index(self.speed)
if ind < len(self.available_speeds) - 1:
# Calculate the new speed index (assume sorted)
self.speed = self.available_speeds[int(ind + 1)]
self.post_changed_speed()
def decrease_speed(self):
"""Decrease the speed.
1 step in the available speeds.
"""
# Gets the current speed
ind = self.available_speeds.index(self.speed)
if ind > 0:
# Calculate the new speed index (assume sorted)
self.speed = self.available_speeds[int(ind - 1)]
self.post_changed_speed()
def post_changed_speed(self):
# post event
event = pygame.event.Event(
pysimgame.events.SpeedChanged,
{"fps": self._base_fps * self.speed},
)
pygame.event.post(event)
def process_events(self, event: pygame.event.Event) -> bool:
"""Listen the events for this manager."""
match event:
case EventType(
type=pygame_gui.UI_BUTTON_PRESSED,
ui_element=self.faster_button,
) | EventType(type=pygame.TEXTINPUT, text="+"):
self.increase_speed()
case EventType(
type=pygame_gui.UI_BUTTON_PRESSED,
ui_element=self.slower_button,
) | EventType(type=pygame.TEXTINPUT, text="-"):
self.decrease_speed()
case EventType(type=pysimgame.events.SpeedChanged):
self.speed_label.set_text(f"{self.speed} X")
case EventType(
type=pygame_gui.UI_BUTTON_PRESSED,
ui_element=self.play_button,
):
# Change the pause state
event = pygame.event.Event(pysimgame.events.TogglePaused, {})
pygame.event.post(event)
case EventType(type=pysimgame.events.Paused):
self.play_button.set_text("||")
case EventType(type=pysimgame.events.UnPaused):
self.play_button.set_text(">")
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlCollection
from pydocx.openxml.vml import Shape, Rect
class Picture(XmlModel):
XML_TAG = 'pict'
children = XmlCollection(Shape, Rect)
|
nilq/baby-python
|
python
|
from typing import Union
from pyppeteer.browser import Browser
__all__ = ("BrowserContext",)
class BrowserContext:
def __init__(self) -> None:
self._browser: Union[Browser, None] = None
def set(self, browser: Browser) -> None:
self._browser = browser
def get(self) -> Union[Browser, None]:
return self._browser
def clear(self) -> None:
self._browser = None
def __repr__(self) -> str:
return f"{self.__class__.__name__}<{self._browser!r}>"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script controls the head motors
Altered by Johannes Sommerfeldt
"""
import os
import sys
import redis
# ROS 2 Imports
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float32, String, Int16, Bool
from head.msg import MotorPosition
from systemcore.msg import I2Cwrite8, I2Cwrite16, I2CwriteArray
import time
from threading import Timer
import threading
class Commands():
"""
This class contains the I2C Commands for the Arduino Motor Control.
"""
MOTOR_SET_STIFFNESS = 0x10
MOTOR_TURN_SET_ABSOLUTE = 0x11
MOTOR_TURN_SET_RELATIVE = 0x12 # Deprecated
MOTOR_PITCH_SET_ABSOLUTE = 0x13
MOTOR_PITCH_SET_RELATIVE = 0x14 # Deprecated
MOTOR_SET_SPEED = 0x20
class I2cDataConstants():
"""
This class contains constant values that are sent in the data of Arduino commands
"""
MOTOR_CONTROL_SPEED = 0
MOTOR_CONTROL_DURATION = 1
MOTOR_MAX_SPEED = 50 # Motors will move with: (<value> / 10 * msg.speed) pwm per millisecond
class NodeSpinner(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
self.node = node
def run(self):
rclpy.spin(self.node)
###########################################################################################################
## Object representing a single motor #####################################################################
###########################################################################################################
class Motor(): # class Motor(Node):
"""
Object representing a single motor of the robot.
Each new physical Motor should get its own Motor-Object in this script.
"""
def __init__(self, parentNode,
name, redisTopicLastPWM, redisTopicLastAngle,
redisKeyMaxPWM, redisKeyMinPWM, redisKey0PWM, redisKey90PWM,
rosTopicSetPWM, rosTopicSetAngle, rosTopicChangeAngle, rosTopicIsMoving,
cmdSetAbsolute, cmdSetRelative, i2cAddress, i2cArrayPublisher):
"""Constructor for a Motor Object
Args:
            parentNode (Node): The ROS2 node used to create subscriptions and publishers. Be aware that every started ROS node should have only ONE node instance.
name (String): Name of the motor
redisTopicLastPWM (String): Redis Key under which the last set PWM value is stored and published.\n
redisTopicLastAngle (String): Redis Key under which the last set Motor Angle is stored and published.\n
redisKeyMaxPWM (String): Redis Key under which the Max PWM value for this motor is stored.\n
redisKeyMinPWM (String): Redis Key under which the Min PWM value for this motor is stored.\n
redisKey0PWM (String): Redis Key under which the PWM value for 0 degree is stored for this motor.\n
redisKey90PWM (String): Redis Key under which the PWM relative absolute value for changing the motor position by 90 degree is stored.\n
rosTopicSetPWM (String): The subscribed ROS-Topic to set an absolute PWM value for this motor.\n
rosTopicSetAngle (String): The subscribed ROS-Topic to set an absolute angle for this motor.\n
rosTopicChangeAngle (String): The subscribed ROS-Topic to change the current angle of this motor.\n
cmdSetAbsolute (int): The I2C command linked to this motor to set an absolute value.\n
i2cAddress (int): The I2C address of the arduino controlling the motor.\n
i2cArrayPublisher (rclpy.Publisher): The ROS-Publisher object for publishing I2C-Arrays.\n
In order to use the motor correctly, the default angle values (for 0° and the delta for 90°) and
the min and max PWM values must be stored under the given Redis-Keys.
The current default values for the motors can be found under: https://icampusnet.th-wildau.de/gitlab/ros-e/tischroboter-software-sbc/wikis/Redis-Systemwerte
The arduino commands can be found under: https://icampusnet.th-wildau.de/gitlab/ros-e/tischroboter-software-sbc/wikis/Arduino-I2C-Kommandos#ansteuerung-der-motoren
"""
# super().__init__('motor_node_{}'.format(name))
super().__init__()
self.parentNode = parentNode
self.name = name # Just used for debugging
### Create redis objects
self.r = redis.Redis(host="localhost", port=6379, db=0) # Redis object to store and get key-values
self.p = self.r.pubsub(ignore_subscribe_messages=True) # PubSub to publish redis messages
### Redis publish topics for current motor status
self.redisTopicLastPWM = redisTopicLastPWM
self.redisTopicLastAngle = redisTopicLastAngle
### Min and Max pwm values for the motor
self.maxPWM = int(self.r.get(redisKeyMaxPWM))
self.minPWM = int(self.r.get(redisKeyMinPWM))
self.value0PWM = int(self.r.get(redisKey0PWM))
self.value90PWM = int(self.r.get(redisKey90PWM))
### I2C Command and Address
self.cmdSetAbsolute = cmdSetAbsolute
#self.cmdSetRelative = cmdSetRelative # unused
self.i2cAddress = i2cAddress
self.i2cArrayPublisher = i2cArrayPublisher
### Ros subscriber topics for input commands and publisher for status info
self.rosTopicSetPWM = rosTopicSetPWM
self.rosTopicSetAngle = rosTopicSetAngle
self.rosTopicChangeAngle = rosTopicChangeAngle
self.rosTopicIsMoving = rosTopicIsMoving
### Motor specific topics subscriber and publisher
if (self.rosTopicSetPWM is not None): self.parentNode.create_subscription(Int16, self.rosTopicSetPWM, self.onSetPWM, 10)
if (self.rosTopicSetAngle is not None): self.parentNode.create_subscription(MotorPosition, self.rosTopicSetAngle, self.onSetAngle, 10)
if (self.rosTopicChangeAngle is not None): self.parentNode.create_subscription(MotorPosition, self.rosTopicChangeAngle, self.onChangeAngle, 10)
self.isMovingPublisher = self.parentNode.create_publisher(Bool, self.rosTopicIsMoving, 10)
self.logger = self.parentNode.get_logger()
# variables for the publisher
self.isMoving = False
self.isMovingTimer = None
self.logger.info("Subscribed: {:20} | Msg: head/MotorPosition".format(self.rosTopicSetAngle))
self.logger.info("Subscribed: {:20} | Msg: head/MotorPosition".format(self.rosTopicChangeAngle))
self.logger.info("Subscribed: {:20} | Msg: Int16".format(self.rosTopicSetPWM))
self.logger.info("Publishes: {:20} | Msg: Bool".format(self.rosTopicIsMoving))
        ### Init head position
        # Assume the pwm value the motor had at the end of the last session as the current value.
        lastPWM = self.r.get(redisTopicLastPWM)
        # If no value was found in redis, reset to looking straight.
        # (Check before converting to int, since int(None) would raise a TypeError.)
        if lastPWM is None:
            self.currentPWM = self.value0PWM
            self.logger.info(name + " found no redis entry for the last pwm value. Reset to: " + str(self.currentPWM))
        else:
            self.currentPWM = int(lastPWM)
# waiting seems to be necessary in order for the published motor movement to be functioning
time.sleep(1)
# move to the expected pwm value to avoid inconsistencies in case the arduino moved the motors without this node and without updating redis
self.onSetPWM(Int16(data=self.currentPWM))
################################
### LOG INFO ###################
self.logger.info("Publish on: system/i2c/write8 | Msg: system/I2Cwrite8")
self.logger.info("Publish on: system/i2c/writeArray | Msg: system/I2CwriteArray")
self.logger.info("Started Motor Control Node {}".format(name))
################################
def onShutdown(self):
pass
def onSetPWM(self, msg):
"""Method to set an absolute PWM value for this motor. This method will take care about the maximum pwm values.
Args:
msg (std_msgs.msg.Int16): ROS Int16 message object.
"""
self.logger.info("Got message on set {} pwm: \n{}".format(self.name, msg))
pwm = int(msg.data)
# Create a MotorPosition object so the setPWM command can be handled like the set/change angle methods
motorPosition = MotorPosition()
motorPosition.duration = 0
motorPosition.speed = 10.0
self.commitMovement(pwm, motorPosition)
def onSetAngle(self, msg):
"""Method to set an absolute motor angle. This method will take care about the maximum motor angles.
Args:
msg (head.msg.MotorPosition): ROS MotorPosition message object.
"""
self.logger.info("Got message on set {} angle:\n{}".format(self.name, msg))
angle = int(msg.angle)
# use the PWM value for 0 degrees (straight view angle) as reference to turn to an absolute angle
pwm = self.value0PWM + self.getPwmDeltaFromAngle(angle)
self.commitMovement(pwm, msg)
def onChangeAngle(self, msg):
"""Method to change the current motor angle relatively. This method will take care about the maximum motor angles.
Args:
msg (head.msg.MotorPosition): ROS MotorPosition message object.
"""
self.logger.info("Got message on change {} angle:\n{}".format(self.name, msg))
angle = int(msg.angle)
# use the current PWM value as reference to turn to an angle relative to the previous position
pwm = self.currentPWM + self.getPwmDeltaFromAngle(angle)
self.commitMovement(pwm, msg)
def commitMovement(self, pwm, msg):
""" The code all turn commands have in common. Handles everything about the turn. """
# Make sure the PWM is in a range the motor can actually turn to
pwm = self.limitPWM(pwm)
angle = round(self.getAngleFromPWM(pwm))
self.logger.info("Moving with PWM = {} (Calced angle: {})".format(pwm, angle))
# Store the last values in Redis
if (self.redisTopicLastPWM is not None):
self.r.set(self.redisTopicLastPWM, pwm)
self.r.publish(self.redisTopicLastPWM, pwm)
if (self.redisTopicLastAngle is not None):
self.r.set(self.redisTopicLastAngle, angle)
self.r.publish(self.redisTopicLastAngle, angle)
# Update the motor position with the calculated PWM value
self.updateMotorPosition(self.cmdSetAbsolute, pwm, int(round(msg.speed)), int(msg.duration))
# Publish info that the motor is moving
deltaPwm = pwm - self.currentPWM
self.pubMotorActivity(deltaPwm, msg.speed, msg.duration)
self.currentPWM = pwm
def getPwmDeltaFromAngle(self, angle):
""" Returns the PWM delta that matches the specified angle delta. """
return int(float(angle) / 90.0 * self.value90PWM)
def getAngleFromPWM(self, pwm):
""" Returns the absolute angle that matches the specified pwm value. """
return float(pwm - self.value0PWM) * 90.0 / float(self.value90PWM)
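    # Worked example (illustrative calibration): with value0PWM = 1500 and
    # value90PWM = 500, an angle of +45 degrees maps to a pwm delta of
    # int(45 / 90 * 500) = 250, i.e. an absolute pwm of 1500 + 250 = 1750,
    # and getAngleFromPWM(1750) maps back to (1750 - 1500) * 90 / 500 = 45.0.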
def limitPWM(self, pwm):
""" Returns the maximum or minimum pwm value that can be turned to, if the specified pwm value is too great/small.
If the pwm value is already within the legal range, it is returned unchanged. """
return min(max(pwm, self.minPWM), self.maxPWM)
### Generic method to take care of the I2C publishing of new motor positions ###
def updateMotorPosition(self, cmd, pwm, speed, duration):
"""Generic method to update the motor position to a new PWM value. This method is handling the necessary I2C publishing.
Args:
cmd (int): Arduino Command for the motor position update.
pwm (int): New PWM value for the motor.
speed (int): Speed value between 1 and 100 to reach the new position. The speed argument is only considered if the duration argument == 0.
duration (int): Duration in ms to reach the new motor position. If duration != 0, the speed argument is not considered.
"""
self.logger.info("{} --> pwm: {} | speed: {} | duration: {}".format(cmd, pwm, speed, duration))
# Creating the I2C Array Object for publishing to the I2C Bridge node.
o = I2CwriteArray()
o.address = self.i2cAddress
o.command = cmd
# differ between speed or duration value
if duration is not None and duration > 0:
o.data = [int(pwm >> 8), int(pwm & 0x00FF), int(I2cDataConstants.MOTOR_CONTROL_DURATION), int(duration >> 8), int(duration & 0x00FF)]
else:
o.data = [int(pwm >> 8), int(pwm & 0x00FF), int(I2cDataConstants.MOTOR_CONTROL_SPEED), int(speed & 0x00FF), int(0)]
self.i2cArrayPublisher.publish(o)
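        # Byte layout example (illustrative): pwm = 1750 (0x06D6) is sent as the
        # high byte 0x06 (1750 >> 8) followed by the low byte 0xD6 (1750 & 0x00FF).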
def pubMotorActivity(self, deltaPwm, speed, duration):
""" Handles publishing of the motor's activity flag. """
# find out how long the movement will take, so a timer can handle resetting the isMoving flag
# "speed factor" is the pwm per milliscond to move the motor and "speed" is the percentage of that factor to use
timeActiveMillis = duration if duration is not None and duration > 0 else abs(deltaPwm) / (float(speed) / 100 * I2cDataConstants.MOTOR_MAX_SPEED)
timeActiveSeconds = float(timeActiveMillis) / 1000
# add a small constant time to make sure the movement-stopped-info is sent after the hardware actually stopped even if there is a tiny hardware delay
timeActiveSeconds += 0.1
# If the motor was not moving before, publish that it started moving
        if not self.isMoving:
self.isMovingPublisher.publish(Bool(data = True))
self.isMoving = True
self.logger.info("Published info that '" + str(self.name) + "' started moving.")
else:
# If another command is received while the motor is still moving, the first movement will be overwritten and the new movement will begin immediately.
# In that case, the timer is now outdated and has to be shut down and started again with the new duration
# to make sure it only publishes when the new movement will finish rather than when the cancelled movement would have finished.
#self.isMovingTimer.shutdown()
self.isMovingTimer.cancel()
# The timer. Sets isMoving to False again after the calculated time for the motor movement has passed
self.isMovingTimer = Timer(interval=timeActiveSeconds, function=self.pubInactive)
self.isMovingTimer.start()
# self.isMovingTimer = self.create_timer(period=timeActiveSeconds, callback=self.pubInactive, oneshot=True)
def pubInactive(self):
""" Handles setting the motor's activity flag to inactive. """
self.isMovingPublisher.publish(Bool(data = False))
self.isMovingTimer.cancel()
self.isMoving = False
self.logger.info("Published info that '" + str(self.name) + "' stopped moving.")
###########################################################################################################
## ROS Node, contains all motor objects ###################################################################
###########################################################################################################
class MotorControl(Node):
""" ROS Node containing all motor objects """
def __init__(self):
super().__init__('motorControl_node')
# Arduino address of the Arduino controlling the head motors
self.arduinoI2C = 0x08
### Publisher for I2C Connection
self.pubI2Cwrite8 = self.create_publisher(I2Cwrite8, "system/i2c/write8", 10)
self.pubI2CwriteArray = self.create_publisher(I2CwriteArray, "system/i2c/writeArray", 10)
### Publish the maximum speed constant to the arduino so this node and hardware have the same value
o = I2Cwrite8()
o.address = self.arduinoI2C
o.command = Commands.MOTOR_SET_SPEED
o.data = int(I2cDataConstants.MOTOR_MAX_SPEED & 0x00FF)
self.pubI2Cwrite8.publish(o)
# time.sleep(1)
# Creating the object for the head turn motor
self.motorTurn = Motor(parentNode = self,
name="turn", redisTopicLastPWM="head/motorturn/lastPWM", redisTopicLastAngle="head/turn/lastAngle",
redisKeyMaxPWM="head/motorturn/maxPWM", redisKeyMinPWM="head/motorturn/minPWM",
redisKey0PWM="head/motorturn/pwm0degree", redisKey90PWM="head/motorturn/pwm90degree",
rosTopicSetPWM="head/motorturn/setPWM", rosTopicSetAngle="head/turn/setAngle",
rosTopicChangeAngle="head/turn/changeAngle", rosTopicIsMoving="head/turn/isMoving",
cmdSetAbsolute=Commands.MOTOR_TURN_SET_ABSOLUTE, cmdSetRelative=Commands.MOTOR_TURN_SET_RELATIVE,
i2cAddress=self.arduinoI2C, i2cArrayPublisher=self.pubI2CwriteArray
)
# time.sleep(1)
# Creating the object for the head pitch motor
self.motorPitch = Motor(parentNode = self,
name="pitch", redisTopicLastPWM="head/motorpitch/lastPWM", redisTopicLastAngle="head/pitch/lastAngle",
redisKeyMaxPWM="head/motorpitch/maxPWM", redisKeyMinPWM="head/motorpitch/minPWM",
redisKey0PWM="head/motorpitch/pwm0degree", redisKey90PWM="head/motorpitch/pwm90degree",
rosTopicSetPWM="head/motorpitch/setPWM", rosTopicSetAngle="head/pitch/setAngle",
rosTopicChangeAngle="head/pitch/changeAngle", rosTopicIsMoving="head/pitch/isMoving",
cmdSetAbsolute=Commands.MOTOR_PITCH_SET_ABSOLUTE, cmdSetRelative=Commands.MOTOR_PITCH_SET_RELATIVE,
i2cAddress=self.arduinoI2C, i2cArrayPublisher=self.pubI2CwriteArray
)
# rclpy.spin(self.motorPitch)
# rclpy.spin(self.motorTurn)
# self.spinNode(self.motorTurn)
# self.spinNode(self.motorPitch)
def onShutdown(self):
self.motorTurn.onShutdown()
self.motorPitch.onShutdown()
# def spinNode(self, node):
# thread = NodeSpinner(node)
# thread.start()
def main(args=None):
rclpy.init(args=args) # 'motorControl_node'
# Init all motors
node = MotorControl()
# Spin forever
rclpy.spin(node)
node.onShutdown()
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from hapServer import *
import hapBack as hb
import time
import sys
hs = hapserver()
if sys.argv[1] == "1":
    hb.a1(int(sys.argv[2]), hs)
    time.sleep(1)
elif sys.argv[1] == "2":
    hb.a2(int(sys.argv[2]), hs)
    time.sleep(1)
elif sys.argv[1] == "3":
    hb.r1(int(sys.argv[2]), hs)
    time.sleep(1)
elif sys.argv[1] == "4":
    hb.r2(int(sys.argv[2]), hs)
    time.sleep(1)
|
nilq/baby-python
|
python
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import json
import logging
import os
import sys
import types
import jwt
from azure.common.credentials import (BasicTokenAuthentication,
ServicePrincipalCredentials)
from azure.keyvault import KeyVaultAuthentication, AccessToken
from c7n_azure import constants
from c7n_azure.utils import (ResourceIdParser, StringUtils, custodian_azure_send_override,
ManagedGroupHelper)
from c7n_azure.utils import get_keyvault_secret
from msrestazure.azure_active_directory import MSIAuthentication
try:
from azure.cli.core._profile import Profile
except Exception:
Profile = None
class Session(object):
def __init__(self, subscription_id=None, authorization_file=None,
resource=constants.RESOURCE_ACTIVE_DIRECTORY):
"""
:param subscription_id: If provided overrides environment variables.
:param authorization_file: Path to file populated from 'get_functions_auth_string'
:param resource: Resource endpoint for OAuth token.
"""
self.log = logging.getLogger('custodian.azure.session')
self._provider_cache = {}
self.subscription_id_override = subscription_id
self.credentials = None
self.subscription_id = None
self.tenant_id = None
self.resource_namespace = resource
self._is_token_auth = False
self._is_cli_auth = False
self.authorization_file = authorization_file
self._auth_params = {}
@property
def auth_params(self):
self._initialize_session()
return self._auth_params
def _authenticate(self):
keyvault_client_id = self._auth_params.get('keyvault_client_id')
keyvault_secret_id = self._auth_params.get('keyvault_secret_id')
# If user provided KeyVault secret, we will pull auth params information from it
if keyvault_secret_id:
self._auth_params.update(
json.loads(
get_keyvault_secret(keyvault_client_id, keyvault_secret_id)))
client_id = self._auth_params.get('client_id')
client_secret = self._auth_params.get('client_secret')
access_token = self._auth_params.get('access_token')
tenant_id = self._auth_params.get('tenant_id')
use_msi = self._auth_params.get('use_msi')
subscription_id = self._auth_params.get('subscription_id')
if access_token and subscription_id:
self.log.info("Creating session with Token Authentication")
self.subscription_id = subscription_id
self.credentials = BasicTokenAuthentication(
token={
'access_token': access_token
})
self._is_token_auth = True
elif client_id and client_secret and tenant_id and subscription_id:
self.log.info("Creating session with Service Principal Authentication")
self.subscription_id = subscription_id
self.credentials = ServicePrincipalCredentials(
client_id=client_id,
secret=client_secret,
tenant=tenant_id,
resource=self.resource_namespace)
self.tenant_id = tenant_id
elif use_msi and subscription_id:
self.log.info("Creating session with MSI Authentication")
self.subscription_id = subscription_id
if client_id:
self.credentials = MSIAuthentication(
client_id=client_id,
resource=self.resource_namespace)
else:
self.credentials = MSIAuthentication(
resource=self.resource_namespace)
elif self._auth_params.get('enable_cli_auth'):
self.log.info("Creating session with Azure CLI Authentication")
self._is_cli_auth = True
try:
(self.credentials,
self.subscription_id,
self.tenant_id) = Profile().get_login_credentials(
resource=self.resource_namespace)
except Exception:
self.log.error('Unable to authenticate with Azure')
self.log.info("Session using Subscription ID: %s" % self.subscription_id)
def _initialize_session(self):
"""
        Creates a session using the first available authentication type.
Auth priority:
1. Token Auth
2. Tenant Auth
3. Azure CLI Auth
"""
# Only run once
if self.credentials is not None:
return
if self.authorization_file:
self.log.info("Using file for authentication parameters")
with open(self.authorization_file) as json_file:
self._auth_params = json.load(json_file)
else:
self.log.info("Using environment variables for authentication parameters")
self._auth_params = {
'client_id': os.environ.get(constants.ENV_CLIENT_ID),
'client_secret': os.environ.get(constants.ENV_CLIENT_SECRET),
'access_token': os.environ.get(constants.ENV_ACCESS_TOKEN),
'tenant_id': os.environ.get(constants.ENV_TENANT_ID),
'use_msi': bool(os.environ.get(constants.ENV_USE_MSI)),
'subscription_id': os.environ.get(constants.ENV_SUB_ID),
'keyvault_client_id': os.environ.get(constants.ENV_KEYVAULT_CLIENT_ID),
'keyvault_secret_id': os.environ.get(constants.ENV_KEYVAULT_SECRET_ID),
'enable_cli_auth': True
}
# Let provided id parameter override everything else
if self.subscription_id_override is not None:
self._auth_params['subscription_id'] = self.subscription_id_override
self._authenticate()
if self.credentials is None:
self.log.error('Unable to authenticate with Azure.')
sys.exit(1)
# TODO: cleanup this workaround when issue resolved.
# https://github.com/Azure/azure-sdk-for-python/issues/5096
if self.resource_namespace == constants.RESOURCE_VAULT:
access_token = AccessToken(token=self.get_bearer_token())
self.credentials = KeyVaultAuthentication(lambda _1, _2, _3: access_token)
def get_session_for_resource(self, resource):
return Session(
subscription_id=self.subscription_id_override,
authorization_file=self.authorization_file,
resource=resource)
def client(self, client):
self._initialize_session()
service_name, client_name = client.rsplit('.', 1)
svc_module = importlib.import_module(service_name)
klass = getattr(svc_module, client_name)
klass_parameters = None
if sys.version_info[0] < 3:
import funcsigs
klass_parameters = funcsigs.signature(klass).parameters
else:
klass_parameters = inspect.signature(klass).parameters
client = None
if 'subscription_id' in klass_parameters:
client = klass(credentials=self.credentials, subscription_id=self.subscription_id)
else:
client = klass(credentials=self.credentials)
# Override send() method to log request limits & custom retries
service_client = client._client
service_client.orig_send = service_client.send
service_client.send = types.MethodType(custodian_azure_send_override, service_client)
# Don't respect retry_after_header to implement custom retries
service_client.config.retry_policy.policy.respect_retry_after_header = False
return client
def get_credentials(self):
self._initialize_session()
return self.credentials
def get_subscription_id(self):
self._initialize_session()
return self.subscription_id
def get_function_target_subscription_name(self):
self._initialize_session()
if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ:
return os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME]
return os.environ.get(constants.ENV_FUNCTION_SUB_ID, self.subscription_id)
def get_function_target_subscription_ids(self):
self._initialize_session()
if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ:
return ManagedGroupHelper.get_subscriptions_list(
os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME], self.get_credentials())
return [os.environ.get(constants.ENV_FUNCTION_SUB_ID, self.subscription_id)]
def resource_api_version(self, resource_id):
""" latest non-preview api version for resource """
namespace = ResourceIdParser.get_namespace(resource_id)
resource_type = ResourceIdParser.get_resource_type(resource_id)
cache_id = namespace + resource_type
if cache_id in self._provider_cache:
return self._provider_cache[cache_id]
resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
provider = resource_client.providers.get(namespace)
# The api version may be directly provided
if not provider.resource_types and resource_client.providers.api_version:
return resource_client.providers.api_version
rt = next((t for t in provider.resource_types
if StringUtils.equal(t.resource_type, resource_type)), None)
if rt and rt.api_versions:
versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
api_version = versions[0] if versions else rt.api_versions[0]
self._provider_cache[cache_id] = api_version
return api_version
def get_tenant_id(self):
self._initialize_session()
if self._is_token_auth:
decoded = jwt.decode(self.credentials.token['access_token'], verify=False)
return decoded['tid']
return self.tenant_id
def get_bearer_token(self):
self._initialize_session()
if self._is_cli_auth:
return self.credentials._token_retriever()[1]
return self.credentials.token['access_token']
def load_auth_file(self, path):
with open(path) as json_file:
data = json.load(json_file)
self.tenant_id = data['credentials']['tenant']
return (ServicePrincipalCredentials(
client_id=data['credentials']['client_id'],
secret=data['credentials']['secret'],
tenant=self.tenant_id,
resource=self.resource_namespace
), data.get('subscription', None))
def get_functions_auth_string(self, target_subscription_id):
"""
Build auth json string for deploying
Azure Functions. Look for dedicated
Functions environment variables or
fall back to normal Service Principal
variables.
"""
self._initialize_session()
function_auth_variables = [
constants.ENV_FUNCTION_TENANT_ID,
constants.ENV_FUNCTION_CLIENT_ID,
constants.ENV_FUNCTION_CLIENT_SECRET
]
required_params = ['client_id', 'client_secret', 'tenant_id']
function_auth_params = {k: v for k, v in self._auth_params.items()
if k in required_params}
function_auth_params['subscription_id'] = target_subscription_id
# Use dedicated function env vars if available
if all(k in os.environ for k in function_auth_variables):
function_auth_params['client_id'] = os.environ[constants.ENV_FUNCTION_CLIENT_ID]
function_auth_params['client_secret'] = os.environ[constants.ENV_FUNCTION_CLIENT_SECRET]
function_auth_params['tenant_id'] = os.environ[constants.ENV_FUNCTION_TENANT_ID]
# Verify SP authentication parameters
if any(k not in function_auth_params.keys() for k in required_params):
raise NotImplementedError(
"Service Principal credentials are the only "
"supported auth mechanism for deploying functions.")
return json.dumps(function_auth_params, indent=2)
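# A minimal usage sketch (added, hedged): assumes service-principal
# environment variables (e.g. AZURE_CLIENT_ID / AZURE_CLIENT_SECRET /
# AZURE_TENANT_ID / AZURE_SUBSCRIPTION_ID) are exported; the client path
# below is illustrative, not exhaustive:
#
#     session = Session()
#     web_client = session.client('azure.mgmt.web.WebSiteManagementClient')
#     print(session.get_subscription_id())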
|
nilq/baby-python
|
python
|
# This sample is used in conjunction with protocolModule4.py.
from typing import Protocol, TypeVar
Y = TypeVar("Y", contravariant=True)
class Fn(Protocol[Y]):
def __call__(self, y: Y) -> None:
...
def x(x: Fn[int]) -> None:
print(x)
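# Why Y is contravariant (an added illustration, not part of the original
# sample): a callable accepting the broader type `object` also satisfies
# Fn[int], so a type checker accepts passing it to x().
def takes_object(y: object) -> None:
    print(y)

x(takes_object)  # OK under contravariance: Fn[object] is a subtype of Fn[int]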
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Create C/C++ code for two lookup tables.
import math
# Size of static tables.
kTableSize = 4096
# Scale factor for float arg to int index.
kScaleFactor = 256.0
print("// Generated code with lookup tables")
print('#include "functions.h"')
print("namespace tesseract {")
print("const double TanhTable[] = {")
for i in range(kTableSize):
print(" %a," % math.tanh(i / kScaleFactor))
print("};")
print("const double LogisticTable[] = {")
for i in range(kTableSize):
print(" %a," % (1 / (1 + math.exp(-i / kScaleFactor))))
print("};")
print("} // namespace tesseract.")
|
nilq/baby-python
|
python
|
class FactorProfile:
types = {
'question': str,
'questionText': str,
'answer': str,
'phoneNumber': str,
'credentialId': str
}
def __init__(self):
# unique key for question
self.question = None # str
# display text for question
self.questionText = None # str
# answer to question
self.answer = None # str
# phone number of mobile device
self.phoneNumber = None # str
# unique id for instance
self.credentialId = None # str
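# A minimal usage sketch (added; field values are illustrative):
if __name__ == "__main__":
    profile = FactorProfile()
    profile.question = "security_question_key"
    profile.answer = "example answer"
    print(FactorProfile.types["answer"])  # <class 'str'>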
|
nilq/baby-python
|
python
|
"""
Scheduler Service for starting flow
:license: MIT
"""
import calendar
import datetime
import json
import os
from src.dependencies.dependency_typing import (PynamoDBCheckIn,
PynamoDBConsultant,
PynamoDBCustomers, Requests)
from src.dependencies.pynamodb_checkin_provider import get_checkin_provider
from src.dependencies.pynamodb_consultant_provider import \
get_consultants_provider
from src.dependencies.pynamodb_customers_provider import get_customers_provider
from src.dependencies.requests_provider import get_requests_provider
def pub(event, context):
'''
AWS Serverless Handler
-
:param event: AWS event
:param context: AWS Lambda context
'''
print("context:", context)
print("event", event)
checkin_model = get_checkin_provider()
consultants_model = get_consultants_provider()
customers_model = get_customers_provider()
requests_client = get_requests_provider()
run_scheduler(checkin_model, consultants_model, customers_model, requests_client)
def run_scheduler(checkin_model: PynamoDBCheckIn, consultants_model: PynamoDBConsultant,
customers_model: PynamoDBCustomers, requests_client: Requests) -> None:
'''
Runs Scheduler Services
-
:param checkin_model: Checkin model
:param consultants_model: Consultant model
:param customers_model: Customer model
:param requests_client: Request client
'''
auth_token = os.environ['SlackAuth']
hed = {'Authorization': 'Bearer ' + auth_token}
today = datetime.datetime.today()
first_date = datetime.datetime(today.year, today.month, 1) - datetime.timedelta(days=1)
last_date = datetime.datetime(today.year, today.month,\
calendar.monthrange(today.year, today.month)[1])
consultants_list = list(consultants_model.scan())
customers_list = list(customers_model.scan())
checkins_list = list(checkin_model.scan(checkin_model.date.between(str(first_date),\
str(last_date)) & (checkin_model.completed == 'True')))
for con in consultants_list:
con_data = list(filter(lambda x: con.uuid == x.consultant_uuid, checkins_list))
cust_time = {}
for data in con_data:
customers = next((x for x in json.loads(data.user_input) if\
x['action_id'] == 'customers'), None)
if customers is not None:
customers = list(filter(lambda x: not x['unchecked'], customers['value']))
times = [x for x in json.loads(data.user_input)\
if x['action_id'].startswith('time_desc_input')]
for cust in customers:
time = next((z for z in times if z['customer'] == cust['value']), None)
if time is not None:
name = next((c for c in customers_list if\
c.uuid == cust['value']), None).friendlyName
cust_time[name] = cust_time.get(name, 0) + time['value']['time']
print("Cust_time: ", cust_time)
report = '{0}:'.format(today.strftime("%B"))
for key in cust_time:
report += '\n• {0} - {1} h'.format(key, (cust_time[key]))
data = {
"channel": con.slack_id,
"text": report
}
requests_client.post('https://slack.com/api/chat.postMessage', json=data, headers=hed)
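if __name__ == "__main__":
    # Hedged local smoke test (added): exercise the scheduler flow with
    # MagicMock stand-ins so no AWS tables or Slack token are required.
    # MagicMock iterates as empty, so the per-consultant loop is skipped.
    from unittest import mock
    os.environ.setdefault('SlackAuth', 'dummy-token')
    run_scheduler(mock.MagicMock(), mock.MagicMock(),
                  mock.MagicMock(), mock.MagicMock())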
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
from setuptools import setup, Command
import senf
class coverage_command(Command):
description = "generate test coverage data"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
from coverage import coverage
except ImportError:
raise SystemExit(
"Missing 'coverage' module. See "
"https://pypi.python.org/pypi/coverage or try "
"`apt-get install python-coverage python3-coverage`")
for key in list(sys.modules.keys()):
if key.startswith('senf'):
                del sys.modules[key]
cov = coverage()
cov.start()
cmd = self.reinitialize_command("test")
cmd.ensure_finalized()
cmd.run()
dest = os.path.join(os.getcwd(), "coverage")
cov.stop()
cov.html_report(
directory=dest,
ignore_errors=True,
include=["senf/*"])
print("Coverage summary: file://%s/index.html" % dest)
class pytest_command(Command):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
self.pytest_args = []
def finalize_options(self):
pass
def run(self):
import pytest
errno = pytest.main(self.pytest_args)
if errno != 0:
sys.exit(errno)
if __name__ == "__main__":
with open('README.rst') as h:
long_description = h.read()
setup(
name="senf",
version=senf.version_string,
url="https://github.com/quodlibet/senf",
description=("Consistent filename handling for all Python versions "
"and platforms"),
long_description=long_description,
author="Christoph Reiter",
author_email="reiter.christoph@gmail.com",
packages=[
"senf",
],
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'License :: OSI Approved :: MIT License',
],
tests_require=['pytest'],
cmdclass={
'test': pytest_command,
'coverage': coverage_command,
},
)
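# Usage note (added, hedged): the custom commands are invoked via setuptools:
#   python setup.py test        # runs pytest through pytest_command
#   python setup.py coverage    # runs the test suite under coverage and
#                               # writes an HTML report to ./coverage/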
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
################################################################################
# Created by Oscar Martinez #
# o.rubi@esciencecenter.nl #
################################################################################
import traceback
from flask import Flask, Response, request, jsonify
from flask_cors import CORS, cross_origin  # the flask.ext.* shims were removed in Flask 1.0
from TermSuggestionsAggregator import TermSuggestionsAggregator, Aggregation
from elsearch import ELSearch
from wnsearch import WNSearch
from word2vec import Word2VecSuggester
from precomputed import PrecomputedClusterSuggester
from rocchio import RocchioSuggester
import MakeChart
from config import get_word2vec_model
app = Flask(__name__)
CORS(app)
methodsConfigurationDict = {1: (WNSearch, ()),
2: (ELSearch, ()),
3: (PrecomputedClusterSuggester, ()),
4: (Word2VecSuggester, (get_word2vec_model(), )),
5: (RocchioSuggester, ()),
}
methodsInstances = {}
for mKey in methodsConfigurationDict:
methodsInstances[mKey] = methodsConfigurationDict[mKey][0](*methodsConfigurationDict[mKey][1])
ts = TermSuggestionsAggregator()
@app.route('/')
@cross_origin(supports_credentials=True)
def api_root():
m = {}
for methodKey in sorted(methodsConfigurationDict.keys()):
        m[methodKey] = (methodsConfigurationDict[methodKey][0].__name__, methodsConfigurationDict[methodKey][1])
return jsonify(m)
@app.errorhandler(404)
@cross_origin(supports_credentials=True)
def api_error(error=None):
message = {
'status': 404,
'message': 'Error: ' + error,
}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.route("/suggester", methods = ['GET',])
@cross_origin(supports_credentials=True)
def api_term():
if request.method == 'GET':
if 'term' in request.args:
if 'agg-method' in request.args:
aggMethod = str(request.args['agg-method']).strip()
if aggMethod == 'sum':
aggMethod = Aggregation.Sum
elif aggMethod == 'average':
aggMethod = Aggregation.Average
else:
return api_error('specify correct aggregation method: sum or average')
else:
                # Default aggregation method
aggMethod = Aggregation.Sum
if 'methods[]' in request.args:
methods_str = request.values.getlist('methods[]')
methods = [methodsInstances[int(m)] for m in methods_str]
else:
return api_error('Please select one or more query expansion methods.')
# Get the suggestions
data = ts.getSuggestions(str(request.args['term']), methods, aggMethod)
resp = Response(MakeChart.dict2bar(data), status=200, mimetype='application/json')
return resp
else:
return api_error('a term is required')
if __name__ == "__main__":
app.run(debug=True)
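# Example request (added; host/port assume the default app.run() settings,
# and the method ids are the keys of methodsConfigurationDict above):
#   curl 'http://127.0.0.1:5000/suggester?term=water&agg-method=sum&methods[]=1&methods[]=2'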
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from __future__ import print_function
import logging
logging.getLogger(__name__).setLevel(logging.INFO)
import os,sys,time
#import yaml
import signal
from snowboy import snowboydecoder
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
from play_audio import play_music
from microphone import microphone
from alexa_query import internet_on,alexa_query
from busman import busman_query
path = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
alexa_tmp = '/tmp/alexa-pi'
if sys.platform.startswith('linux'):
alexa_tmp = '/dev/shm/alexa-pi'
try:
    os.makedirs(os.path.join(alexa_tmp, 'bak'))
except OSError:
    pass  # directory already exists
raw_recording = os.path.join(alexa_tmp,'recording.raw')
mp3_response = os.path.join(alexa_tmp,'response.mp3')
http_log = os.path.join(alexa_tmp,'http.log')
if sys.platform.startswith('linux'):
# handle alsa-lib error log things
from ctypes import CFUNCTYPE,cdll,c_char_p, c_int
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
    def py_error_handler(filename, line, function, err, fmt):
        pass  # silently discard ALSA's stderr chatter
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
def ding(): snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
# def handle():
# with open(raw_recording,'rb') as raw:
# directives = alexa_query(raw, mp3_response, http_log)
# if 'speak' in directives:
# play_music(mp3_response,60000)
# return directives
# def start2():
# while True:
# ding()
# if record_to_file(raw_recording):
# directives = handle()
def handle_alexa():
wait = True #False
while True:
ding()
mic = microphone(wait)
#logging.warn(('start microphone',wait))
#logging.warn(('end microphone',wait))
directives = alexa_query(mic, mp3_response, http_log)
logging.warn(('directives:', directives.keys()))
if 'speak' in directives:
play_music(mp3_response,60000)
#if len(directives) > 0 and not 'listen' in directives:
        if 'listen' not in directives:
break
wait = True
logging.warn(('[Snowboy Listening...]'))
ding()
def handle_okbus():
wait = False
while True:
ding()
mic = microphone(wait)
directives = busman_query(mic)
logging.warn(('directives:', directives.keys()))
        if len(directives) > 0 and 'listen' not in directives:
break
wait = True
logging.warn(('[Snowboy Listening...]'))
ding()
if __name__ == "__main__":
while not internet_on():
sys.stderr.write('.')
#start2()
models = [
'pmdl/Alexa.pmdl',
# 'pmdl/ok bus.pmdl'
]
sensitivity = [
0.45,
# 0.45
]
callbacks = [
handle_alexa,
# handle_okbus
]
# test
while True:
handle_alexa()
logging.warn(('handle_alexa finished'))
detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
logging.warn(('[Snowboy Listening...]'))
ding()
# main loop
detector.start(detected_callback=callbacks,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
# Emacs:
# mode: python
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vim: se ft=python st=4 ts=8 sts=4
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import netifaces
from cinderclient import exceptions as cinder_exceptions
from os_brick import exception
from oslotest import base
from brick_cinderclient_ext import volume_actions
@ddt.ddt
class TestVolumeActions(base.BaseTestCase):
def setUp(self):
super(TestVolumeActions, self).setUp()
self.volume_id = '3d96b134-75bd-492b-8372-330455cae38f'
self.brick_client = mock.Mock()
self.v_client = mock.Mock()
self.command_args = [self.v_client, self.volume_id]
def test_reserve(self):
with volume_actions.Reserve(*self.command_args) as cmd:
cmd.reserve()
self.v_client.volumes.reserve.assert_called_once_with(self.volume_id)
def test_reserve_failed(self):
self.v_client.volumes.reserve.side_effect = (
cinder_exceptions.BadRequest(400))
try:
with volume_actions.Reserve(*self.command_args) as cmd:
cmd.reserve()
except cinder_exceptions.BadRequest:
self.v_client.volumes.unreserve.assert_called_once_with(
self.volume_id)
self.v_client.volumes.reserve.assert_called_once_with(self.volume_id)
@mock.patch('netifaces.ifaddresses',
return_value={netifaces.AF_INET: [{'addr': '127.0.0.1'}]})
@mock.patch('netifaces.interfaces', return_value=['eth1'])
@mock.patch('brick_cinderclient_ext.brick_utils.get_my_ip',
return_value='1.0.0.0')
@ddt.data((None, {'ip': '1.0.0.0'}),
('eth1', {'ip': '127.0.0.1'}))
@ddt.unpack
def test_initialize_connection(self, _nic, _conn_prop,
_fake_my_ip, _fake_interfaces,
_fake_ifaddresses):
"""Test calling initialize_connection with different input params.
        Covers the following initialize_connection test cases:
1. Without any additional parameters in request;
2. Using --nic as a parameter;
TODO (mdovgal): add other test cases;
"""
self.brick_client.get_connector.return_value = _conn_prop
with volume_actions.InitializeConnection(*self.command_args) as cmd:
cmd.initialize(self.brick_client, False, False, _nic)
self.brick_client.get_connector.assert_called_once_with(False, False,
_nic)
self.v_client.volumes.initialize_connection.assert_called_once_with(
self.volume_id, _conn_prop)
@ddt.data('iscsi', 'iSCSI', 'ISCSI', 'rbd', 'RBD')
def test_verify_protocol(self, protocol):
with volume_actions.VerifyProtocol(*self.command_args) as cmd:
            # NOTE(e0ne): verify that no exception is raised
cmd.verify(protocol)
def test_verify_protocol_failed(self):
try:
with volume_actions.VerifyProtocol(*self.command_args) as cmd:
cmd.verify('protocol')
except exception.ProtocolNotSupported:
self.v_client.volumes.unreserve.assert_called_once_with(
self.volume_id)
def test_connect_volume(self):
connector = mock.Mock()
connector.connect_volume.return_value = {'device': 'info'}
with volume_actions.ConnectVolume(*self.command_args) as cmd:
cmd.connect(connector,
'connection_data', 'mountpoint', 'mode', 'hostname')
connector.connect_volume.assert_called_once_with('connection_data')
self.v_client.volumes.attach.assert_called_once_with(
self.volume_id,
instance_uuid=None, mountpoint='mountpoint', mode='mode',
host_name='hostname')
@ddt.data((None, {}), ('connection_data', 'connection_data'))
@ddt.unpack
def test_disconnect_no_device_info(self, command_arg, connector_arg):
connector = mock.Mock()
with volume_actions.DisconnectVolume(*self.command_args) as cmd:
cmd.disconnect(connector, 'connection_data', command_arg)
connector.disconnect_volume.assert_called_once_with('connection_data',
connector_arg)
def test_detach(self):
brick_client = mock.Mock()
brick_client.get_connector.return_value = 'connector'
with volume_actions.DetachVolume(*self.command_args) as cmd:
cmd.detach(brick_client, 'attachment_uuid',
'multipath', 'enforce_multipath')
brick_client.get_connector.assert_called_once_with('multipath',
'enforce_multipath')
self.v_client.volumes.terminate_connection.assert_called_once_with(
self.volume_id, 'connector')
self.v_client.volumes.detach.assert_called_once_with(
self.volume_id, 'attachment_uuid')
|
nilq/baby-python
|
python
|
import os
import unittest
import numpy as np
import pandas as pd
from cgnal.core.data.model.ml import (
LazyDataset,
IterGenerator,
MultiFeatureSample,
Sample,
PandasDataset,
PandasTimeIndexedDataset,
CachedDataset,
features_and_labels_to_dataset,
)
from typing import Iterator, Generator
from cgnal.core.tests.core import TestCase, logTest
from tests import TMP_FOLDER
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
lazyDat = LazyDataset(IterGenerator(samples_gen))
class features_and_labels_to_datasetTests(TestCase):
def test_features_and_labels_to_dataset(self):
dataset = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_labels = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
None,
)
self.assertTrue(isinstance(dataset_no_labels, CachedDataset))
self.assertTrue(isinstance(dataset, CachedDataset))
self.assertTrue(
(
dataset.getFeaturesAs("pandas")
== pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
.all()
.all()
)
self.assertTrue(
(
dataset.getLabelsAs("pandas")
== pd.DataFrame(pd.Series([0, 0, 0, 1], name="Label"))
)
.all()
.all()
)
class LazyDatasetTests(TestCase):
@logTest
def test_withLookback_MultiFeatureSample(self):
samples = [
MultiFeatureSample(
features=[np.array([100.0, 101.0]), np.array([np.NaN])], label=1.0
),
MultiFeatureSample(
features=[np.array([102.0, 103.0]), np.array([1.0])], label=2.0
),
MultiFeatureSample(
features=[np.array([104.0, 105.0]), np.array([2.0])], label=3.0
),
MultiFeatureSample(
features=[np.array([106.0, 107.0]), np.array([3.0])], label=4.0
),
MultiFeatureSample(
features=[np.array([108.0, 109.0]), np.array([4.0])], label=5.0
),
MultiFeatureSample(
features=[np.array([110.0, 111.0]), np.array([5.0])], label=6.0
),
MultiFeatureSample(
features=[np.array([112.0, 113.0]), np.array([6.0])], label=7.0
),
MultiFeatureSample(
features=[np.array([114.0, 115.0]), np.array([7.0])], label=8.0
),
MultiFeatureSample(
features=[np.array([116.0, 117.0]), np.array([8.0])], label=9.0
),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[102.0, 103.0], [104.0, 105.0], [106.0, 107.0]],
[[104.0, 105.0], [106.0, 107.0], [108.0, 109.0]],
[[106.0, 107.0], [108.0, 109.0], [110.0, 111.0]],
[[108.0, 109.0], [110.0, 111.0], [112.0, 113.0]],
]
)
y1 = np.array(
[
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
[[3.0], [4.0], [5.0]],
[[4.0], [5.0], [6.0]],
]
)
lab1 = np.array([4.0, 5.0, 6.0, 7.0])
X2 = np.array(
[
[[110.0, 111.0], [112.0, 113.0], [114.0, 115.0]],
[[112.0, 113.0], [114.0, 115.0], [116.0, 117.0]],
]
)
y2 = np.array([[[5.0], [6.0], [7.0]], [[6.0], [7.0], [8.0]]])
lab2 = np.array([8.0, 9.0])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
temp1X = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 0])))
temp1y = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 1])))
tmp1lab = batch1.getLabelsAs("array")
res = [
np.array_equal(temp1X, X1),
np.array_equal(temp1y, y1),
np.array_equal(tmp1lab, lab1),
]
tmp2 = batch2.getFeaturesAs("array")
temp2X = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 0])))
temp2y = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 1])))
tmp2lab = batch2.getLabelsAs("array")
res = res + [
np.array_equal(temp2X, X2),
np.array_equal(temp2y, y2),
np.array_equal(tmp2lab, lab2),
]
self.assertTrue(all(res))
@logTest
def test_withLookback_ArrayFeatureSample(self):
samples = [
Sample(features=np.array([100, 101]), label=1),
Sample(features=np.array([102, 103]), label=2),
Sample(features=np.array([104, 105]), label=3),
Sample(features=np.array([106, 107]), label=4),
Sample(features=np.array([108, 109]), label=5),
Sample(features=np.array([110, 111]), label=6),
Sample(features=np.array([112, 113]), label=7),
Sample(features=np.array([114, 115]), label=8),
Sample(features=np.array([116, 117]), label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_withLookback_ListFeatureSample(self):
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_features_labels(self):
self.assertTrue(isinstance(lazyDat.features(), Generator))
self.assertTrue(isinstance(lazyDat.labels(), Generator))
self.assertTrue(isinstance(lazyDat.getFeaturesAs(), Generator))
self.assertTrue(isinstance(lazyDat.getLabelsAs(), Generator))
self.assertEqual(next(lazyDat.getFeaturesAs()), samples[0].features)
self.assertEqual(next(lazyDat.getLabelsAs()), samples[0].label)
self.assertEqual(next(lazyDat.features()), samples[0].features)
self.assertEqual(next(lazyDat.labels()), samples[0].label)
class CachedDatasetTests(TestCase):
@logTest
def test_to_df(self):
self.assertTrue(isinstance(CachedDataset(lazyDat).to_df(), pd.DataFrame))
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["features"][0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["labels"][0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
@logTest
def test_asPandasDataset(self):
self.assertTrue(
isinstance(CachedDataset(lazyDat).asPandasDataset, PandasDataset)
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.features[0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.labels[0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
class PandasDatasetTests(TestCase):
dataset: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_label: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
@logTest
def test_check_none(self):
self.assertEqual(self.dataset._check_none(None), None)
self.assertEqual(self.dataset._check_none("test"), "test")
@logTest
def test__len__(self):
self.assertEqual(self.dataset.__len__(), 4)
@logTest
def test_items(self):
self.assertTrue(isinstance(self.dataset.items, Iterator))
self.assertEqual(next(self.dataset.items).features, {"feat1": 1.0, "feat2": 1})
self.assertEqual(next(self.dataset.items).label["Label"], 0)
self.assertEqual(
next(self.dataset_no_label.items).features, {"feat1": 1.0, "feat2": 1}
)
self.assertEqual(next(self.dataset_no_label.items).label, None)
@logTest
def test_dropna_none_labels(self):
res = pd.concat(
[pd.Series([1, 2, 3], name="feat1"), pd.Series([1, 3, 4], name="feat2")],
axis=1,
)
self.assertTrue(
(
self.dataset.dropna(subset=["feat1"]).features.reset_index(drop=True)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(feat__subset=["feat1"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(labs__subset=["Label"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
@logTest
def test_cached(self):
self.assertTrue(self.dataset.cached)
@logTest
def test_features_labels(self):
self.assertEqual(
self.dataset.features,
pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
)
self.assertTrue((self.dataset.labels["Label"] == pd.Series([0, 0, 0, 1])).all())
@logTest
def test_index(self):
self.assertTrue((self.dataset.index == range(4)).all())
@logTest
def test_createObject(self):
self.assertTrue(
isinstance(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
),
PandasDataset,
)
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).features,
self.dataset_no_label.features,
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).labels,
self.dataset_no_label.labels,
)
@logTest
def test_take(self):
self.assertTrue(isinstance(self.dataset.takeAsPandas(1), PandasDataset))
self.assertEqual(
self.dataset.takeAsPandas(1).features.feat2, pd.Series([1], name="feat2")
)
self.assertEqual(
self.dataset.takeAsPandas(1).labels["Label"], pd.Series([0], name="Label")
)
@logTest
def test_loc(self):
self.assertEqual(self.dataset.loc(2).features[2]["feat1"], 2)
self.assertEqual(self.dataset.loc(2).features[2]["feat2"], 3)
self.assertEqual(self.dataset.loc(2).labels[2]["Label"], 0)
self.assertTrue(self.dataset_no_label.loc(2).labels is None)
@logTest
def test_from_sequence(self):
features_1 = pd.DataFrame(
{"feat1": [1, 2, 3, 4], "feat2": [100, 200, 300, 400]}, index=[1, 2, 3, 4]
)
features_2 = pd.DataFrame(
{"feat1": [9, 11, 13, 14], "feat2": [90, 110, 130, 140]},
index=[10, 11, 12, 13],
)
features_3 = pd.DataFrame(
{"feat1": [90, 10, 10, 1400], "feat2": [0.9, 0.11, 0.13, 0.14]},
index=[15, 16, 17, 18],
)
labels_1 = pd.DataFrame({"target": [1, 0, 1, 1]}, index=[1, 2, 3, 4])
labels_2 = pd.DataFrame({"target": [1, 1, 1, 0]}, index=[10, 11, 12, 13])
labels_3 = pd.DataFrame({"target": [0, 1, 1, 0]}, index=[15, 16, 17, 18])
dataset_1 = PandasDataset(features_1, labels_1)
dataset_2 = PandasDataset(features_2, labels_2)
dataset_3 = PandasDataset(features_3, labels_3)
dataset_merged = PandasDataset.from_sequence([dataset_1, dataset_2, dataset_3])
self.assertEqual(
pd.concat([features_1, features_2, features_3]), dataset_merged.features
)
self.assertEqual(
pd.concat([labels_1, labels_2, labels_3]), dataset_merged.labels
)
@logTest
def test_serialization(self):
filename = os.path.join(TMP_FOLDER, "my_dataset.p")
self.dataset.write(filename)
newDataset: PandasDataset = PandasDataset.load(filename)
self.assertTrue(isinstance(newDataset, PandasDataset))
self.assertTrue(
(self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN"))
.all()
.all()
)
@logTest
def test_creation_from_samples(self):
samples = [
Sample(features=[100, 101], label=1, name=1),
Sample(features=[102, 103], label=2, name=2),
Sample(features=[104, 105], label=1, name=3),
Sample(features=[106, 107], label=2, name=4),
Sample(features=[108, 109], label=2, name=5),
Sample(features=[110, 111], label=2, name=6),
Sample(features=[112, 113], label=1, name=7),
Sample(features=[114, 115], label=2, name=8),
Sample(features=[116, 117], label=2, name=9),
]
lazyDataset = CachedDataset(samples).filter(lambda x: x.label <= 5)
assert isinstance(lazyDataset, LazyDataset)
for format in ["pandas", "array", "dict"]:
features1 = lazyDataset.getFeaturesAs(format)
labels1 = lazyDataset.getLabelsAs(format)
cached: CachedDataset = lazyDataset.asCached
features2 = cached.getFeaturesAs(format)
labels2 = cached.getLabelsAs(format)
self.assertEqual(features1, features2)
self.assertEqual(labels1, labels2)
pandasDataset = cached.asPandasDataset
features3 = pandasDataset.getFeaturesAs(format)
labels3 = pandasDataset.getLabelsAs(format)
self.assertEqual(features1, features3)
self.assertEqual(labels1, labels3)
@logTest
def test_union(self):
union = self.dataset.union(
PandasDataset(
features=pd.concat(
[
pd.Series([np.nan, 5, 6, 7], name="feat1"),
pd.Series([7, 8, 9, 10], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0, 0, 1], name="Label"),
)
)
self.assertTrue(isinstance(union, PandasDataset))
self.assertEqual(
union.features.reset_index(drop=True),
pd.concat(
[
pd.Series([1, np.nan, 2, 3, np.nan, 5, 6, 7], name="feat1"),
pd.Series([1, 2, 3, 4, 7, 8, 9, 10], name="feat2"),
],
axis=1,
),
)
self.assertEqual(
union.labels.Label.reset_index(drop=True),
pd.Series([0, 0, 0, 1, 0, 0, 0, 1], name="Label"),
)
@logTest
def test_intersection(self):
other = PandasDataset(
features=pd.concat(
[
pd.Series([1, 2, 3, 4], name="feat1"),
pd.Series([5, 6, 7, 8], name="feat2"),
],
axis=1,
),
labels=pd.Series([1, 1, 0, 0], name="Label", index=[0, 1, 4, 5]),
)
self.assertEqual(other.intersection().labels.index.to_list(), [0, 1])
self.assertEqual(other.intersection().features.index.to_list(), [0, 1])
@logTest
def test_getFeaturesAs(self):
self.assertTrue(isinstance(self.dataset.getFeaturesAs("array"), np.ndarray))
self.assertTrue(isinstance(self.dataset.getFeaturesAs("pandas"), pd.DataFrame))
self.assertTrue(isinstance(self.dataset.getFeaturesAs("dict"), dict))
@logTest
def test_getLabelsAs(self):
self.assertTrue(isinstance(self.dataset.getLabelsAs("array"), np.ndarray))
self.assertTrue(isinstance(self.dataset.getLabelsAs("pandas"), pd.DataFrame))
self.assertTrue(isinstance(self.dataset.getLabelsAs("dict"), dict))
class PandasTimeIndexedDatasetTests(TestCase):
dates = pd.date_range("2010-01-01", "2010-01-04")
dateStr = [str(x) for x in dates]
dataset = PandasTimeIndexedDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], index=dateStr, name="feat1"),
pd.Series([1, 2, 3, 4], index=dateStr, name="feat2"),
],
axis=1,
)
)
@logTest
def test_time_index(self):
# duck-typing check
days = [x.day for x in self.dataset.features.index]
        self.assertEqual(set(days), set(range(1, 5)))
@logTest
def test_serialization(self):
filename = os.path.join(TMP_FOLDER, "my_dataset.p")
self.dataset.write(filename)
newDataset = type(self.dataset).load(filename)
self.assertTrue(isinstance(newDataset, PandasTimeIndexedDataset))
self.assertTrue(
(self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN"))
.all()
.all()
)
@logTest
def test_createObject(self):
NewDataset = self.dataset.createObject(
features=pd.concat(
[
pd.Series([1, 3], index=self.dateStr[0:2], name="feat1"),
pd.Series([1, 2], index=self.dateStr[0:2], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0], index=self.dateStr[0:2], name="Label"),
)
self.assertTrue(isinstance(NewDataset, PandasTimeIndexedDataset))
self.assertTrue(
(
NewDataset.features
== pd.concat(
[
pd.Series(
[1, 3],
index=map(pd.to_datetime, self.dateStr[0:2]),
name="feat1",
),
pd.Series(
[1, 2],
index=map(pd.to_datetime, self.dateStr[0:2]),
name="feat2",
),
],
axis=1,
)
)
.all()
.all()
)
self.assertTrue(
(
NewDataset.labels.values
== pd.Series([0, 0], index=self.dateStr[0:2], name="Label").values
).all()
)
@logTest
def test_loc(self):
new_dataset = self.dataset.loc(
[x for x in pd.date_range("2010-01-01", "2010-01-02")]
)
to_check = PandasTimeIndexedDataset(
features=pd.DataFrame(self.dataset.features.iloc[:2])
)
self.assertIsInstance(new_dataset, PandasTimeIndexedDataset)
self.assertEqual(new_dataset.features, to_check.features)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
"""CheckingProxy derived from jsonrpc.proxy due to subclassing problems
w/getattr. Converts service errors into ServiceError exceptions, otherwise
call returns the jsonrpc "result" field.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import uuid
import json
import time
import os
import crds
from crds import python23
from crds import log, config
from crds import exceptions
def apply_with_retries(func, *pars, **keys):
"""Apply function func() as f(*pargs, **keys) and return the result. Retry on any exception as defined in config.py"""
retries = config.get_client_retry_count()
delay = config.get_client_retry_delay_seconds()
for retry in range(retries):
try:
return func(*pars, **keys)
except Exception as exc:
log.verbose("FAILED: Attempt", str(retry+1), "of", retries, "with:", str(exc))
log.verbose("FAILED: Waiting for", delay, "seconds before retrying") # waits after total fail...
time.sleep(delay)
exc2 = exc
else:
raise exc2
def message_id():
"""Return a nominal identifier for this program."""
return _program_name() + "-" + crds.__version__ + "-" + _PROCESS_ID + "-" + _request_id()
def _program_name():
"""Return the name of this program."""
return os.path.basename(os.path.splitext(sys.argv[0])[0])
try:
_PROCESS_ID = str(uuid.uuid4())
except Exception:
_PROCESS_ID = "00000000-0000-0000-00000000000000000"
MSG_NO = 0
def _request_id():
"""Return an identifier unique to this particular JSONRPC request."""
global MSG_NO
MSG_NO += 1
return "%08x" % MSG_NO
class CheckingProxy(object):
"""CheckingProxy converts calls to undefined methods into JSON RPC service
calls. If the JSON rpc returns an error, CheckingProxy raises a
ServiceError exception containing the error's message.
XXX NOTE: Always underscore new methods or you may hide a real JSONRPC method
which also appears in the proxy object's namespace with the same name.
"""
def __init__(self, service_url, service_name=None, version='1.0'):
self.__version = str(version)
self.__service_url = service_url
self.__service_name = service_name
def __getattr__(self, name):
        if self.__service_name is not None:
name = "%s.%s" % (self.__service_name, name)
return CheckingProxy(self.__service_url, name, self.__version)
def __repr__(self):
return self.__class__.__name__ + "(url='%s', method='%s')" % \
(self.__service_url, self.__service_name)
def _call(self, *args, **kwargs):
"""Core of RPC dispatch without error interpretation, logging, or return value decoding."""
params = kwargs if len(kwargs) else args
# if Any.kind(params) == Object and self.__version != '2.0':
# raise Exception('Unsupport arg type for JSON-RPC 1.0 '
# '(the default version for this client, '
# 'pass version="2.0" to use keyword arguments)')
jsonrpc_params = {"jsonrpc": self.__version,
"method": self.__service_name,
'params': params,
'id': message_id()
}
parameters = json.dumps(jsonrpc_params)
url = self._get_url(jsonrpc_params)
if "serverless" in url or "server-less" in url:
raise exceptions.ServiceError("Configured for server-less mode. Skipping JSON RPC " + repr(self.__service_name))
if log.get_verbose() <= 50:
log.verbose("CRDS JSON RPC", self.__service_name, params if len(str(params)) <= 60 else "(...)", "-->")
else:
log.verbose("CRDS JSON RPC to", url, "parameters", params, "-->")
response = apply_with_retries(self._call_service, parameters, url)
try:
rval = json.loads(response)
except Exception:
log.warning("Invalid CRDS jsonrpc response:\n", response)
raise
return rval
def _get_url(self, jsonrpc_params):
"""Return the JSONRPC URL used to perform a method call. Since post parameters are not visible in the
log, annotate the URL with additional method id paths which are functionally ignored but visible in
the log.
"""
return self.__service_url + jsonrpc_params["method"] + "/" + jsonrpc_params["id"] + "/"
def _call_service(self, parameters, url):
"""Call the JSONRPC defined by `parameters` and raise a ServiceError on any exception."""
if not isinstance(parameters, bytes):
parameters = parameters.encode("utf-8")
try:
# context = ssl.create_default_context()
# channel = urlopen(url, parameters, context=context)
channel = python23.urlopen(url, parameters)
return channel.read().decode("utf-8")
except Exception as exc:
raise exceptions.ServiceError("CRDS jsonrpc failure " + repr(self.__service_name) + " " + str(exc))
def __call__(self, *args, **kwargs):
jsonrpc = self._call(*args, **kwargs)
if jsonrpc["error"]:
decoded = str(python23.unescape(jsonrpc["error"]["message"]))
raise self.classify_exception(decoded)
else:
result = crds_decode(jsonrpc["result"])
result = fix_strings(result)
log.verbose("RPC OK", log.PP(result) if log.get_verbose() >= 70 else "")
return result
def classify_exception(self, decoded):
"""Interpret exc __str__ to define as more precise CRDS exception."""
if "Channel" in decoded and "not found" in decoded:
return exceptions.StatusChannelNotFoundError(decoded)
elif "External agent requested calling process termination." in decoded:
return exceptions.OwningProcessAbortedError(decoded)
else:
msg = "CRDS jsonrpc failure " + repr(self.__service_name) + " " + str(decoded)
return exceptions.ServiceError(msg)
def fix_strings(rval):
"""Convert unicode to strings."""
if isinstance(rval, python23.string_types):
return str(rval)
elif isinstance(rval, tuple):
return tuple([fix_strings(x) for x in rval])
elif isinstance(rval, list):
return [fix_strings(x) for x in rval]
elif isinstance(rval, dict):
return { fix_strings(key):fix_strings(val) for (key, val) in rval.items()}
else:
return rval
# ============================================================================
# These operate transparently in the proxy and are optionally used by the server.
#
# This makes a new client with crds_decoder compatible with both encoding and
# unencoding servers.
#
# An older client without crds_decoder will not work with a new server which is encoding.
# That could be achieved, but wasn't because the function where the feature was
# needed would not work without compression anyway.
def crds_encode(obj):
"""Return a JSON-compatible encoding of `obj`, nominally json-ified, compressed,
and base64 encooded. This is nominally to be called on the server.
"""
return dict(crds_encoded = "1.0",
crds_payload = json.dumps(obj).encode('zlib').encode('base64'))
def crds_decode(msg):
"""Decode something which was crds_encode'd, or return it unaltered if
it wasn't.
"""
if isinstance(msg, dict) and "crds_encoded" in msg:
json_str = msg["crds_payload"].decode('base64').decode('zlib')
return json.loads(json_str)
else:
return msg
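# Usage sketch (added, hedged; the URL and method name are illustrative):
#
#     proxy = CheckingProxy("https://crds-server.example/json/", version="1.0")
#     info = proxy.get_server_info()   # attribute access becomes a JSON-RPC call
#
# crds_decode() passes values through unchanged unless they carry the
# "crds_encoded" marker, so newer clients interoperate with non-encoding servers.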
|
nilq/baby-python
|
python
|
import sys
from PyQt5.QtWidgets import QAction,QHBoxLayout,QWidget,QApplication,QMainWindow
from PyQt5.QtGui import QIcon
class QToolBarDemo(QMainWindow):
def __init__(self):
super(QToolBarDemo, self).__init__()
        # set the window size
self.resize(400, 150)
        # set the window title
self.setWindowTitle("QToolBarDemo")
toolBar = self.addToolBar('File')
new = QAction(QIcon('u1.ico'), 'new', self)
toolBar.addAction(new)
open = QAction(QIcon('u2.ico'), 'open', self)
toolBar.addAction(open)
save = QAction(QIcon('u3.ico'), 'save', self)
toolBar.addAction(save)
toolBar.actionTriggered[QAction].connect(self.btnClick)
        # create a horizontal layout
layout = QHBoxLayout()
mainFrame = QWidget()
mainFrame.setLayout(layout)
self.setCentralWidget(mainFrame)
def btnClick(self, w):
print("pressed tool button is:", w.text())
if __name__ == '__main__':
app = QApplication(sys.argv)
main = QToolBarDemo()
main.show()
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models
from django.db.models import Manager
class EqualizeMixin:
equal_fields = ()
def __eq__(self, other):
equal_fields = self._get_equal_fields()
for field in equal_fields:
if getattr(self, field) != getattr(other, field):
return False
return True
def _get_equal_fields(self):
if not self.equal_fields:
raise NotImplementedError()
return self.equal_fields
def merge(self, other):
equal_fields = self._get_equal_fields()
for field in equal_fields:
if getattr(self, field) != getattr(other, field):
setattr(self, field, getattr(other, field))
class ChangeMixin:
def _change(self, **kwargs):
is_changed = False
for key, value in kwargs.items():
if getattr(self, key) == value:
continue
setattr(self, key, value)
is_changed = True
return is_changed
class BaseModel(EqualizeMixin, ChangeMixin, models.Model):
    create_time = models.DateTimeField(auto_now_add=True, editable=False, verbose_name='등록일')  # 'registration date'
    update_time = models.DateTimeField(auto_now=True, verbose_name='수정일')  # 'modification date'
objects = Manager()
equal_fields = ()
class Meta:
abstract = True
class BaseUserModel(BaseModel, AbstractBaseUser):
class Meta:
abstract = True
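# A minimal sketch of the mixin contract (added; Article is hypothetical and
# needs a configured Django app to actually run):
#
#     class Article(BaseModel):
#         title = models.CharField(max_length=100)
#         equal_fields = ('title',)
#
#     a == b                    # compares only the fields listed in equal_fields
#     a._change(title='new')    # returns True only if a value actually changed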
|
nilq/baby-python
|
python
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.mo_base_complex_type import MoBaseComplexType
from intersight.model.virtualization_esxi_clone_custom_spec import VirtualizationEsxiCloneCustomSpec
from intersight.model.virtualization_esxi_ova_custom_spec import VirtualizationEsxiOvaCustomSpec
globals()['MoBaseComplexType'] = MoBaseComplexType
globals()['VirtualizationEsxiCloneCustomSpec'] = VirtualizationEsxiCloneCustomSpec
globals()['VirtualizationEsxiOvaCustomSpec'] = VirtualizationEsxiOvaCustomSpec
class VirtualizationBaseCustomSpec(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
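        # The ('object_type',) mapping below intentionally mirrors
        # ('class_id',): both discriminator properties accept the same set
        # of concrete model types.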
('object_type',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded.
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'virtualization.EsxiCloneCustomSpec': VirtualizationEsxiCloneCustomSpec,
'virtualization.EsxiOvaCustomSpec': VirtualizationEsxiOvaCustomSpec,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_id, object_type, *args, **kwargs): # noqa: E501
"""VirtualizationBaseCustomSpec - a model defined in OpenAPI
Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provide the list of concrete types that can be instantiated from this abstract type.
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provide the list of concrete types that can be instantiated from this abstract type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseComplexType,
],
'oneOf': [
],
}
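# Usage sketch (hedged): the concrete class name below is one of the two
# discriminator targets listed above and is purely illustrative; the abstract
# type can also be constructed directly with matching ClassId/ObjectType.
#
#   spec = VirtualizationBaseCustomSpec(
#       class_id="virtualization.EsxiCloneCustomSpec",
#       object_type="virtualization.EsxiCloneCustomSpec",
#   )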
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding=utf8
"""\
Our Standards
Jill-Jênn Vie et Christoph Dürr - 2020
"""
from sys import stdin
def readint():
"""
function to read an integer from stdin
"""
return int(stdin.readline())
def readstr():
"""
function to read a string from stdin
"""
return stdin.readline().strip()
def readarray(typ):
"""
function to read an array
"""
return list(map(typ, stdin.readline().split()))
# pylint: disable=redefined-outer-name
def readmatrix(n):
"""
function to read a matrix
"""
M = []
for _ in range(n):
row = readarray(int)
assert len(row) == n
M.append(row)
return M
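# Usage sketch (reads from stdin, so shown as comments; the input values are
# illustrative):
#   n = readint()         # e.g. the line "3"
#   M = readmatrix(n)     # then n lines of n integers each
#   row = readarray(int)  # one line of whitespace-separated integers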
|
nilq/baby-python
|
python
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from ECAgent.Core import Model
# Can be used to customize CSS of Visualizer
external_stylesheets = ['https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerCustom.css',
'https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerBase.css']
class VisualInterface:
"""
This is the base class for Visual Interfaces.
VisualInterfaces utilize the dash package to create a WebApp that lets users view the results of their
model once a run has been completed, or in real time.
There are a few things to note about the VisualInterface class:
* By calling the VisualInterface.__init__() method, your WebApp will have features set up for you: namely play,
stop, restart and step. It'll also include a banner with your system's name as its title.
* A frameFreq of 0.0 means that your system is static and will only ever be constructed once.
If you want a dynamic WebApp, you must set the frameFreq to some non-zero positive number. If your frameFreq is 0.0,
the play, stop, restart and step buttons will not be added to your WebApp.
* The server/WebApp will start once you call the VisualInterface.app.run_server().
* The frameFreq property determines how frequently (in milliseconds) the SystemManager.executeSystems() method is
called and how often your graphs will update.
"""
def __init__(self, name, model: Model, frameFreq: float = 0.0):
self.name = name
self.model = model
self.frameFreq = frameFreq
self.running = False # Is used to determine whether a dynamic model is running or not.
# Create app
self.app = dash.Dash(
self.name, meta_tags=[{"name": "viewport", "content": "width=device-width"}],
external_stylesheets=external_stylesheets
)
# Create parameter lists
self.displays = []
self.parameters = []
self.createBaseLayout()
def isStatic(self) -> bool:
return self.frameFreq == 0.0
def execute(self):
self.render()
def render(self):
pass
def createBaseLayout(self):
"""Creates the base layout"""
# Create banner
banner = html.Div(
className="app-banner row",
children=[
html.H2(className="h2-title", children=self.name),
html.H2(className="h2-title-mobile", children=self.name),
],
)
# Add parameter header
self.addParameter(createLabel('parameter-heading', 'Parameters:'))
# If frameFreq > 0, create the play/stop, restart and step buttons and the Timestep label
if not self.isStatic():
# Add Play/Restart/Step Buttons
banner.children.append(
html.Div(
className='div-play-buttons',
id='dynamic-button',
children=[
html.Button("Play", id='play-stop-button', n_clicks=0),
html.Button('Restart', id='restart-button', n_clicks=0),
html.Button('Step', id='step-button', n_clicks=0),
dcc.Interval(
id='interval-component',
interval=self.frameFreq,
n_intervals=0
)
]
)
)
# Add Timestep label
self.parameters.append(createLabel('timestep-label', 'Timestep: 0'))
# Apply Play/Stop Callback
self.app.callback(
dash.dependencies.Output('play-stop-button', 'children'),
[dash.dependencies.Input('play-stop-button', 'n_clicks')]
)(self.play_button_callback)
# Apply executeSystems() on interval callback and Step button callback
self.app.callback(
dash.dependencies.Output('timestep-label', 'children'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks')]
)(self.execute_system_on_play_callback)
self.app.layout = html.Div(
children=[
# Error Message
html.Div(id="error-message"),
# Top Banner
banner,
# Body of the App
html.Div(
className="row app-body",
children=[
# User Controls
html.Div(
className="four columns card",
children=html.Div(
className="bg-white user-control",
children=self.parameters)
),
# Graph
html.Div(
className="eight columns card-left",
children=self.displays,
style={'margin-left': 0}
),
dcc.Store(id="error", storage_type="memory"),
],
),
]
)
def addDisplay(self, content, add_break=True):
self.displays.append(content)
if add_break:
self.displays.append(html.Br())
def addParameter(self, content):
self.parameters.append(content)
# #################################### Class Callbacks ###########################################
def play_button_callback(self, n_clicks):
if n_clicks % 2 == 0:
self.running = False
return 'Play'
else:
self.running = True
return 'Stop'
def execute_system_on_play_callback(self, n_intervals, n_clicks):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if context == 'step-button':
if not self.running:
self.model.systemManager.executeSystems()
elif self.running:
self.model.systemManager.executeSystems()
return "Timestep: {}".format(self.model.systemManager.timestep)
# ############################## Graph and Parameter Functionality ##############################
def createScatterPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Scatter plot Figure. This function supports multiple traces supplied to the 'data' parameter
Data should be supplied in the following format:
[[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
the properties specified..
"""
traces = []
for data_packet in data:
scatter = go.Scatter(x=data_packet[0], y=data_packet[1])
traces.append(scatter)
if len(data_packet) > 2:
scatter.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
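# Example (sketch): two traces; the second packet carries the optional
# per-trace style dict, and all names/values here are illustrative.
# fig = createScatterPlot(
#     'Demo',
#     [[[0, 1, 2], [3, 1, 2]],
#      [[0, 1, 2], [1, 2, 3], {'name': 'trace 2', 'mode': 'markers'}]],
#     layout_kwargs={'height': 400})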
def createScatterGLPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Scatter plot Figure that will be rendered using WebGL.
This function supports multiple traces supplied to the 'data' parameter. Data should be supplied in the
following format:
[[xdata_1, ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n, ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
the properties specified.
"""
traces = []
for data_packet in data:
scatter = go.Scattergl(x=data_packet[0], y=data_packet[1])
traces.append(scatter)
if len(data_packet) > 2:
scatter.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
def createBarGraph(title: str, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Bar Graph Figure. This function supports multiple traces supplied to the 'data' parameter
Data should be supplied in the following format:
[[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
the properties specified..
"""
traces = []
for data_packet in data:
bar = go.Bar(x=data_packet[0], y=data_packet[1])
traces.append(bar)
if len(data_packet) > 2:
bar.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
def createHeatMap(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a HeatMap Figure object using Plotly graph objects. The data object determines the dimensions of the
heatmap. The len(data) will be the height. The len(data[i]) will be the width of the heatmap. The Heatmap is
constructed in a bottom-up and left-to-right manner.
Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category
name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the
height of your Heatmap.
A custom color scale can be supplied; ensure that it follows the correct format, that the threshold values are
normalized, and that the colors are rgb strings like 'rgb(r_val, g_val, b_val)'."""
return go.Figure(data=go.Heatmap(
z=data,
**heatmap_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
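# Example (sketch): a 2x3 heatmap built bottom-up, so row [0, 1, 2] sits at
# the bottom; the colorscale choice is illustrative.
# fig = createHeatMap('Occupancy', [[0, 1, 2], [3, 4, 5]],
#                     heatmap_kwargs={'colorscale': 'Viridis'})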
def createHeatMapGL(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a HeatMap Figure object using Plotly graph objects that will be rendered by WebGL.
The data object determines the dimensions of the heatmap. The len(data) will be the height.
The len(data[i]) will be the width of the heatmap.
The Heatmap is constructed in a bottom-up and left-to-right manner.
Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category
name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the
height of your Heatmap.
A custom color scale can be supplied; ensure that it follows the correct format, that the threshold values are
normalized, and that the colors are rgb strings like 'rgb(r_val, g_val, b_val)'."""
return go.Figure(data=go.Heatmapgl(
z=data,
**heatmap_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
def createContourMap(title: str, data: [[float]], contour_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a Contour Figure object using Plotly graph objects. The data object determines the dimensions of the
Contour plot. The len(data) will be the height. The len(data[i]) will be the width of the contour plot.
The contour plot is constructed in a bottom-up and left-to-right manner.
The contour plot can be customized using the contour_kwargs dict. The dict will be supplied to the contour plot
graph object when it is created. See the plotly api for a list of customizable properties. The same can similarly be
applied to layout_kwargs, which changes the layout of the contour plot."""
return go.Figure(data=go.Contour(
z=data,
**contour_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
def createTable(title: str, headers: [str], cells: [[]], header_kwargs: dict = {}, cell_kwargs: dict = {},
layout_kwargs: dict = {}):
"""Creates a Table figure using Plotly graph objects. Table headers and cells need to be supplied separately.
The data format for the headers and cells are as follows:
Headers: [hdr1, hdr2,...,hdrN]
Cells: [column1_data, column2_data,..., columnN_data].
The Table headers and cells are customized separately using the header_kwargs and cell_kwargs parameters. The
layout of the Table can also be customized using the layout_kwargs."""
return go.Figure(data=go.Table(
header=dict(values=headers, **header_kwargs),
cells=dict(values=cells, **cell_kwargs)
), layout=go.Layout(title=title, **layout_kwargs))
def createPieChart(title: str, labels: [str], values: [float], pie_kwargs: dict = {}, layout_kwargs: dict = {}):
""" Creates a Pie Chart Figure using Plotly graph objects. Chart labels and values need to be supplied separately.
The data format for the labels and values are as follows:
Labels: [lbl1, lbl2,..., lblN]
Values: [val1, val2,..., valN]
The Pie chart can be customized using the pie_kwargs parameter. The layout of the Pie chart can be customized using
the layout_kwargs parameter."""
return go.Figure(data=go.Pie(labels=labels, values=values, **pie_kwargs),
layout=go.Layout(title=title, **layout_kwargs))
def createGraph(graphID: str, figure: go.Figure, classname: str = 'bg-white'):
return html.Div(
className=classname,
children=[
dcc.Graph(id=graphID, figure=figure)
],
style={'height': figure.layout.height}
)
def createLiveGraph(graphID: str, figure: go.Figure, vs: VisualInterface, callback, classname: str = 'bg-white'):
graph = createGraph(graphID, figure, classname)
def update_live_graph_callback(n_intervals, n_clicks, figure):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if (context == 'step-button' and not vs.running) or vs.running:
return callback(figure)
else:
return figure
# Add Callback
vs.app.callback(
dash.dependencies.Output(graphID, 'figure'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks'),
dash.dependencies.Input(graphID, 'figure')]
)(update_live_graph_callback)
return graph
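# Example (sketch): a live scatter graph whose callback appends the latest
# model value on each step; 'vs' and the 'metric' attribute are illustrative.
# def update(figure):
#     figure['data'][0]['x'].append(vs.model.systemManager.timestep)
#     figure['data'][0]['y'].append(vs.model.metric)
#     return figure
# graph = createLiveGraph('metric-graph',
#                         createScatterPlot('Metric', [[[0], [0]]]), vs, update)
# vs.addDisplay(graph)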
def createLabel(label_id, content):
return html.Div(className="padding-top-bot", children=[html.H6(content, id=label_id)])
def createLiveLabel(label_id, initial_content, vs: VisualInterface, callback):
label = createLabel(label_id, initial_content)
def update_live_label_callback(n_intervals, n_clicks, children):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if (context == 'step-button' and not vs.running) or vs.running:
return callback(children)
else:
return children
# Add Callback
vs.app.callback(
dash.dependencies.Output(label_id, 'children'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks'),
dash.dependencies.Input(label_id, 'children')]
)(update_live_label_callback)
return label
def createSlider(slider_id: str, slider_name: str, vs: VisualInterface, set_val, min_val: float = 0.0,
max_val: float = 1.0, step: float = 0.01):
"""This function will add a slider to the parameter window of the visual interface. It will also automatically add
a callback function that will supply your custom function 'set_val' with the value of the slider"""
# Add html
slider = html.Div(
className="padding-top-bot",
children=[
html.H6('{}: [{}]'.format(slider_name, max_val), id=slider_id + '-title'),
dcc.Slider(
id=slider_id,
min=min_val,
max=max_val,
value=max_val,
step=step
)
]
)
# Add callback
def set_slider_val(value):
set_val(value)
return '{}: [{}]'.format(slider_name, value)
vs.app.callback(dash.dependencies.Output(slider_id + '-title', 'children'),
[dash.dependencies.Input(slider_id, 'value')])(set_slider_val)
return slider
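# Example (sketch): bind a slider to a model attribute; 'vs' and the
# 'growth_rate' attribute are illustrative names.
# slider = createSlider('rate-slider', 'Growth rate', vs,
#                       lambda v: setattr(vs.model, 'growth_rate', v),
#                       min_val=0.0, max_val=2.0, step=0.05)
# vs.addParameter(slider)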
def addRect(fig: go.Figure, x, y, width=1, height=1, **shape_kwargs):
"""Adds a rectangle to Figure 'fig'. x & y refer to the coordinates of the bottom left corner of the rectangle."""
x1 = x + width
y1 = y + height
fig.add_shape(
x0=x,
y0=y,
x1=x1,
y1=y1,
type='rect',
**shape_kwargs
)
def addCircle(fig: go.Figure, x, y, radius=0.5, **shape_kwargs):
"""Adds a circle to Figure 'fig'. x & y are the coordinates of the center of the circle"""
x0 = x - radius
x1 = x + radius
y0 = y - radius
y1 = y + radius
fig.add_shape(
x0=x0,
x1=x1,
y0=y0,
y1=y1,
type='circle',
**shape_kwargs
)
def createTabs(labels: [str], tabs: []):
return html.Div([
dcc.Tabs(
[
dcc.Tab(label=labels[x], children=tabs[x]) for x in range(len(labels))
]
)])
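# End-to-end sketch (assumes an ECAgent Model subclass 'MyModel' defined
# elsewhere; frameFreq is in milliseconds, so 1000.0 steps once per second):
# model = MyModel()
# vs = VisualInterface('Demo App', model, frameFreq=1000.0)
# vs.addDisplay(createGraph('counts', createBarGraph('Counts', [[[0, 1], [5, 7]]])))
# vs.app.run_server(debug=True)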
|
nilq/baby-python
|
python
|
"""Test ``X-Forwarded-For`` middleware."""
from __future__ import annotations
from ipaddress import _BaseNetwork, ip_network
from typing import Dict, List, Optional
import pytest
from fastapi import FastAPI, Request
from httpx import AsyncClient
from safir.middleware.x_forwarded import XForwardedMiddleware
def build_app(proxies: Optional[List[_BaseNetwork]] = None) -> FastAPI:
"""Construct a test FastAPI app with the middleware registered."""
app = FastAPI()
app.add_middleware(XForwardedMiddleware, proxies=proxies)
return app
@pytest.mark.asyncio
async def test_ok() -> None:
app = build_app([ip_network("11.0.0.0/8")])
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert request.state.forwarded_host == "foo.example.com"
assert request.state.forwarded_proto == "https"
assert request.client.host == "10.10.10.10"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get(
"/",
headers={
"X-Forwarded-For": "10.10.10.10, 11.11.11.11",
"X-Forwarded-Proto": "https, http",
"X-Forwarded-Host": "foo.example.com",
},
)
assert r.status_code == 200
@pytest.mark.asyncio
async def test_defaults() -> None:
app = build_app()
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert request.state.forwarded_host == "foo.example.com"
assert request.state.forwarded_proto == "http"
assert request.client.host == "192.168.0.1"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get(
"/",
headers={
"X-Forwarded-For": ("1.1.1.1, 192.168.0.1"),
"X-Forwarded-Proto": "https, http",
"X-Forwarded-Host": "foo.example.com",
},
)
assert r.status_code == 200
@pytest.mark.asyncio
async def test_no_forwards() -> None:
app = build_app([ip_network("127.0.0.1")])
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert not request.state.forwarded_host
assert not request.state.forwarded_proto
assert request.client.host == "127.0.0.1"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get("/")
assert r.status_code == 200
@pytest.mark.asyncio
async def test_all_filtered() -> None:
app = build_app([ip_network("10.0.0.0/8")])
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert request.state.forwarded_host == "foo.example.com"
assert request.state.forwarded_proto == "https"
assert request.client.host == "10.10.10.10"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get(
"/",
headers={
"X-Forwarded-For": "10.10.10.10, 10.0.0.1",
"X-Forwarded-Proto": "https, http",
"X-Forwarded-Host": "foo.example.com",
},
)
assert r.status_code == 200
@pytest.mark.asyncio
async def test_one_proto() -> None:
app = build_app([ip_network("11.11.11.11")])
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert request.state.forwarded_host == "foo.example.com"
assert request.state.forwarded_proto == "https"
assert request.client.host == "10.10.10.10"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get(
"/",
headers={
"X-Forwarded-For": "10.10.10.10, 11.11.11.11",
"X-Forwarded-Proto": "https",
"X-Forwarded-Host": "foo.example.com",
},
)
assert r.status_code == 200
@pytest.mark.asyncio
async def test_no_proto_or_host() -> None:
app = build_app([ip_network("11.11.11.11")])
@app.get("/")
async def handler(request: Request) -> Dict[str, str]:
assert not request.state.forwarded_host
assert not request.state.forwarded_proto
assert request.client.host == "10.10.10.10"
return {}
async with AsyncClient(app=app, base_url="http://example.com") as client:
r = await client.get(
"/", headers={"X-Forwarded-For": "10.10.10.10, 11.11.11.11"}
)
assert r.status_code == 200
@pytest.mark.asyncio
async def test_too_many_headers() -> None:
"""Test handling of duplicate headers.
httpx doesn't allow passing in duplicate headers, so we cannot test end to
end. Instead, test by generating a mock request and then calling the
underlying middleware functions directly.
"""
# Starlette expects an ASGI scope whose headers are lowercased byte pairs,
# so build the mock request accordingly.
scope = {
"type": "http",
"headers": [
(b"x-forwarded-for", b"10.10.10.10"),
(b"x-forwarded-for", b"10.10.10.1"),
(b"x-forwarded-proto", b"https"),
(b"x-forwarded-proto", b"http"),
(b"x-forwarded-host", b"example.org"),
(b"x-forwarded-host", b"example.com"),
],
}
request = Request(scope)
app = FastAPI()
middleware = XForwardedMiddleware(app, proxies=[ip_network("10.0.0.0/8")])
assert middleware._get_forwarded_for(request) == []
assert middleware._get_forwarded_proto(request) == []
assert not middleware._get_forwarded_host(request)
|
nilq/baby-python
|
python
|
# ---------------------------------------------------------------------------
# MTDA Client
# ---------------------------------------------------------------------------
#
# This software is a part of MTDA.
# Copyright (c) Mentor, a Siemens business, 2017-2020
#
# ---------------------------------------------------------------------------
# SPDX-License-Identifier: MIT
# ---------------------------------------------------------------------------
import os
import random
import socket
import time
import zerorpc
from mtda.main import MentorTestDeviceAgent
import mtda.constants as CONSTS
class Client:
def __init__(self, host=None):
agent = MentorTestDeviceAgent()
agent.load_config(host)
if agent.remote is not None:
uri = "tcp://%s:%d" % (agent.remote, agent.ctrlport)
self._impl = zerorpc.Client(heartbeat=20, timeout=2*60)
self._impl.connect(uri)
else:
self._impl = agent
self._agent = agent
HOST = socket.gethostname()
USER = os.getenv("USER")
WORDS = "/usr/share/dict/words"
if os.path.exists(WORDS):
WORDS = open(WORDS).read().splitlines()
name = random.choice(WORDS)
if name.endswith("'s"):
name = name.replace("'s", "")
elif USER is not None and HOST is not None:
name = "%s@%s" % (USER, HOST)
else:
name = "mtda"
self._session = os.getenv('MTDA_SESSION', name)
def agent_version(self):
return self._impl.agent_version()
def console_prefix_key(self):
return self._agent.console_prefix_key()
def command(self, args):
return self._impl.command(args, self._session)
def console_clear(self):
return self._impl.console_clear(self._session)
def console_dump(self):
return self._impl.console_dump(self._session)
def console_flush(self):
return self._impl.console_flush(self._session)
def console_getkey(self):
return self._agent.console_getkey()
def console_init(self):
return self._agent.console_init()
def console_head(self):
return self._impl.console_head(self._session)
def console_lines(self):
return self._impl.console_lines(self._session)
def console_locked(self):
return self._impl.console_locked(self._session)
def console_print(self, data):
return self._impl.console_print(data, self._session)
def console_prompt(self, newPrompt=None):
return self._impl.console_prompt(newPrompt, self._session)
def console_remote(self, host):
return self._agent.console_remote(host)
def console_run(self, cmd):
return self._impl.console_run(cmd, self._session)
def console_send(self, data, raw=False):
return self._impl.console_send(data, raw, self._session)
def console_tail(self):
return self._impl.console_tail(self._session)
def env_get(self, name):
return self._impl.env_get(name, self._session)
def env_set(self, name, value):
return self._impl.env_set(name, value, self._session)
def keyboard_write(self, data):
return self._impl.keyboard_write(data, self._session)
def power_locked(self):
return self._impl.power_locked(self._session)
def storage_bytes_written(self):
return self._impl.storage_bytes_written(self._session)
def storage_close(self):
return self._impl.storage_close(self._session)
def storage_locked(self):
return self._impl.storage_locked(self._session)
def storage_mount(self, part=None):
return self._impl.storage_mount(part, self._session)
def storage_open(self):
tries = 60
while tries > 0:
tries = tries - 1
status = self._impl.storage_open(self._session)
if status is True:
return True
time.sleep(1)
return False
def storage_status(self):
return self._impl.storage_status(self._session)
def _storage_write(self, image, imgname, imgsize, callback=None):
# Copy loop
bytes_wanted = 0
data = image.read(self._agent.blksz)
dataread = len(data)
totalread = 0
while totalread < imgsize:
totalread += dataread
# Report progress via callback
if callback is not None:
callback(imgname, totalread, imgsize)
# Write block to shared storage device
bytes_wanted = self._impl.storage_write(data, self._session)
# Check what to do next
if bytes_wanted < 0:
break
elif bytes_wanted > 0:
# Read next block
data = image.read(bytes_wanted)
dataread = len(data)
else:
# Agent may continue without further data
data = b''
dataread = 0
# Close the local image
image.close()
# Wait for background writes to complete
while True:
status, writing, written = self._impl.storage_status(self._session)
if writing is False:
break
if callback is not None:
callback(imgname, totalread, imgsize)
time.sleep(0.5)
# Storage may be closed now
status = self.storage_close()
# Provide final update to specified callback
if status is True and callback is not None:
callback(imgname, totalread, imgsize)
# Make sure an error is reported if a write error was received
if bytes_wanted < 0:
status = False
return status
def storage_update(self, dest, src=None, callback=None):
path = dest if src is None else src
imgname = os.path.basename(path)
try:
st = os.stat(path)
imgsize = st.st_size
image = open(path, "rb")
except FileNotFoundError:
return False
status = self._impl.storage_update(dest, 0, self._session)
if status is False:
image.close()
return False
self._impl.storage_compression(CONSTS.IMAGE.RAW.value, self._session)
return self._storage_write(image, imgname, imgsize, callback)
def storage_write_image(self, path, callback=None):
# Get size of the (compressed) image
imgname = os.path.basename(path)
# Open the specified image
try:
st = os.stat(path)
imgsize = st.st_size
if path.endswith(".bz2"):
compression = CONSTS.IMAGE.BZ2.value
elif path.endswith(".gz"):
compression = CONSTS.IMAGE.GZ.value
elif path.endswith(".zst"):
compression = CONSTS.IMAGE.ZST.value
else:
compression = CONSTS.IMAGE.RAW.value
self._impl.storage_compression(compression, self._session)
image = open(path, "rb")
except FileNotFoundError:
return False
# Open the shared storage device
status = self.storage_open()
if status is False:
image.close()
return False
return self._storage_write(image, imgname, imgsize, callback)
def storage_to_host(self):
return self._impl.storage_to_host(self._session)
def storage_to_target(self):
return self._impl.storage_to_target(self._session)
def storage_swap(self):
return self._impl.storage_swap(self._session)
def start(self):
return self._agent.start()
def remote(self):
return self._agent.remote
def session(self):
return self._session
def target_lock(self, retries=0):
status = False
while status is False:
status = self._impl.target_lock(self._session)
if retries <= 0 or status is True:
break
retries = retries - 1
time.sleep(60)
return status
def target_locked(self):
return self._impl.target_locked(self._session)
def target_off(self):
return self._impl.target_off(self._session)
def target_on(self):
return self._impl.target_on(self._session)
def target_status(self):
return self._impl.target_status(self._session)
def target_toggle(self):
return self._impl.target_toggle(self._session)
def target_unlock(self):
return self._impl.target_unlock(self._session)
def toggle_timestamps(self):
return self._impl.toggle_timestamps()
def usb_find_by_class(self, className):
return self._impl.usb_find_by_class(className, self._session)
def usb_has_class(self, className):
return self._impl.usb_has_class(className, self._session)
def usb_off(self, ndx):
return self._impl.usb_off(ndx, self._session)
def usb_off_by_class(self, className):
return self._impl.usb_off_by_class(className, self._session)
def usb_on(self, ndx):
return self._impl.usb_on(ndx, self._session)
def usb_on_by_class(self, className):
return self._impl.usb_on_by_class(className, self._session)
def usb_ports(self):
return self._impl.usb_ports(self._session)
def usb_status(self, ndx):
return self._impl.usb_status(ndx, self._session)
def usb_toggle(self, ndx):
return self._impl.usb_toggle(ndx, self._session)
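# Usage sketch (assumes an agent reachable through the standard MTDA
# configuration; the image path is illustrative):
# client = Client()
# if client.target_lock(retries=3):
#     def progress(name, written, total):
#         print("%s: %d/%d bytes" % (name, written, total))
#     client.storage_to_host()
#     client.storage_write_image("core-image.wic.gz", callback=progress)
#     client.storage_to_target()
#     client.target_unlock()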
|
nilq/baby-python
|
python
|
"""Standard modules"""
import sys
import numpy as np
import ldp
import matplotlib.pyplot as plt
class SimMesh(object):
def __init__(self, mesh, neg, sep, pos):
self.mesh = mesh
self.neg = neg
self.pos = pos
self.sep = sep
class SimData(object):
def __init__(self, ce, cse, phie, phis, j):
self.ce = ce
self.cse = cse
self.phie = phie
self.phis = phis
self.j = j
def get_sim_data(self, time_index, location):
return SimData(
self.ce[time_index, location], self.cse[time_index, location],
self.phie[time_index, location], self.phis[time_index, location],
self.j[time_index, location])
def get_var(parameter, time, location=None, delta_t=0.1, delete=None):
"""Fetch parameter data from a given location and time"""
(x_parameter, y_parameter) = (parameter[:, 0], parameter[:, 1])
time_frame = np.nonzero(np.diff(x_parameter) < 0)[0]
start = np.insert(time_frame+1, 0, 0)
stop = np.append(time_frame, len(x_parameter))
time_range = np.arange(0, len(start))*delta_t
time_index = np.nonzero(time_range == time)[0][0]
data = y_parameter[start[time_index]:stop[time_index]+1]
if location:
data = data[
location == x_parameter[start[time_index]:stop[time_index]]]
if delete:
data = np.delete(data, delete)
return np.array([data])
def nice_abs(number):
"""Return the absolute of the given number"""
return ((np.sign(number)+1)/2)*np.abs(number)
def reaction_flux(sim_data, params, const):
"""J"""
reaction_flux0 = params['k_norm_ref'] * \
nice_abs((params['csmax']-sim_data.cse)/params['csmax']) ** \
(1-params['alpha']) * \
nice_abs(sim_data.cse/params['csmax']) ** params['alpha'] * \
nice_abs(sim_data.ce/const['ce0']) ** (1-params['alpha'])
soc = sim_data.cse/params['csmax']
# eta = phis-phie-params['eref'](soc)
eta = sim_data.phis-sim_data.phie-params['Uocp'][0](soc)
F = 96487
R = 8.314
return np.array([reaction_flux0*(
np.exp((1-params['alpha'])*F*eta/(R*const['Tref'])) -
np.exp(-params['alpha']*F*eta/(R*const['Tref'])))])
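# The return value above is the Butler-Volmer relation
#   j = j0 * (exp((1-alpha)*F*eta/(R*Tref)) - exp(-alpha*F*eta/(R*Tref)))
# with overpotential eta = phis - phie - Uocp(cse/csmax) and exchange term
#   j0 = k_norm_ref * ((csmax-cse)/csmax)**(1-alpha)
#        * (cse/csmax)**alpha * (ce/ce0)**(1-alpha)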
def region(mesh):
"""Find the regions in the mesh"""
xneg = np.nonzero(mesh <= 1)[0]
xpos = np.nonzero(mesh > 2)[0]
xsep = np.nonzero((mesh > 1) & (mesh <= 2))[0]
if mesh[xneg[-1]] == mesh[xneg[-2]]:
xsep = np.concatenate(([xneg[-1]], xsep))  # wrap the scalar index so concatenate works
xneg = np.delete(xneg, -1)
if mesh[xsep[-1]] == mesh[xsep[-2]]:
xpos = np.concatenate(([xsep[-1]], xpos))
xsep = np.delete(xsep, -1)
return SimMesh(mesh, xneg, xsep, xpos)
def assemble_comsol(time, data, space=None, dt=0.1):
ce, cse, phie, phis, j = (np.empty((0, len(data['mesh']))) for i in range(5))
for ind in time:
ce = np.append(ce, get_var(data['ce'], ind), axis=0)
cse = np.append(cse, get_var(data['cse'], ind, delete=[80, 202]), axis=0)
phie = np.append(phie, get_var(data['phie'], ind), axis=0)
phis = np.append(phis, get_var(data['phis'], ind, delete=[80, 202]), axis=0)
j = np.append(j, get_var(data['j'], ind, delete=[80, 202]), axis=0)
return SimData(ce, cse, phie, phis, j)
def plot_j(time, data, mesh, params):
jneg = np.empty((0, len(mesh.neg)))
jpos = np.empty((0, len(mesh.pos)))
for ind in range(0,len(time)):
jneg = np.append(jneg, reaction_flux(data.get_sim_data(ind, mesh.neg), params['neg'], params['const']), axis=0)
jpos = np.append(jpos, reaction_flux(data.get_sim_data(ind, mesh.pos), params['pos'], params['const']), axis=0)
plt.plot(mesh.neg, jneg[ind,:], mesh.pos, jpos[ind,:])
print('Neg rms: {}'.format(np.sqrt(np.mean(np.square(jneg-data.get_sim_data(slice(0,len(time)), mesh.neg).j), axis=1))))
print('Pos rms: {}'.format(np.sqrt(np.mean(np.square(jpos-data.get_sim_data(slice(0,len(time)), mesh.pos).j), axis=1))))
plt.grid()
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.show()
def main():
print('Loading Cell Parameters')
params = dict()
time = [5, 15, 25, 35, 45]
sheet = ldp.read_excel(
'../tests/gold_standard/GuAndWang_parameter_list.xlsx', 0)
(ncol, pcol) = (2, 3)
params['const'] = ldp.load_params(sheet, range(7, 15), ncol, pcol)
params['neg'] = ldp.load_params(sheet, range(18, 43), ncol, pcol)
params['sep'] = ldp.load_params(sheet, range(47, 52), ncol, pcol)
params['pos'] = ldp.load_params(sheet, range(55, 75), ncol, pcol)
comsol = ldp.load('../tests/gold_standard/guwang2.npz')
comsol_parsed = assemble_comsol(time, comsol)
comsol_mesh = region(comsol['mesh'])
plot_j(time, comsol_parsed, comsol_mesh, params)
return
# Unreachable exploratory code retained below; these calls appear to predate
# the SimMesh/SimData refactor and no longer match the current signatures:
# ce = get_var(comsol['ce'], 5)
# cse = get_var(comsol['cse'], 5, delete=[80, 202])
# phie = get_var(comsol['phie'], 5)
# phis = get_var(comsol['phis'], 5, delete=[80, 202])
# mesh_neg, mesh_sep, mesh_pos = region(comsol['mesh'])
# print(mesh_neg)
# print(reaction_flux(ce, cse, phie, phis, params['neg'], params['const']))
if __name__ == '__main__':
sys.exit(main())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#"""
# Created on Mon Oct 28 15:12:43 2013
#
#@author: laure
#
# BROKEN : Doesn't work ##########################
#"""
# import sys
#
# import soma_workflow.constants as constants
# from soma_workflow.test.job_tests.job_tests import JobTests
# from soma_workflow.configuration import LIGHT_MODE
# from soma_workflow.configuration import LOCAL_MODE
# from soma_workflow.configuration import REMOTE_MODE
#
#
# class MPIParallelJobTest(JobTests):
# '''
# Submission of a parallel job (MPI)
# '''
# allowed_resources = [LIGHT_MODE, LOCAL_MODE, REMOTE_MODE]
#
# def setUp(self):
# self.my_jobs = []
# self.my_transfers = []
# self.node_num = 4
# info = self.job_examples.mpi_job_submission(node_num=self.node_num)
# self.my_jobs.append(info[0])
# self.output_files = info[1]
#
# def tearDown(self):
# super(MPIParallelJobTest, self).tearDown()
# for file in self.output_files:
# if os.path.isfile(file): os.remove(file)
#
# def test_result(self):
# jobid = self.my_jobs[0]
# self.wf_ctrl.wait_job(self.my_jobs)
#
# status = self.wf_ctrl.job_status(jobid)
# self.failUnless(status == constants.DONE,
# 'Job %s status after wait: %s' % (jobid, status))
# job_termination_status = self.wf_ctrl.job_termination_status(jobid)
# exit_status = job_termination_status[0]
# self.failUnless(exit_status == constants.FINISHED_REGULARLY,
# 'Job %s exit status: %s' % (jobid, exit_status))
# exit_value = job_termination_status[1]
# self.failUnless(exit_value == 0,
# 'Job exit value: %d' % exit_value)
#
# sys.stdout.write("stdout: \n")
# line = self.wf_ctrl.stdoutReadLine(jobid)
# process_num = 1
# while line:
# splitted_line = line.split()
# if splitted_line[0] == "Greetings":
# self.failUnless(line.rstrip() == "Greetings from process %d!" %
# (process_num),
# "stdout line: %sinstead of : "
# "'Greetings from process %d!'" %
# (line, process_num))
# process_num = process_num + 1
# line = self.wf_ctrl.stdoutReadLine(jobid)
#
# self.failUnless(process_num == self.node_num,
# "%d process(es) run instead of %d." %
# (process_num - 1, self.node_num))
#
#
# if __name__ == '__main__':
# MPIParallelJobTest.run_test(debug=False)
# sys.exit(0)
|
nilq/baby-python
|
python
|
from matplotlib import colors
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import matplotlib.gridspec as gridspec
from scipy.interpolate import interp1d
class TrianglePlot(object):
_default_contour_colors = [(colors.cnames['darkslategrey'], colors.cnames['black'], 'k'),
(colors.cnames['dodgerblue'], colors.cnames['blue'], 'k'),
(colors.cnames['orchid'], colors.cnames['darkviolet'], 'k'),
(colors.cnames['lightcoral'], colors.cnames['red'], 'k')]
truth_color = 'g'
spacing = np.array([0.1, 0.1, 0.05, 0.05, 0.2, 0.11])
spacing_scale = 1.
_tick_rotation = 0
_color_eval = 0.9
show_intervals_68 = False
def __init__(self, independent_likelihoods_list, param_ranges=None, cmap='gist_heat'):
"""
:param independent_likelihoods_list: a list of IndependentLikelihoods classes (see trikde.pdfs)
:param param_ranges: optional parameter ranges, either a list ordered like param_names or a dict keyed by parameter name; defaults to the ranges of the first likelihood
:param cmap: name of the color map to use if not using filled contours
"""
self.param_names = independent_likelihoods_list[0].param_names
self._nchains = len(independent_likelihoods_list)
if param_ranges is None:
parameter_ranges = independent_likelihoods_list[0].param_ranges
else:
parameter_ranges = param_ranges
if isinstance(parameter_ranges, list):
self._prange_list = parameter_ranges
self.parameter_ranges = {}
for i, pname in enumerate(self.param_names):
self.parameter_ranges.update({pname:parameter_ranges[i]})
elif isinstance(parameter_ranges, dict):
self.parameter_ranges = parameter_ranges
self._prange_list = []
for pi in self.param_names:
self._prange_list.append(self.parameter_ranges[pi])
self._NDdensity_list = independent_likelihoods_list
self.set_cmap(cmap)
def _load_projection_1D(self, pname, idx):
return self._NDdensity_list[idx].projection_1D(pname)
def _load_projection_2D(self, p1, p2, idx):
return self._NDdensity_list[idx].projection_2D(p1, p2)
def set_cmap(self, newcmap, color_eval=0.9, marginal_col=None):
self.cmap = newcmap
self.cmap_call = plt.get_cmap(newcmap)
self._color_eval = color_eval
self._marginal_col = marginal_col
def make_joint(self, p1, p2, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6,
fig_size=8, label_scale=1, tick_label_font=12,
xtick_label_rotate=0, show_contours=True):
self.fig = plt.figure(1)
self._init(fig_size)
ax = plt.subplot(111)
if contour_colors is None:
contour_colors = self._default_contour_colors
for i in range(self._nchains):
axes = self._make_joint_i(p1, p2, ax, i, contour_colors=contour_colors, levels=levels,
filled_contours=filled_contours, contour_alpha=contour_alpha,
labsize=15*label_scale, tick_label_font=tick_label_font,
xtick_label_rotate=xtick_label_rotate, show_contours=show_contours)
return axes
def make_triplot(self, contour_levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6,
fig_size=8, truths=None, contour_colors=None,
axis_label_font=16, tick_label_font=12,
xtick_label_rotate=0, show_contours=True,
marginal_alpha=0.6, show_intervals=True,
display_params=None):
self.fig = plt.figure(1)
self._init(fig_size)
axes = []
counter = 1
if display_params is None:
display_params = self.param_names
n_subplots = len(display_params)
gs1 = gridspec.GridSpec(n_subplots, n_subplots)
gs1.update(wspace=0.15, hspace=0.15)
for row in range(n_subplots):
for col in range(n_subplots):
axes.append(plt.subplot(gs1[counter-1]))
counter += 1
if contour_colors is None:
contour_colors = self._default_contour_colors
self._auto_scale = []
for i in range(self._nchains):
axes.append(self._make_triplot_i(axes, i, contour_colors, contour_levels, filled_contours, contour_alpha,
fig_size, truths, tick_label_font=tick_label_font,
xtick_label_rotate=xtick_label_rotate,
axis_label_font=axis_label_font, cmap=self.cmap_call, show_contours=show_contours,
marginal_alpha=marginal_alpha, show_intervals=show_intervals,
display_params=display_params))
for key in display_params:
max_h = []
for scale in self._auto_scale:
max_h.append(scale[key][1])
plot_index = scale[key][0]
max_h = max(max_h)
axes[plot_index].set_ylim(0., 1.1 * max_h)
self._auto_scale = []
plt.subplots_adjust(left=self.spacing[0] * self.spacing_scale, bottom=self.spacing[1] * self.spacing_scale,
right=1 - self.spacing[2] * self.spacing_scale,
top=1 - self.spacing[3] * self.spacing_scale,
wspace=self.spacing[4] * self.spacing_scale, hspace=self.spacing[5] * self.spacing_scale)
return axes
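# Usage sketch (assumes 'likelihoods' is a list of trikde
# IndependentLikelihoods instances sharing the same parameter names):
# tri = TrianglePlot(likelihoods)
# axes = tri.make_triplot(contour_levels=[0.05, 0.22, 1], filled_contours=True)
# plt.show()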
def make_marginal(self, p1, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None,
fig_size=8, truths=None, load_from_file=True,
transpose_idx=None, bandwidth_scale=0.7, label_scale=1,
cmap=None, xticklabel_rotate=0, bar_alpha=0.7, bar_colors=['k','m','g','r'],
height_scale=1.1, show_low=False, show_high=False):
self.fig = plt.figure(1)
self._init(fig_size)
ax = plt.subplot(111)
self._auto_scale = []
if contour_colors is None:
contour_colors = self._default_contour_colors
self._auto_scale = []
for i in range(self._nchains):
out = self._make_marginal_i(p1, ax, i, contour_colors, levels, filled_contours, contour_alpha, param_names,
fig_size, truths, load_from_file=load_from_file,
transpose_idx=transpose_idx, bandwidth_scale=bandwidth_scale,
label_scale=label_scale, cmap=cmap, xticklabel_rotate=xticklabel_rotate,
bar_alpha=bar_alpha, bar_color=bar_colors[i], show_low=show_low, show_high=show_high)
scales = []
for c in range(0, self._nchains):
scales.append(self._auto_scale[c][0])
maxh = np.max(scales) * height_scale
ax.set_ylim(0, maxh)
pmin, pmax = self._get_param_minmax(p1)
asp = maxh * (pmax - pmin) ** -1
ax.set_aspect(asp ** -1)
self._auto_scale = []
return out
def _make_marginal_i(self, p1, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8,
truths=None, labsize=15, tick_label_font=14,
load_from_file=True, transpose_idx=None,
bandwidth_scale=0.7, label_scale=None, cmap=None, xticklabel_rotate=0,
bar_alpha=0.7, bar_color=None, show_low=False, show_high=False):
autoscale = []
density = self._load_projection_1D(p1, color_index)
xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(p1)
pmin, pmax = self._get_param_minmax(p1)
coords = np.linspace(pmin, pmax, len(density))
bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None)
bar_heights *= np.sum(bar_heights) ** -1 * len(bar_centers) ** -1
autoscale.append(np.max(bar_heights))
max_idx = np.argmax(bar_heights)
for i, y in enumerate(bar_heights):
x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5
ax.plot([x1, x2], [y, y], color=bar_color,
alpha=bar_alpha)
ax.fill_between([x1, x2], y, color=bar_color,
alpha=0.6)
ax.plot([x1, x1], [0, y], color=bar_color,
alpha=bar_alpha)
ax.plot([x2, x2], [0, y], color=bar_color,
alpha=bar_alpha)
ax.set_xlim(pmin, pmax)
ax.set_yticks([])
mean_of_distribution, [low68, high68] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 1)
mean_of_distribution, [low95, high95] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 2)
mean_of_distribution = 0
for i in range(0, len(bar_heights)):
mean_of_distribution += bar_heights[i] * bar_centers[i] / np.sum(bar_heights)
if low95 is not None and show_low:
ax.axvline(low95, color=bar_color,
alpha=0.8, linewidth=2.5, linestyle='-.')
if high95 is not None and show_high:
ax.axvline(high95, color=bar_color,
alpha=0.8, linewidth=2.5, linestyle='-.')
ax.set_xticks(xtick_locs)
ax.set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xticklabel_rotate)
if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_xlabel(xlabel, fontsize=40 * label_scale)
else:
ax.set_xlabel(xlabel, fontsize=labsize * label_scale)
if truths is not None:
t = deepcopy(truths[p1])
if isinstance(t, float) or isinstance(t, int):
pmin, pmax = self._get_param_minmax(p1)
if t <= pmin:
t = pmin * 1.075
ax.axvline(t, linestyle='--', color=self.truth_color, linewidth=3)
elif isinstance(t, list):
ax.axvspan(t[0], t[1], alpha=0.25, color=self.truth_color)
self._auto_scale.append(autoscale)
return ax
def _make_joint_i(self, p1, p2, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, labsize=None, tick_label_font=None,
xtick_label_rotate=None, show_contours=None):
density = self._load_projection_2D(p1, p2, color_index)
extent, aspect = self._extent_aspect([p1, p2])
pmin1, pmax1 = extent[0], extent[1]
pmin2, pmax2 = extent[2], extent[3]
xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(p1)
ytick_locs, ytick_labels, ylabel, _ = self.ticks_and_labels(p2)
if filled_contours:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
ax.imshow(density, extent=extent, aspect=aspect,
origin='lower', cmap=self.cmap, alpha=0)
self._contours(coordsx, coordsy, density, ax, extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
ax.set_xlim(pmin1, pmax1)
ax.set_ylim(pmin2, pmax2)
else:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
ax.imshow(density, origin='lower', cmap=self.cmap, alpha=1, vmin=0,
vmax=np.max(density), aspect=aspect, extent=extent)
if show_contours:
self._contours(coordsx, coordsy, density, ax, extent=extent, filled_contours=False,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
ax.set_xlim(pmin1, pmax1)
ax.set_ylim(pmin2, pmax2)
ax.set_xticks(xtick_locs)
ax.set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xtick_label_rotate)
ax.set_yticks(ytick_locs)
ax.set_yticklabels(ytick_labels, fontsize=tick_label_font)
if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_xlabel(xlabel, fontsize=40)
elif ylabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_ylabel(ylabel, fontsize=40)
else:
ax.set_xlabel(xlabel, fontsize=labsize)
ax.set_ylabel(ylabel, fontsize=labsize)
return ax
def _make_triplot_i(self, axes, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, fig_size=8,
truths=None, tick_label_font=14, xtick_label_rotate=0,
axis_label_font=None, cmap=None,
show_contours=True, marginal_alpha=0.9, show_intervals=True,
display_params=None):
size_scale = len(display_params) * 0.1 + 1
self.fig.set_size_inches(fig_size * size_scale, fig_size * size_scale)
marg_in_row, plot_index = 0, 0
n_subplots = len(display_params)
self._reference_grid = None
autoscale = {}
self.triplot_densities = []
self.joint_names = []
row = 0
col = 0
for _ in range(n_subplots):
marg_done = False
for _ in range(n_subplots):
if self.param_names[row] not in display_params:
continue
elif self.param_names[col] not in display_params:
continue
if col < marg_in_row:
density = self._load_projection_2D(display_params[row], display_params[col], color_index)
self.triplot_densities.append(density)
self.joint_names.append(display_params[row]+'_'+display_params[col])
extent, aspect = self._extent_aspect([display_params[col], display_params[row]])
pmin1, pmax1 = extent[0], extent[1]
pmin2, pmax2 = extent[2], extent[3]
xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(display_params[col])
ytick_locs, ytick_labels, ylabel, _ = self.ticks_and_labels(display_params[row])
if row == n_subplots - 1:
axes[plot_index].set_xticks(xtick_locs)
axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font,
rotation=xtick_label_rotate)
if col == 0:
axes[plot_index].set_yticks(ytick_locs)
axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font)
axes[plot_index].set_ylabel(ylabel, fontsize=axis_label_font)
else:
axes[plot_index].set_yticks([])
axes[plot_index].set_yticklabels([])
axes[plot_index].set_xlabel(xlabel, fontsize=axis_label_font)
elif col == 0:
axes[plot_index].set_yticks(ytick_locs)
axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font)
axes[plot_index].set_xticks([])
axes[plot_index].set_ylabel(ylabel, fontsize=axis_label_font)
else:
axes[plot_index].set_xticks([])
axes[plot_index].set_yticks([])
axes[plot_index].set_xticklabels([])
axes[plot_index].set_yticklabels([])
if filled_contours:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
axes[plot_index].imshow(density.T, extent=extent, aspect=aspect,
origin='lower', cmap=self.cmap, alpha=0)
self._contours(coordsx, coordsy, density.T, axes[plot_index], extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
axes[plot_index].set_xlim(pmin1, pmax1)
axes[plot_index].set_ylim(pmin2, pmax2)
else:
axes[plot_index].imshow(density.T, origin='lower', cmap=self.cmap, alpha=1, vmin=0,
vmax=np.max(density), aspect=aspect, extent=extent)
if show_contours:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
self._contours(coordsx, coordsy, density.T, axes[plot_index], filled_contours=False,
extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
axes[plot_index].set_xlim(pmin1, pmax1)
axes[plot_index].set_ylim(pmin2, pmax2)
axes[plot_index].set_xlim(pmin1, pmax1)
axes[plot_index].set_ylim(pmin2, pmax2)
if truths is not None:
t1, t2 = truths[display_params[col]], truths[display_params[row]]
axes[plot_index].scatter(t1, t2, color=self.truth_color, s=50)
axes[plot_index].axvline(t1, linestyle='--', color=self.truth_color, linewidth=3)
axes[plot_index].axhline(t2, linestyle='--', color=self.truth_color, linewidth=3)
elif marg_in_row == col and marg_done is False:
marg_done = True
marg_in_row += 1
density = self._load_projection_1D(display_params[col], color_index)
xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(display_params[col])
pmin, pmax = self._get_param_minmax(display_params[col])
coords = np.linspace(pmin, pmax, len(density))
bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None)
bar_heights *= (np.sum(bar_heights) * len(bar_centers)) ** -1
autoscale[display_params[col]] = [plot_index, max(bar_heights)]
for i, y in enumerate(bar_heights):
x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5
if filled_contours:
axes[plot_index].plot([x1, x2], [y, y], color=contour_colors[color_index][1],
alpha=1)
axes[plot_index].fill_between([x1, x2], y, color=contour_colors[color_index][1],
alpha=marginal_alpha)
axes[plot_index].plot([x1, x1], [0, y], color=contour_colors[color_index][1],
alpha=1)
axes[plot_index].plot([x2, x2], [0, y], color=contour_colors[color_index][1],
alpha=1)
else:
if self._marginal_col is None:
marginal_col = cmap(self._color_eval)
else:
marginal_col = self._marginal_col
axes[plot_index].plot([x1, x2], [y, y], color=marginal_col,
alpha=1)
axes[plot_index].fill_between([x1, x2], y, color=marginal_col,
alpha=marginal_alpha)
axes[plot_index].plot([x1, x1], [0, y], color=marginal_col,
alpha=1)
axes[plot_index].plot([x2, x2], [0, y], color=marginal_col,
alpha=1)
axes[plot_index].set_xlim(pmin, pmax)
axes[plot_index].set_yticks([])
if show_intervals:
                        _, [low68, high68] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 1)
                        _, [low95, high95] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 2)
if show_intervals and low95 is not None:
axes[plot_index].axvline(low95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle='-.')
if show_intervals and high95 is not None:
axes[plot_index].axvline(high95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle='-.')
if self.show_intervals_68 and low68 is not None:
axes[plot_index].axvline(low68, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle=':')
if self.show_intervals_68 and high68 is not None:
axes[plot_index].axvline(high68, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle=':')
if col != n_subplots - 1:
axes[plot_index].set_xticks([])
else:
axes[plot_index].set_xticks(xtick_locs)
axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xtick_label_rotate)
axes[plot_index].set_xlabel(xlabel, fontsize=axis_label_font)
if truths is not None:
t = deepcopy(truths[display_params[col]])
pmin, pmax = self._get_param_minmax(display_params[col])
if isinstance(t, float) or isinstance(t, int):
if t <= pmin:
t_ = pmin * 1.075
else:
t_ = t
axes[plot_index].axvline(t_, linestyle='--', color=self.truth_color, linewidth=3)
else:
t_ = 0.5*(t[0] + t[1])
axes[plot_index].axvline(t_, linestyle='--', color=self.truth_color, linewidth=3)
axes[plot_index].axvspan(t[0], t[1], color=self.truth_color, alpha=0.25)
else:
axes[plot_index].axis('off')
plot_index += 1
col += 1
row += 1
col = 0
self._auto_scale.append(autoscale)
    def _confidence_int(self, pmin, pmax, centers, heights, num_sigma, thresh=None):
        # Draw samples from the (interpolated) 1D marginal density via
        # rejection sampling, then estimate the confidence interval from
        # the accepted samples.
        centers = np.array(centers)
        heights = np.array(heights)
        heights *= np.max(heights) ** -1
        prob_interp = interp1d(centers, heights, bounds_error=False,
                               fill_value=0)
        samples = []
        while len(samples) < 10000:
            samp = np.random.uniform(pmin, pmax)
            prob = prob_interp(samp)
            u = np.random.uniform(0, 1)
            if prob >= u:
                samples.append(samp)
        mu, sigmas = compute_confidence_intervals(samples, num_sigma, thresh)
        return mu, [mu - sigmas[0], mu + sigmas[1]]
def _extent_aspect(self, param_names):
aspect = (self.parameter_ranges[param_names[0]][1] - self.parameter_ranges[param_names[0]][0]) * \
(self.parameter_ranges[param_names[1]][1] - self.parameter_ranges[param_names[1]][0]) ** -1
extent = [self.parameter_ranges[param_names[0]][0], self.parameter_ranges[param_names[0]][1],
self.parameter_ranges[param_names[1]][0],
self.parameter_ranges[param_names[1]][1]]
return extent, aspect
def _init(self, fig_size):
self._tick_lab_font = 12 * fig_size * 7 ** -1
self._label_font = 15 * fig_size * 7 ** -1
plt.rcParams['axes.linewidth'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['xtick.major.width'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['xtick.major.size'] = 6 * fig_size * 7 ** -1
plt.rcParams['xtick.minor.size'] = 2 * fig_size * 7 ** -1
plt.rcParams['ytick.major.width'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['ytick.major.size'] = 6 * fig_size * 7 ** -1
plt.rcParams['ytick.minor.size'] = 2 * fig_size * 7 ** -1
def _get_param_minmax(self, pname):
ranges = self.parameter_ranges[pname]
return ranges[0], ranges[1]
def _get_param_inds(self, params):
inds = []
for pi in params:
for i, name in enumerate(self.param_names):
if pi == name:
inds.append(i)
break
return np.array(inds)
def _bar_plot_heights(self, bar_heights, coords, rebin):
if rebin is not None:
new = []
if len(bar_heights) % rebin == 0:
fac = int(len(bar_heights) / rebin)
for i in range(0, len(bar_heights), fac):
new.append(np.mean(bar_heights[i:(i + fac)]))
bar_heights = np.array(new)
else:
                raise ValueError('length of bar_heights must be divisible by rebin.')
bar_width = np.absolute(coords[-1] - coords[0]) * len(bar_heights) ** -1
bar_centers = []
for i in range(0, len(bar_heights)):
bar_centers.append(coords[0] + bar_width * (0.5 + i))
integral = np.sum(bar_heights) * bar_width * len(bar_centers) ** -1
bar_heights = bar_heights * integral ** -1
return bar_centers, bar_width, bar_heights
def _contours(self, x, y, grid, ax, linewidths=4, filled_contours=True, contour_colors='',
contour_alpha=1., extent=None, levels=[0.05, 0.32, 1]):
levels = np.array(levels) * np.max(grid)
X, Y = np.meshgrid(x, y)
if filled_contours:
ax.contour(X, Y, grid, levels, extent=extent,
colors=contour_colors, linewidths=linewidths, zorder=1, linestyles=['dashed', 'solid'])
ax.contourf(X, Y, grid, [levels[0], levels[1]], colors=[contour_colors[0], contour_colors[1]],
alpha=contour_alpha * 0.5, zorder=1,
extent=extent)
ax.contourf(X, Y, grid, [levels[1], levels[2]], colors=[contour_colors[1], contour_colors[2]],
alpha=contour_alpha, zorder=1,
extent=extent)
else:
ax.contour(X, Y, grid, extent=extent, colors=contour_colors, zorder=1,
levels=levels,
linewidths=linewidths)
def ticks_and_labels(self, pname):
rotation = self._tick_rotation
decimals, nticks = auto_decimal_places(self.parameter_ranges[pname][0], self.parameter_ranges[pname][1])
tick_locs = np.round(np.linspace(self.parameter_ranges[pname][0], self.parameter_ranges[pname][1], nticks), decimals)
tick_labels = tick_locs
return tick_locs, tick_labels, pname, rotation
def get_parameter_confidence_interval(self, parameter, clevel, chain_num=None,
show_percentage=False, return_intervals=False,
print_intervals=True, thresh=None):
if print_intervals:
print('parameter name: ', parameter)
        if thresh is None:
            if show_percentage:
                print(str(clevel) + '-sigma confidence intervals: \nformat: median (lower, upper) (-%, +%)\n')
            else:
                print(str(clevel) + '-sigma confidence intervals: \nformat: median (lower, upper)\n')
        else:
            if show_percentage:
                print(str(100 * thresh) + '% confidence intervals: \nformat: median (lower, upper) (-%, +%)\n')
            else:
                print(str(100 * thresh) + '% confidence intervals: \nformat: median (lower, upper)\n')
medians, uppers, lowers = [], [], []
for idx in range(0, self._nchains):
if chain_num is not None:
if idx != chain_num:
continue
samples = self._load_projection_1D(parameter, idx)
pmin, pmax = self._get_param_minmax(parameter)
coords = np.linspace(pmin, pmax, len(samples))
bar_centers, bar_widths, bar_heights = self._bar_plot_heights(samples, coords, None)
median, [lower, upper] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, clevel, thresh)
            if print_intervals:
                print('SAMPLES ' + str(idx + 1) + ':')
                if show_percentage:
                    percent_low = 100 * (median - lower) / abs(median)
                    percent_high = 100 * (upper - median) / abs(median)
                    print(str(median) + ' (-' + str(percent_low) + '%, +' + str(percent_high) + '%)')
                else:
                    print(str(median) + ' (' + str(lower) + ', ' + str(upper) + ')')
                print('width: ', upper - lower)
medians.append(median)
uppers.append(upper)
lowers.append(lower)
if return_intervals:
return (medians, uppers, lowers)
else:
return None
def auto_decimal_places(param_min, param_max):
    """Choose the number of decimal places and the number of axis ticks
    based on the order of magnitude of the parameter range."""
    nticks = 5
    if param_min == 0:
        OM_low = -1
    else:
        OM_low = int(np.log10(abs(param_min)))
    if param_max == 0:
        OM_high = -1
    else:
        OM_high = int(np.log10(abs(param_max)))
    OM_min = min(OM_low, OM_high)
    if OM_min > 0:
        decimals = 0
    else:
        decimals = abs(OM_min) + 2
    # add a decimal place when the range spans a single order of magnitude
    dynamic_range = abs(OM_high - OM_low)
    if dynamic_range == 0:
        decimals += 1
    # long tick labels get fewer ticks so they do not overlap
    if decimals > 2:
        nticks -= 1
    if decimals > 3:
        nticks -= 1
    if decimals > 4:
        nticks -= 1
    return decimals, nticks
def compute_confidence_intervals_histogram(sample, num_sigma):
    """
    Computes the upper and lower sigma from the median value.
    This function gives good error estimates for skewed pdfs.
    :param sample: 1-D sample
    :return: median, [lower_sigma, upper_sigma]
    """
    if num_sigma > 3:
        raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma)
    num = len(sample)
    median = np.median(sample)
    sorted_sample = np.sort(sample)
    num_threshold1 = int(round((num-1)*0.841345))
    num_threshold2 = int(round((num-1)*0.977249868))
    num_threshold3 = int(round((num-1)*0.998650102))
    if num_sigma == 1:
        upper_sigma1 = sorted_sample[num_threshold1 - 1]
        lower_sigma1 = sorted_sample[num - num_threshold1 - 1]
        return median, [median-lower_sigma1, upper_sigma1-median]
    if num_sigma == 2:
        upper_sigma2 = sorted_sample[num_threshold2 - 1]
        lower_sigma2 = sorted_sample[num - num_threshold2 - 1]
        return median, [median-lower_sigma2, upper_sigma2-median]
    if num_sigma == 3:
        upper_sigma3 = sorted_sample[num_threshold3 - 1]
        lower_sigma3 = sorted_sample[num - num_threshold3 - 1]
        return median, [median-lower_sigma3, upper_sigma3-median]
def compute_confidence_intervals(sample, num_sigma, thresh=None):
    """
    Computes the upper and lower sigma from the median value.
    This function gives good error estimates for skewed pdfs.
    :param sample: 1-D sample
    :return: median, [lower_sigma, upper_sigma]
    """
    if thresh is None and num_sigma > 3:
        raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma)
    num = len(sample)
    median = np.median(sample)
    sorted_sample = np.sort(sample)
    if thresh is None:
        num_threshold1 = int(round((num-1)*0.841345))
        num_threshold2 = int(round((num-1)*0.977249868))
        num_threshold3 = int(round((num-1)*0.998650102))
        if num_sigma == 1:
            upper_sigma1 = sorted_sample[num_threshold1 - 1]
            lower_sigma1 = sorted_sample[num - num_threshold1 - 1]
            return median, [median-lower_sigma1, upper_sigma1-median]
        if num_sigma == 2:
            upper_sigma2 = sorted_sample[num_threshold2 - 1]
            lower_sigma2 = sorted_sample[num - num_threshold2 - 1]
            return median, [median-lower_sigma2, upper_sigma2-median]
        if num_sigma == 3:
            upper_sigma3 = sorted_sample[num_threshold3 - 1]
            lower_sigma3 = sorted_sample[num - num_threshold3 - 1]
            return median, [median-lower_sigma3, upper_sigma3-median]
    else:
        assert thresh <= 1
        thresh = (1 + thresh)/2
        num_threshold = int(round((num-1) * thresh))
        upper = sorted_sample[num_threshold - 1]
        lower = sorted_sample[num - num_threshold - 1]
        return median, [median - lower, upper - median]
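# A minimal usage sketch (not part of the original module; parameter values
# are illustrative): recover a 1-sigma and an explicit 90% interval from a
# skewed sample.
if __name__ == '__main__':
    demo_sample = np.random.lognormal(mean=0.0, sigma=0.5, size=10000)
    med, (minus, plus) = compute_confidence_intervals(demo_sample, num_sigma=1)
    print('median: %.3f, 1-sigma interval: (%.3f, %.3f)' % (med, med - minus, med + plus))
    med, (minus, plus) = compute_confidence_intervals(demo_sample, 1, thresh=0.9)
    print('90%% interval: (%.3f, %.3f)' % (med - minus, med + plus))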
from django.apps import AppConfig
class CoinapiConfig(AppConfig):
name = 'coinapi'
import datetime
import time
import torch
import torch.nn as nn
import torch.distributed as dist
from asyncfeddr.utils.models import SimpleNetMNIST, SimpleNetFEMNIST
from asyncfeddr.utils.serialization import ravel_model_params
from asyncfeddr.utils.messaging import MessageCode, send_message
from asyncfeddr.optim.perturbed_sgd import PerturbedSGD
from asyncfeddr.utils.dataset import partition_dataset
def extract_model(sender, message_code, parameter):
    """Unpack a received message into (parameters, terminate_flag)."""
    if message_code == MessageCode.ParameterUpdate:
        return parameter, False
    elif message_code == MessageCode.Terminate:
        return parameter, True
    else:
        raise ValueError('undefined message code')
def worker_main(args):
trainloader, testloader = partition_dataset(args)
torch.manual_seed(args.seed)
if args.dataset == 'MNIST':
model = SimpleNetMNIST()
elif args.dataset == 'FEMNIST':
model = SimpleNetFEMNIST()
optimizer = PerturbedSGD(model.parameters(), lr=args.lr, mu=1.0/args.eta)
alpha = args.alpha
# train
model.train()
# model size
model_size = ravel_model_params(model).numel()
# communication buffer
m_parameter = torch.zeros(ravel_model_params(model).numel() + 2)
# FedDR local variables
y_i = torch.zeros(model_size)
x_hat = torch.zeros(model_size)
x_i = ravel_model_params(model)
criterion = nn.CrossEntropyLoss()
while True:
_ = dist.recv(tensor=m_parameter)
latest_model, terminate = extract_model( int(m_parameter[0].item()),
MessageCode(m_parameter[1].item()),
m_parameter[2:])
if terminate:
break
# start local update
start_time = datetime.datetime.now()
# update y_i
y_i = y_i + alpha*(latest_model - x_i)
# update x_i
optimizer.update_v_star(y_i)
# loop over the dataset multiple times
for epoch in range(args.epochs):
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# update x_i
x_i = ravel_model_params(model)
# update x_hat
x_hat = 2*x_i - y_i
end_time = datetime.datetime.now()
training_time = (end_time - start_time).total_seconds()
# add a delay
if args.worker_max_delay > 0:
time.sleep(args.worker_max_delay*(args.rank-1)/args.world_size)
# sending parameters to server
send_message(MessageCode.ParameterUpdate, x_hat)
# finish training
print('Rank {:2} Finished Training'.format(args.rank))
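# A minimal sketch (not part of the original package; the helper name is
# hypothetical) of the wire format the worker expects above: element 0 is
# the sender rank, element 1 the MessageCode value, and the remainder the
# flattened model parameters.
def pack_message(rank, code, flat_params):
    header = torch.tensor([float(rank), float(code.value)])
    return torch.cat([header, flat_params])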
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Optional matplotlib helper functions
"""
import functools
try:
# pylint: disable = unused-import
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
pyplot = None
HAS_MATPLOTLIB = False
def requires_matplotlib(func):
"""Decorator for functions requiring matplotlib"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not HAS_MATPLOTLIB:
raise ImportError(
f"{func} requires matplotlib to generate curve fit plot."
' Run "pip install matplotlib" before.'
)
# Analysis/plotting is done in a separate thread (so it doesn't block the
# main thread), but matplotlib doesn't support GUI mode in a child thread.
# The code below switches to a non-GUI backend "Agg" when creating the
# plot. An alternative is to run this in a separate process, but then
# we'd need to deal with pickling issues.
saved_backend = pyplot.get_backend()
pyplot.switch_backend("Agg")
try:
ret_val = func(*args, **kwargs)
finally:
pyplot.switch_backend(saved_backend)
return ret_val
return wrapped
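# A hypothetical usage sketch (not part of the original module): any plotting
# helper can opt in to the matplotlib guard via the decorator.
@requires_matplotlib
def _demo_plot(xs, ys):
    """Plot a simple line; raises ImportError if matplotlib is missing."""
    figure, axis = pyplot.subplots()
    axis.plot(xs, ys)
    return figure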
"""Utility functions for commissioning tests."""
# STDLIB
import os
import sys
from collections.abc import Iterable
# THIRD-PARTY
import numpy as np
import pytest
from numpy.testing import assert_allclose
# ASTROLIB
try:
import pysynphot as S
from pysynphot.spparser import parse_spec as old_parse_spec
except ImportError:
HAS_PYSYNPHOT = False
else:
HAS_PYSYNPHOT = True
# LOCAL
from synphot import Observation
from ..config import conf
from ..spectrum import band
from ..spparser import parse_spec
use_pysynphot = pytest.mark.skipif('not HAS_PYSYNPHOT')
# Currently, this is here because only commissioning tests are considered
# slow. If there are slow tests in the core unit tests, we can move this
# one level higher.
try:
slow = pytest.mark.skipif(not pytest.config.getoption('--slow'),
reason='need --slow option to run')
except AttributeError: # Not using pytest
slow = pytest.mark.skipif(True, reason='need --slow option to run')
__all__ = ['use_pysynphot', 'slow', 'count_outliers', 'CommCase', 'ThermCase']
def count_outliers(data, sigma=3.0):
"""Count outliers in given data.
This is as defined in similar method in ``SpecCase``
in ``astrolib/pysynphot/from_commissioning/conv_base.py``.
.. note:: This is not used but kept for reference.
Parameters
----------
data : ndarray
Result differences to be analyzed.
sigma : float
Values outside this number of sigma of std. dev.
around mean are considered outliers.
Returns
-------
n_outliers : int
Number of outlier data points.
"""
return np.count_nonzero(abs(data) > (data.mean() + sigma * data.std()))
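# A hypothetical usage sketch (values are illustrative only):
#
#     >>> data = np.array([0.1, -0.2, 0.05, 5.0, -0.1])
#     >>> count_outliers(data, sigma=1.0)
#     1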
@use_pysynphot
@slow
@pytest.mark.remote_data
class CommCase:
"""Base class for commissioning tests."""
obsmode = None # Observation mode string
spectrum = None # SYNPHOT-like string to construct spectrum
force = None
# Default tables are the latest available as of 2016-07-25.
tables = {
'graphtable': os.path.join('mtab$OLD_FILES', '07r1502mm_tmg.fits'),
'comptable': os.path.join('mtab$OLD_FILES', '07r1502nm_tmc.fits'),
'thermtable': 'mtab$tae17277m_tmt.fits'}
def setup_class(self):
"""Subclass needs to define ``obsmode`` and ``spectrum``
class variables for this to work.
"""
if not HAS_PYSYNPHOT:
raise ImportError(
'ASTROLIB PYSYNPHOT must be installed to run these tests')
# Make sure both software use the same graph and component tables.
conf.graphtable = self.tables['graphtable']
conf.comptable = self.tables['comptable']
conf.thermtable = self.tables['thermtable']
S.setref(graphtable=self.tables['graphtable'],
comptable=self.tables['comptable'],
thermtable=self.tables['thermtable'])
# Construct spectra for both software.
self.sp = parse_spec(self.spectrum)
self.bp = band(self.obsmode)
# Astropy version has no prior knowledge of instrument-specific
# binset, so it has to be set explicitly.
if hasattr(self.bp, 'binset'):
self.obs = Observation(self.sp, self.bp, force=self.force,
binset=self.bp.binset)
else:
self.obs = Observation(self.sp, self.bp, force=self.force)
# Astropy version does not assume a default waveset
# (you either have it or you don't). If there is no
# waveset, no point comparing obs waveset against ASTROLIB.
if self.sp.waveset is None or self.bp.waveset is None:
self._has_obswave = False
else:
self._has_obswave = True
self.spref = old_parse_spec(self.spectrum)
self.bpref = S.ObsBandpass(self.obsmode)
self.obsref = S.Observation(self.spref, self.bpref, force=self.force)
# Ensure we are comparing in the same units
self.bpref.convert(self.bp._internal_wave_unit.name)
self.spref.convert(self.sp._internal_wave_unit.name)
self.spref.convert(self.sp._internal_flux_unit.name)
self.obsref.convert(self.obs._internal_wave_unit.name)
self.obsref.convert(self.obs._internal_flux_unit.name)
@staticmethod
def _get_new_wave(sp):
"""Astropy version does not assume a default waveset
(you either have it or you don't). This is a convenience
method to duck-type ASTROLIB waveset behavior.
"""
wave = sp.waveset
if wave is None:
wave = conf.waveset_array
else:
wave = wave.value
return wave
def _assert_allclose(self, actual, desired, rtol=1e-07,
atol=sys.float_info.min):
"""``assert_allclose`` only report percentage but we
also want to know some extra info conveniently."""
if isinstance(actual, Iterable):
ntot = len(actual)
else:
ntot = 1
n = np.count_nonzero(
abs(actual - desired) > atol + rtol * abs(desired))
msg = (f'obsmode: {self.obsmode}\n'
f'spectrum: {self.spectrum}\n'
f'(mismatch {n}/{ntot})')
assert_allclose(actual, desired, rtol=rtol, atol=atol, err_msg=msg)
# TODO: Confirm whether non-default atol is acceptable.
# Have to use this value to avoid AssertionError for very
# small non-zero flux values like 1.8e-26 to 2e-311.
def _compare_nonzero(self, new, old, thresh=0.01, atol=1e-29):
"""Compare normally when results from both are non-zero."""
i = (new != 0) & (old != 0)
# Make sure non-zero atol is not too high, otherwise just let it fail.
if atol > (thresh * min(new.max(), old.max())):
atol = sys.float_info.min
self._assert_allclose(new[i], old[i], rtol=thresh, atol=atol)
def _compare_zero(self, new, old, thresh=0.01):
"""Special handling for comparison when one of the results
is zero. This is because ``rtol`` will not work."""
i = ((new == 0) | (old == 0)) & (new != old)
try:
self._assert_allclose(new[i], old[i], rtol=thresh)
except AssertionError as e:
pytest.xfail(str(e)) # TODO: Will revisit later
def test_band_wave(self, thresh=0.01):
"""Test bandpass waveset."""
wave = self._get_new_wave(self.bp)
self._assert_allclose(wave, self.bpref.wave, rtol=thresh)
def test_spec_wave(self, thresh=0.01):
"""Test source spectrum waveset."""
wave = self._get_new_wave(self.sp)
# TODO: Failure due to different wavesets for blackbody; Ignore?
try:
self._assert_allclose(wave, self.spref.wave, rtol=thresh)
except (AssertionError, ValueError):
self._has_obswave = False # Skip obs waveset tests
if 'bb(' in self.spectrum:
pytest.xfail('Blackbody waveset implementations are different')
elif 'unit(' in self.spectrum:
pytest.xfail('Flat does not use default waveset anymore')
else:
raise
def test_obs_wave(self, thresh=0.01):
"""Test observation waveset."""
if not self._has_obswave: # Nothing to test
return
# Native
wave = self.obs.waveset.value
# TODO: Failure due to different wavesets for blackbody; Ignore?
try:
self._assert_allclose(wave, self.obsref.wave, rtol=thresh)
except (AssertionError, ValueError):
if 'bb(' in self.spectrum:
pytest.xfail('Blackbody waveset implementations are different')
elif 'unit(' in self.spectrum:
self._has_obswave = False # Skip binned flux test
pytest.xfail('Flat does not use default waveset anymore')
else:
raise
# Binned
binset = self.obs.binset.value
self._assert_allclose(binset, self.obsref.binwave, rtol=thresh)
@pytest.mark.parametrize('thrutype', ['zero', 'nonzero'])
def test_band_thru(self, thrutype, thresh=0.01):
"""Test bandpass throughput, which is always between 0 and 1."""
wave = self.bpref.wave
thru = self.bp(wave).value
if thrutype == 'zero':
self._compare_zero(thru, self.bpref.throughput, thresh=thresh)
else: # nonzero
self._compare_nonzero(thru, self.bpref.throughput, thresh=thresh)
@pytest.mark.parametrize('fluxtype', ['zero', 'nonzero'])
def test_spec_flux(self, fluxtype, thresh=0.01):
"""Test flux for source spectrum in PHOTLAM."""
wave = self.spref.wave
flux = self.sp(wave).value
if fluxtype == 'zero':
self._compare_zero(flux, self.spref.flux, thresh=thresh)
else: # nonzero
self._compare_nonzero(flux, self.spref.flux, thresh=thresh)
@pytest.mark.parametrize('fluxtype', ['zero', 'nonzero'])
def test_obs_flux(self, fluxtype, thresh=0.01):
"""Test flux for observation in PHOTLAM."""
wave = self.obsref.wave
flux = self.obs(wave).value
# Native
if fluxtype == 'zero':
self._compare_zero(flux, self.obsref.flux, thresh=thresh)
else: # nonzero
self._compare_nonzero(flux, self.obsref.flux, thresh=thresh)
if not self._has_obswave: # Do not compare binned flux
return
# Binned (cannot be resampled)
binflux = self.obs.binflux.value
if fluxtype == 'zero':
self._compare_zero(binflux, self.obsref.binflux, thresh=thresh)
else: # nonzero
try:
self._compare_nonzero(binflux, self.obsref.binflux,
thresh=thresh)
except AssertionError as e:
if 'unit(' in self.spectrum:
pytest.xfail('Flat does not use default waveset anymore:\n'
f'{repr(e)}')
else:
raise
def test_countrate(self, thresh=0.01):
"""Test observation countrate calculations."""
ans = self.obsref.countrate()
# Astropy version does not assume a default area.
val = self.obs.countrate(conf.area).value
self._assert_allclose(val, ans, rtol=thresh)
def test_efflam(self, thresh=0.01):
"""Test observation effective wavelength."""
ans = self.obsref.efflam()
val = self.obs.effective_wavelength().value
self._assert_allclose(val, ans, rtol=thresh)
def teardown_class(self):
"""Reset config for both software."""
for cfgname in self.tables:
conf.reset(cfgname)
S.setref()
class ThermCase(CommCase):
"""Commissioning tests with thermal component."""
@pytest.mark.parametrize('fluxtype', ['zero', 'nonzero'])
def test_therm_spec(self, fluxtype, thresh=0.01):
"""Test bandpass thermal spectrum."""
thspref = self.bpref.obsmode.ThermalSpectrum()
thsp = self.bp.obsmode.thermal_spectrum()
# Make sure comparing same units
thspref.convert(thsp._internal_wave_unit.name)
thspref.convert(thsp._internal_flux_unit.name)
# waveset not expected to be same here, so just compare flux
flux = thsp(thspref.wave).value
if fluxtype == 'zero':
self._compare_zero(flux, thspref.flux, thresh=thresh)
else: # nonzero
# TODO: Is the refactored version really better?
try:
self._compare_nonzero(flux, thspref.flux, thresh=thresh)
except AssertionError:
pytest.xfail('New thermal spectrum samples better')
def test_thermback(self, thresh=0.01):
"""Test bandpass thermal background."""
ans = self.bpref.thermback()
val = self.bp.thermback().value
self._assert_allclose(val, ans, rtol=thresh)
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 19:11:30 2019
@author: wenbin
"""
"""
实现一个数据结构,使其具有以下方法:压栈,弹栈,取栈顶元素,判断栈是否为空以及获取栈中元素个数.
链表实现stack
"""
class LNode:
    def __init__(self, x=0, y=None):
        self.Data = x
        self.Next = y
class MyStack:
    def __init__(self):
        self.Data = None
        self.Next = None
    # Return True if the stack is empty, otherwise False
    def empty(self):
        return self.Next is None
    # Return the number of elements in the stack
    def size(self):
        size = 0
        p = self.Next
        while p is not None:
            p = p.Next
            size += 1
        return size
    # Push an element onto the stack
    def push(self, e):
        p = LNode(x=e, y=self.Next)
        self.Next = p
    # Pop the top element off the stack
    def pop(self):
        tmp = self.Next
        if tmp is not None:
            self.Next = tmp.Next
            return tmp.Data
        else:
            print("Stack is empty!")
            return None
    # Peek at the top element without removing it
    def top(self):
        if self.Next is not None:
            return self.Next.Data
        else:
            print("Stack is empty!")
            return None
if __name__ == "__main__":
stack = MyStack()
stack.push(5)
stack.push(3)
print("栈顶元素为:" , stack.top())
print("栈大小为:" , stack.size())
    x = stack.pop()
    print("pop successfully! The element is:", x)
    x = stack.pop()
    print("pop successfully! The element is:", x)
    x = stack.pop()
# Digitar algorithm for plucked-string synthesis
# Demo with "Frere Jacques"
# Abe Karplus, 2016
import wave
import array
sampling = 48e3 # Hz
bpm = 100
notenames = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
def notepitch(n):
step = notenames[n[0]]
octind = 2
if n[1] == '#':
step += 1
elif n[1] == 'b':
step -= 1
else:
octind = 1
octv = int(n[octind:])
exp = 12*octv+step-57
return 440 * 2**(exp/12)
def lerp(tbl, phase):
whole, frac = phase >> 16, phase & 0xFFFF
x0 = tbl[whole]
x1 = tbl[(whole+1)&0xFF]
return ((x0 * ((1<<16)-frac)) + (x1 * frac))>>16
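# Illustration (not part of the original demo): phase is a 24-bit fixed-point
# value with 8 integer bits indexing the 256-entry table and 16 fractional
# bits used for interpolation. A phase of (3 << 16) + 0x8000 reads halfway
# between tbl[3] and tbl[4]:
#
#     >>> lerp([0, 10, 20, 30, 40] + [0] * 251, (3 << 16) + 0x8000)
#     35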
def randwords():
y = 2463534242
while True:
y ^= (y << 13) & 0xFFFFFFFF
y ^= (y >> 17)
y ^= (y << 5) & 0xFFFFFFFF
yield (y & 0xFFFF) - 32768
yield (y >> 16) - 32768
rw = randwords()
def pluck(note, dur):
out = []
tbl = [next(rw)//4 for n in range(256)]
phase = 0
pos = 0
inc = int(round(notepitch(note)*2**24/sampling))
for n in range(int(dur*sampling)):
tbl[pos] = (tbl[pos] + tbl[(pos-1)&0xFF])//2
pos += 1
pos &= 0xFF
out.append(lerp(tbl, phase))
phase += inc
phase &= 0xFFFFFF
return out
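# The averaging step inside pluck() -- tbl[pos] = (tbl[pos] + tbl[pos-1])//2
# -- is the Karplus-Strong decay: it acts as a lowpass filter over the
# circular buffer, so the initial noise burst settles into a decaying,
# string-like periodic tone whose pitch is set by how fast `phase` sweeps
# the 256-entry table.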
crochet = 60/bpm
song = []
songdur = 0.0
dampfrac = 1/8
def addnotes(notes, tm):
    # each note in the list advances the song clock, so multi-note lists
    # would be staggered in time rather than played as a chord
    global songdur
    for n in notes:
        song.append((n, songdur, tm*(1-dampfrac), tm*dampfrac))
        songdur += tm
def quarter(notes):
addnotes(notes, crochet)
def eighth(notes):
addnotes(notes, crochet/2)
def half(notes):
addnotes(notes, crochet*2)
quarter(['F3'])
quarter(['G3'])
quarter(['A3'])
quarter(['F3'])
quarter(['F3'])
quarter(['G3'])
quarter(['A3'])
quarter(['F3'])
quarter(['A3'])
quarter(['B3'])
half(['C4'])
quarter(['A3'])
quarter(['B3'])
half(['C4'])
eighth(['C4'])
eighth(['D4'])
eighth(['C4'])
eighth(['B3'])
quarter(['A3'])
quarter(['F3'])
eighth(['C4'])
eighth(['D4'])
eighth(['C4'])
eighth(['B3'])
quarter(['A3'])
quarter(['F3'])
quarter(['F3'])
quarter(['C3'])
half(['F3'])
quarter(['F3'])
quarter(['C3'])
half(['F3'])
with wave.open('pluck.wav', 'wb') as f:
f.setnchannels(1)
f.setsampwidth(2)
f.setframerate(sampling)
out = array.array('h', [0]*int(sampling*songdur))
for note, start, dur, damp in song:
buf = pluck(note, dur+damp)
for n in range(int(dur*sampling)):
out[n+int(start*sampling)] += buf[n]
for n in range(int(dur*sampling), int((dur+damp)*sampling)):
out[n+int(start*sampling)] += int(buf[n]*((dur+damp)*sampling-n)/(damp*sampling))
f.writeframes(array.array('h', out))
from django.test import SimpleTestCase
from corehq.apps.app_manager.xpath import (
CaseSelectionXPath,
CaseTypeXpath,
LedgerdbXpath,
XPath,
)
class XPathTest(SimpleTestCase):
def test_paren(self):
xp = XPath('/data/q1')
self.assertEqual('/data/q1', xp.paren())
self.assertEqual('(/data/q1)', xp.paren(force=True))
self.assertEqual('(/data/q1)', XPath('/data/q1', compound=True).paren())
def test_slash(self):
self.assertEqual('/data/1/2', XPath().slash('/data').slash('1').slash('2'))
self.assertEqual('/data/1/2', XPath('/data').slash('1').slash('2'))
def test_select(self):
self.assertEqual("/data/1[anything]", XPath('/data/1').select_raw('anything'))
self.assertEqual("/data/1[a='b']", XPath('/data/1').select('a', 'b'))
self.assertEqual("/data/1[a=/data/b]", XPath('/data/1').select('a', XPath('/data/b')))
def test_count(self):
self.assertEqual('count(/data/a)', XPath('/data/a').count())
def test_eq_neq(self):
self.assertEqual('a = b', XPath('a').eq('b'))
self.assertEqual('a != b', XPath('a').neq('b'))
def test_if(self):
self.assertEqual('if(a, b, c)', XPath.if_('a', 'b', 'c'))
def test_and_or(self):
self.assertEqual('a and b and c', XPath.and_('a', 'b', 'c'))
self.assertEqual('a and (b and c)', XPath.and_('a', XPath.and_('b', 'c')))
self.assertEqual('a or b or c', XPath.or_('a', 'b', 'c'))
self.assertEqual('(a or b) or c', XPath.or_(XPath.or_('a', 'b'), XPath('c')))
def test_not(self):
self.assertEqual('not a', XPath.not_('a'))
self.assertEqual('not (a or b)', XPath.not_(XPath.or_('a', 'b')))
def test_date(self):
self.assertEqual('date(a)', XPath.date('a'))
def test_int(self):
self.assertEqual('int(a)', XPath.int('a'))
def test_complex(self):
xp = XPath.and_(
XPath('a').eq('1'),
XPath('b').neq(XPath.string('')),
XPath.or_(
XPath('c').eq(XPath.string('')),
XPath.date('d').neq('today()')
))
self.assertEqual("a = 1 and b != '' and (c = '' or date(d) != today())", xp)
class CaseSelectionXPathTests(SimpleTestCase):
def setUp(self):
self.select_by_water = CaseSelectionXPath("'black'")
self.select_by_water.selector = 'water'
def test_case(self):
self.assertEqual(
self.select_by_water.case(),
"instance('casedb')/casedb/case[water='black']"
)
def test_instance_name(self):
self.assertEqual(
self.select_by_water.case(instance_name='doobiedb'),
"instance('doobiedb')/doobiedb/case[water='black']"
)
def test_case_name(self):
self.assertEqual(
self.select_by_water.case(instance_name='doobiedb', case_name='song'),
"instance('doobiedb')/doobiedb/song[water='black']"
)
def test_case_type(self):
self.assertEqual(
CaseTypeXpath('song').case(),
"instance('casedb')/casedb/case[@case_type='song']"
)
def test_ledger(self):
self.assertEqual(
LedgerdbXpath('ledger_id').ledger(),
"instance('ledgerdb')/ledgerdb/ledger[@entity-id=instance('commcaresession')/session/data/ledger_id]"
)
__version__ = "1.2.0"
from .utils import drawLandmark_multiple, detection_adapter, bbox_from_pts, Aligner
from .fast_alignment import *
from .face_detection import *
from .face_reconstruction import *
import unittest
class TestTransition(unittest.TestCase):
@unittest.skip("")
def test___init__(self):
# transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec)
assert False # TODO: implement your test here
@unittest.skip("")
def test_energy(self):
# transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec)
# self.assertEqual(expected, transition.energy())
assert False # TODO: implement your test here
@unittest.skip("")
def test_interpolate(self):
# transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec)
# self.assertEqual(expected, transition.interpolate(temperature_grid, density_grid))
assert False # TODO: implement your test here
class TestTransitionPool(unittest.TestCase):
@unittest.skip("")
def test___init__(self):
# transition_pool = TransitionPool(transitions)
assert False # TODO: implement your test here
@unittest.skip("")
def test___iter__(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.__iter__())
assert False # TODO: implement your test here
@unittest.skip("")
def test_append_file(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.append_file(filename))
assert False # TODO: implement your test here
@unittest.skip("")
def test_append_files(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.append_files(files))
assert False # TODO: implement your test here
@unittest.skip("")
def test_coeffs(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.coeffs())
assert False # TODO: implement your test here
@unittest.skip("")
def test_create_atomic_data(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.create_atomic_data(ad))
assert False # TODO: implement your test here
@unittest.skip("")
def test_energies(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.energies())
assert False # TODO: implement your test here
@unittest.skip("")
def test_filter_energy(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.filter_energy(lo, hi, unit))
assert False # TODO: implement your test here
@unittest.skip("")
def test_filter_type(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.filter_type(*type_names))
assert False # TODO: implement your test here
@unittest.skip("")
def test_from_adf15(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.from_adf15(files))
assert False # TODO: implement your test here
@unittest.skip("")
def test_interpolate(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.interpolate(temperature_grid, density_grid))
assert False # TODO: implement your test here
@unittest.skip("")
def test_size(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.size())
assert False # TODO: implement your test here
@unittest.skip("")
def test_sum_transitions(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.sum_transitions())
assert False # TODO: implement your test here
@unittest.skip("")
def test_wavelengths(self):
# transition_pool = TransitionPool(transitions)
# self.assertEqual(expected, transition_pool.wavelengths())
assert False # TODO: implement your test here
class TestPBremsstrahlung(unittest.TestCase):
@unittest.skip("")
def test_p_bremsstrahlung(self):
# self.assertEqual(expected, P_bremsstrahlung(k, Te, ne))
assert False # TODO: implement your test here
class TestCoefficientFactory(unittest.TestCase):
@unittest.skip("")
def test___init__(self):
# coefficient_factory = CoefficientFactory(atomic_data, transition_pool, clip_limit)
assert False # TODO: implement your test here
@unittest.skip("")
def test_create(self):
# coefficient_factory = CoefficientFactory(atomic_data, transition_pool, clip_limit)
# self.assertEqual(expected, coefficient_factory.create(temperature_grid, density_grid))
assert False # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
from django.test import TestCase
from meadow.models import Book
from meadow.tests.factories.book import BookFactory
from meadow.utils.book_searcher import book_preview, search_by_title
class BookPreviewTestCase(TestCase):
def test_book_preview_book_exists(self):
some_book = BookFactory()
result = book_preview(some_book.id)
self.assertEqual(result["title"], some_book.title)
self.assertEqual(result["description"], some_book.description)
self.assertEqual(result["author"]["first_name"], some_book.author.first_name)
self.assertEqual(result["author"]["last_name"], some_book.author.last_name)
def test_book_preview_book_doesnot_exist(self):
some_book = BookFactory()
# there is definitely no book with invalid_id in the DB
invalid_id = some_book.id + 1
# the function should raise an exception if the id is invalid
with self.assertRaises(Book.DoesNotExist):
book_preview(invalid_id)
class BookSearchTestCase(TestCase):
def test_search_empty_title(self):
books = [BookFactory() for _ in range(5)]
title = ""
result = search_by_title(title)
self.assertEqual(len(books), len(result))
def test_search_some_unique_title(self):
books = [BookFactory() for _ in range(5)]
book_to_search = books[1]
title = book_to_search.title
result = search_by_title(title)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].title, title)
def test_search_title_doesnot_exist(self):
[BookFactory() for _ in range(5)]
title = "Some cook title which doesn't exist in DB"
result = search_by_title(title)
self.assertEqual(result, [])
def f():
pass
a = f()
b = f()
c = f()
str
from itertools import islice
from queue import Queue
from typing import Iterator
import numpy as np
def limited_queue_iterator(queue: Queue, max_num_elements: int) -> Iterator:
"""Construct an iterator from a queue. The iterator will stop after max_num_elements."""
for _ in range(max_num_elements):
yield queue.get()
def sampled_iterator(input_iter, num_elements: int, sampling_rate: float):
if sampling_rate == 1.0:
yield from islice(input_iter, num_elements)
else:
num_taken = 0
for element in input_iter:
if np.random.rand() < sampling_rate:
yield element
num_taken += 1
if num_taken >= num_elements:
break
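# A hypothetical usage sketch (not part of the original module):
if __name__ == "__main__":
    q: Queue = Queue()
    for i in range(10):
        q.put(i)
    print(list(limited_queue_iterator(q, 3)))  # -> [0, 1, 2]
    # keep roughly half of the elements, stopping after five are taken
    print(list(sampled_iterator(range(100), 5, 0.5)))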
"""
This module illustrates code that accepts a single integer, a character, and an
uppercase flag as positional arguments and print this character 'n' amount of
times. If the uppercase flag is set to true, it prints uppercased.
"""
import argparse
def main(character, number):
print (character * number)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('number', type=int, help='A number')
parser.add_argument('-c', type=str,
help='Character to print (defaults to #)', default='#')
parser.add_argument('-U', action='store_true', default=False,
dest='uppercase', help='Uppercase the character (defaults to False)')
args = parser.parse_args()
if args.uppercase:
args.c = args.c.upper()
main(args.c, args.number)
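# Example invocation (assuming this file is saved as repeat.py):
#
#     $ python repeat.py 5 -c x -U
#     XXXXX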
"""
Export module
"""
import os
import os.path
import sqlite3
import sys
import regex as re
# pylint: disable=E0611
# Defined at runtime
from .index import Index
class Export:
"""
Exports database rows into a text file line-by-line.
"""
@staticmethod
def stream(dbfile, output):
"""
Iterates over each row in dbfile and writes text to output file
Args:
dbfile: SQLite file to read
output: output file to store text
"""
with open(output, "w", encoding="utf-8") as out:
# Connection to database file
db = sqlite3.connect(dbfile)
cur = db.cursor()
# Get all indexed text
cur.execute(Index.SECTION_QUERY)
count = 0
for _, name, text in cur:
if not name or not re.search(Index.SECTION_FILTER, name.lower()):
count += 1
if count % 1000 == 0:
print(f"Streamed {count} documents", end="\r")
# Write row
if text:
out.write(text + "\n")
print(f"Iterated over {count} total rows")
# Free database resources
db.close()
@staticmethod
def run(output, path):
"""
Exports data from database to text file, line by line.
Args:
output: output file path
path: model path, if None uses default path
"""
# Derive path to dbfile
dbfile = os.path.join(path, "articles.sqlite")
# Stream text from database to file
Export.stream(dbfile, output)
if __name__ == "__main__":
# Export data
Export.run(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
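# Example invocation (assuming this module is executed directly as a script;
# paths are illustrative):
#
#     python export.py /tmp/articles.txt /path/to/model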
#!/usr/bin/env python
import argparse
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
try:
import comet_ml
use_tensorboard = False
except ImportError:
use_tensorboard = True
import datasets
import numpy as np
import torch
import transformers
from datasets import concatenate_datasets, load_dataset, load_metric
from scipy.stats import entropy
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: Optional[str] = field(
default=None,
metadata={
"help": "The name of the task to train on: "
+ ", ".join(task_to_keys.keys())
},
)
dataset_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the dataset to use (via the datasets library)."},
)
dataset_config_name: Optional[str] = field(
default=None,
metadata={
"help": "The configuration name of the dataset to use (via the datasets library)."
},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached preprocessed datasets or not."},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
train_file: Optional[str] = field(
default=None,
metadata={"help": "A csv or a json file containing the training data."},
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "A csv or a json file containing the validation data."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "A csv or a json file containing the test data."},
)
def __post_init__(self):
if self.task_name is not None:
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys.keys():
raise ValueError(
"Unknown task, you should pick one in "
+ ",".join(task_to_keys.keys())
)
elif self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError(
"Need either a GLUE task, a training/validation file or a dataset name."
)
else:
train_extension = self.train_file.split(".")[-1]
assert train_extension in [
"csv",
"json",
], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={
"help": "Path to pretrained model or model identifier from huggingface.co/models"
}
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from huggingface.co"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
model_revision: str = field(
default="main",
metadata={
"help": "The specific model version to use (can be a branch name, tag name or commit id)."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def _train(raw_datasets, args_dict=None):
"""Reference: https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py"""
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if args_dict is not None:
model_args, data_args, training_args = parser.parse_dict(args_dict)
elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if (
os.path.isdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif (
last_checkpoint is not None and training_args.resume_from_checkpoint is None
):
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in [
"float32",
"float64",
]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the raw_datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [
name for name in raw_datasets["train"].column_names if name != "label"
]
if (
"sentence1" in non_label_column_names
and "sentence2" in non_label_column_names
):
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {
i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)
}
else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif data_args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],)
if sentence2_key is None
else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(
*args, padding=padding, max_length=max_seq_length, truncation=True
)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [
(label_to_id[l] if l != -1 else -1) for l in examples["label"]
]
return result
with training_args.main_process_first(desc="dataset map pre-processing"):
raw_datasets = raw_datasets.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if (
"validation" not in raw_datasets
and "validation_matched" not in raw_datasets
):
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets[
"validation_matched" if data_args.task_name == "mnli" else "validation"
]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if (
training_args.do_predict
or data_args.task_name is not None
or data_args.test_file is not None
):
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets[
"test_matched" if data_args.task_name == "mnli" else "test"
]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(
range(data_args.max_predict_samples)
)
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
else:
metric = load_metric("accuracy")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(train_dataset)
)
metrics_prefix = f"train_size_{min(max_train_samples, len(train_dataset))}_4e_all"
if trainer.is_world_process_zero() and not use_tensorboard:
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment.set_name(metrics_prefix)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics(metrics_prefix + "_train_metrics", metrics)
trainer.save_metrics(metrics_prefix + "_train_metrics", metrics)
trainer.save_state()
# Evaluation
evaluation_metrics = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(raw_datasets["validation_mismatched"])
for eval_dataset, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples
if data_args.max_eval_samples is not None
else len(eval_dataset)
)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics(metrics_prefix + "eval_metrics", metrics)
trainer.save_metrics(metrics_prefix + "eval_metrics", metrics)
evaluation_metrics = metrics
test_predictions = None
if training_args.do_predict:
logger.info("*** Predict ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
predict_datasets = [predict_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
predict_datasets.append(raw_datasets["test_mismatched"])
for predict_dataset, task in zip(predict_datasets, tasks):
            # Remove the `label` column because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns("label")
test_predictions = trainer.predict(
predict_dataset, metric_key_prefix=metrics_prefix + "_predict_metrics"
).predictions
return evaluation_metrics, test_predictions
def run_on_all_train_set(hf_args, raw_datasets):
evaluation_metrics, _ = _train(raw_datasets, args_dict=hf_args)
def _calculate_entropy(logits):
probas = torch.nn.Softmax(dim=1)(torch.from_numpy(logits))
samples_entropy = entropy(probas.transpose(0, 1).cpu())
samples_entropy = torch.from_numpy(samples_entropy)
return samples_entropy
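# Sanity check for _calculate_entropy (illustrative values): for two samples with
# logits [[0., 0.], [4., -4.]], the first (uniform softmax) has entropy
# ln(2) ~= 0.693 while the second is ~0.003, so torch.topk on the entropies
# selects the most uncertain predictions first.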
def _ask_oracle(unlabeled_samples):
    # In our example, the original dataset is already labeled.
    # With a genuinely unlabeled dataset, this is where you would send the samples to a labeling platform.
    return unlabeled_samples
def run_active_learning(
hf_args, raw_datasets, target_score, initial_train_dataset_size, query_samples_count
):
original_train_dataset = raw_datasets["train"]
train_dataset = original_train_dataset.select(
random.sample(
range(original_train_dataset.num_rows),
int(original_train_dataset.num_rows * initial_train_dataset_size),
)
)
    # fake unlabeled dataset
unlabeled_dataset = original_train_dataset.filter(
lambda s: s["idx"] not in train_dataset["idx"]
)
raw_datasets["train"] = train_dataset
raw_datasets["test"] = unlabeled_dataset
hf_args["do_predict"] = True
current_score = -1
while unlabeled_dataset.num_rows > 0 and current_score < target_score:
        logger.info(f'Training using {raw_datasets["train"].num_rows} samples')
evaluation_metrics, test_predictions = _train(raw_datasets, args_dict=hf_args)
current_score = evaluation_metrics["eval_combined_score"]
samples_entropy = _calculate_entropy(test_predictions)
samples_entropy = torch.topk(samples_entropy, query_samples_count)
new_train_samples = unlabeled_dataset.select(samples_entropy.indices.tolist())
new_train_samples = _ask_oracle(new_train_samples)
extended_train_dataset = concatenate_datasets(
[raw_datasets["train"], new_train_samples],
info=original_train_dataset.info,
)
unlabeled_dataset = original_train_dataset.filter(
lambda s: s["idx"] not in extended_train_dataset["idx"]
)
raw_datasets["train"] = extended_train_dataset
raw_datasets["test"] = unlabeled_dataset
def main(
task_name,
do_al,
random_seed,
target_score=None,
initial_train_dataset_size=None,
query_samples_count=None,
epochs=3,
batch_size=32,
):
random.seed(random_seed)
    raw_datasets = load_dataset("glue", task_name)
    hf_args = {
        "model_name_or_path": "bert-base-cased",
        "task_name": task_name,
        "do_train": True,
        "do_eval": True,
        "max_seq_length": 128,
        "per_device_train_batch_size": batch_size,
        "per_device_eval_batch_size": batch_size,
        "learning_rate": 2e-5,
        "overwrite_output_dir": True,
        "output_dir": f"/tmp/{task_name}/",
        "logging_strategy": "steps",
        "logging_steps": 50,
        "evaluation_strategy": "steps",
        "eval_steps": 50,
        "seed": 12,
        # Using max_steps instead of epochs so that all active learning experiments
        # run the same number of training iterations.
        "max_steps": int((raw_datasets["train"].num_rows / batch_size) * epochs),
    }
    if use_tensorboard:
        hf_args.update(
            {
                "logging_dir": f"/tmp/{task_name}/tensorboard",
                "report_to": "tensorboard",
            }
        )
if not do_al:
run_on_all_train_set(hf_args, raw_datasets)
else:
run_active_learning(
hf_args,
raw_datasets,
target_score,
initial_train_dataset_size,
query_samples_count,
)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--do_al", action="store_true", default=False)
parser.add_argument("--target_score", type=float, default=0.8568075117370892)
parser.add_argument("--task_name", type=str, default="mrpc")
parser.add_argument("--random_seed", type=int, default=123)
parser.add_argument("--initial_train_dataset_size", type=float, default=0.3)
parser.add_argument("--query_samples_count", type=int, default=256)
return parser.parse_args()
if __name__ == "__main__":
args = _parse_args()
main(
args.task_name,
args.do_al,
args.random_seed,
target_score=args.target_score,
initial_train_dataset_size=args.initial_train_dataset_size,
query_samples_count=args.query_samples_count,
)
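# Example invocation (illustrative; the script filename is hypothetical and the
# flag values shown are just the argparse defaults above):
#   python run_glue_al.py --do_al --task_name mrpc --random_seed 123 \
#       --initial_train_dataset_size 0.3 --query_samples_count 256 \
#       --target_score 0.8568075117370892
# Omit --do_al to train once on the full training set instead.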
|
nilq/baby-python
|
python
|
import os
import shutil
import tempfile
import ply.yacc as yacc
import sympy
from . import _node as node
from ._qasmerror import QasmError
from ._qasmlexer import QasmLexer
class QasmParser(object):
pass
def __init__(self, filename):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def update_symtab(self, obj):
pass
def verify_declared_bit(self, obj):
pass
def verify_bit_list(self, obj):
pass
def verify_exp_list(self, obj):
pass
def verify_as_gate(self, obj, bitlist, arglist=None):
pass
def verify_reg(self, obj, object_type):
pass
def verify_reg_list(self, obj, object_type):
pass
def id_tuple_list(self, id_node):
pass
def verify_distinct(self, list_of_nodes):
pass
def pop_scope(self):
pass
def push_scope(self):
pass
def p_main(self, program):
pass
def p_program_0(self, program):
pass
def p_program_1(self, program):
pass
def p_statement(self, program):
pass
def p_format(self, program):
pass
def p_format_0(self, program):
pass
def p_id(self, program):
pass
def p_id_e(self, program):
pass
def p_indexed_id(self, program):
pass
def p_primary(self, program):
pass
def p_id_list_0(self, program):
pass
def p_id_list_1(self, program):
pass
def p_gate_id_list_0(self, program):
pass
def p_gate_id_list_1(self, program):
pass
def p_bit_list_0(self, program):
pass
def p_bit_list_1(self, program):
pass
def p_primary_list_0(self, program):
pass
def p_primary_list_1(self, program):
pass
def p_decl(self, program):
pass
def p_qreg_decl(self, program):
pass
def p_qreg_decl_e(self, program):
pass
def p_creg_decl(self, program):
pass
def p_creg_decl_e(self, program):
pass
def p_gate_decl_0(self, program):
pass
def p_gate_decl_1(self, program):
pass
def p_gate_decl_2(self, program):
pass
def p_gate_scope(self, program):
pass
def p_gate_body_0(self, program):
pass
def p_gate_body_1(self, program):
pass
def p_gate_op_list_0(self, program):
pass
def p_gate_op_list_1(self, program):
pass
def p_unitary_op_0(self, program):
pass
def p_unitary_op_1(self, program):
pass
def p_unitary_op_2(self, program):
pass
def p_unitary_op_3(self, program):
pass
def p_unitary_op_4(self, program):
pass
def p_gate_op_0(self, program):
pass
def p_gate_op_0e1(self, p):
pass
def p_gate_op_0e2(self, program):
pass
def p_gate_op_1(self, program):
pass
def p_gate_op_1e1(self, program):
pass
def p_gate_op_1e2(self, program):
pass
def p_gate_op_2(self, program):
pass
def p_gate_op_2e(self, program):
pass
def p_gate_op_3(self, program):
pass
def p_gate_op_4(self, program):
pass
def p_gate_op_4e0(self, program):
pass
def p_gate_op_4e1(self, program):
pass
def p_gate_op_5(self, program):
pass
def p_gate_op_5e(self, program):
pass
def p_opaque_0(self, program):
pass
def p_opaque_1(self, program):
pass
def p_opaque_2(self, program):
pass
def p_opaque_1e(self, program):
pass
def p_measure(self, program):
pass
def p_measure_e(self, program):
pass
def p_barrier(self, program):
pass
def p_reset(self, program):
pass
def p_if(self, program):
pass
def p_quantum_op(self, program):
pass
def p_unary_0(self, program):
pass
def p_unary_1(self, program):
pass
def p_unary_2(self, program):
pass
def p_unary_3(self, program):
pass
def p_unary_4(self, program):
pass
def p_unary_6(self, program):
pass
def p_expression_1(self, program):
pass
def p_expression_0(self, program):
pass
def p_expression_2(self, program):
pass
def p_exp_list_0(self, program):
pass
def p_exp_list_1(self, program):
pass
def p_ignore(self, program):
pass
def p_error(self, program):
pass
def find_column(self, input_, token):
pass
def get_tokens(self):
pass
def parse_debug(self, val):
pass
def parse(self, data):
pass
def print_tree(self):
pass
def run(self, data):
pass
|
nilq/baby-python
|
python
|
from django.db import DatabaseError
from django.test import TestCase
from app.models import BigInteger
class BigIntegerTests(TestCase):
def setUp(self):
self.int0_id = BigInteger.objects.create(big_integer=0).id
self.int1_id = BigInteger.objects.create(big_integer=1111).id
def test_create_integer(self):
int0 = BigInteger.objects.get(id=self.int0_id)
int1 = BigInteger.objects.get(id=self.int1_id)
self.assertEqual(int0.big_integer, 0)
self.assertEqual(int1.big_integer, 1111)
self.assertLess(int0.big_integer, int1.big_integer)
self.assertGreater(int1.big_integer, int0.big_integer)
def test_extremal_values(self):
int_biggest = BigInteger.objects.create(big_integer=18446744073709551615)
self.assertEqual(int_biggest.big_integer, 18446744073709551615)
int_smallest = BigInteger.objects.create(big_integer=-9223372036854775808)
self.assertEqual(int_smallest.big_integer, -9223372036854775808)
self.assertLess(int_smallest.big_integer, int_biggest.big_integer)
with self.assertRaises(ValueError):
BigInteger.objects.create(big_integer=18446744073709551616)
with self.assertRaises(ValueError):
BigInteger.objects.create(big_integer=-9223372036854776840)
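# A sketch of the model these tests assume (hypothetical; the real app.models is
# not shown). The accepted range (-2**63 .. 2**64 - 1) exceeds Django's stock
# BigIntegerField, so `big_integer` is presumably a custom field that raises
# ValueError for out-of-range values:
#
#     class BigInteger(models.Model):
#         big_integer = UnsignedCapableBigIntegerField()  # field name is an assumption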
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'burndown.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CWidgetBurndown(object):
def setupUi(self, CWidgetBurndown):
CWidgetBurndown.setObjectName("CWidgetBurndown")
CWidgetBurndown.resize(763, 549)
CWidgetBurndown.setStyleSheet("background-color: rgb(17, 149, 189);")
self.gridLayout_2 = QtWidgets.QGridLayout(CWidgetBurndown)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBox = QtWidgets.QGroupBox(CWidgetBurndown)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(500, 500))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_projectName = QtWidgets.QLabel(self.groupBox)
self.label_projectName.setMinimumSize(QtCore.QSize(0, 30))
self.label_projectName.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_projectName.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius:5px;")
self.label_projectName.setObjectName("label_projectName")
self.horizontalLayout_2.addWidget(self.label_projectName)
self.label_sprintNo = QtWidgets.QLabel(self.groupBox)
self.label_sprintNo.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_sprintNo.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius:5px;")
self.label_sprintNo.setObjectName("label_sprintNo")
self.horizontalLayout_2.addWidget(self.label_sprintNo)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.gridLayout.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.widget = QtWidgets.QWidget(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(100, 100))
self.widget.setObjectName("widget")
self.gridLayout.addWidget(self.widget, 1, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(141, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.btnGetback = QtWidgets.QPushButton(self.groupBox)
self.btnGetback.setMinimumSize(QtCore.QSize(100, 30))
self.btnGetback.setStyleSheet("QPushButton{\n"
"border:1px solid black;\n"
"border-radius:4px;\n"
"padding:4px;\n"
"}\n"
"QPushButton::hover{\n"
"border:1px solid black;\n"
"border-radius:4px;\n"
"background-color:lightgray;\n"
"padding:4px\n"
"}\n"
"QPushButton::pressed{\n"
"border:1px solid black;\n"
"border-radius:4px;\n"
"background-color:gray;\n"
"padding:4px\n"
"}")
self.btnGetback.setObjectName("btnGetback")
self.horizontalLayout.addWidget(self.btnGetback)
self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBox, 0, 1, 2, 1)
spacerItem2 = QtWidgets.QSpacerItem(223, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem2, 0, 2, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem3, 1, 0, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(17, 80, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem4, 2, 1, 1, 1)
self.retranslateUi(CWidgetBurndown)
QtCore.QMetaObject.connectSlotsByName(CWidgetBurndown)
def retranslateUi(self, CWidgetBurndown):
_translate = QtCore.QCoreApplication.translate
CWidgetBurndown.setWindowTitle(_translate("CWidgetBurndown", "Form"))
self.label_projectName.setText(_translate("CWidgetBurndown", "项目名称"))
self.label_sprintNo.setText(_translate("CWidgetBurndown", "迭代1"))
self.btnGetback.setText(_translate("CWidgetBurndown", "return"))
import ks24_03_rc
|
nilq/baby-python
|
python
|
import unittest
from mock import MagicMock
from abeja.datasets.dataset import Dataset, Datasets
from abeja.datasets.dataset_item import DatasetItems
class TestDataset(unittest.TestCase):
def setUp(self):
self.organization_id = '1234567890120'
self.dataset_id = '1234567890121'
self.dataset_item_id = '1234567890122'
self.name = 'test dataset'
self.type = 'detection'
self.props = {
"categories": [
{
"id": 1,
"name": "犬"
},
{
"id": 2,
"name": "猫"
}
],
"id": 0,
"name": "test dog or cat"
}
self.total_count = 3670
self.source_data = [
{
'data_type': 'image/jpeg',
'data_uri': 'datalake://1200123803688/20170815T044617-f20dde80-1e3b-4496-bc06-1b63b026b872',
'height': 500,
'width': 200}]
self.attributes = {
'classification': {
'id': 1,
'label': '犬'
},
'custom': {
'anything': 'something'
},
'detection': [
{
'id': 2,
'label': '猫',
'rect': [795, 118, 1143, 418]
}
]
}
def _build_dataset_response(self):
return {
"organization_id": self.organization_id,
"dataset_id": self.dataset_id,
"name": self.name,
"props": self.props,
"total_count": self.total_count,
"type": self.type,
"created_at": "2017-01-01T00:00:00.000000",
"updated_at": "2017-01-01T00:00:00.000000"
}
def test_init(self):
dataset = Dataset(
None,
self.organization_id,
self.dataset_id,
name=self.name,
type=self.type,
props=self.props,
total_count=self.total_count)
self.assertEqual(dataset.organization_id, self.organization_id)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
def test_skip_unrecognized_arguments(self):
# make sure constructor can ignore unknown parameters because API
# response can change any time
dataset = Dataset(None, self.organization_id, self.dataset_id,
name=self.name, type=self.type, props=self.props,
total_count=self.total_count,
____undefined='____undefined')
self.assertEqual(dataset.organization_id, self.organization_id)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
class TestDatasets(unittest.TestCase):
def setUp(self):
self.organization_id = '1234567890120'
self.dataset_id = '1234567890121'
self.dataset_item_id = '1234567890122'
self.name = 'test dataset'
self.type = 'detection'
self.props = {
"categories": [
{
"id": 1,
"name": "犬"
},
{
"id": 2,
"name": "猫"
}
],
"id": 0,
"name": "test dog or cat"
}
self.total_count = 3670
self.source_data = {
'data_type': 'image/jpeg',
'data_uri': 'datalake://1200123803688/20170815T044617-f20dde80-1e3b-4496-bc06-1b63b026b872',
'height': 500,
'width': 200}
self.attributes = {
'classification': {
'id': 1,
'label': '犬'
},
'custom': {
'anything': 'something'
},
'detection': [
{
'id': 2,
'label': '猫',
'rect': [795, 118, 1143, 418]
}
]
}
def _build_dataset_response(self):
return {
"organization_id": self.organization_id,
"dataset_id": self.dataset_id,
"name": self.name,
"props": self.props,
"total_count": self.total_count,
"type": self.type,
"created_at": "2017-01-01T00:00:00.000000",
"updated_at": "2017-01-01T00:00:00.000000"
}
def _build_dataset_item_response(self):
return {
'dataset_id': self.dataset_id,
'dataset_item_id': self.dataset_item_id,
'source_data': self.source_data,
'attributes': self.attributes,
'created_at': '2017-01-01T00:00:00.000000',
'updated_at': '2017-01-01T00:00:00.000000'
}
def _build_dataset_items_response(self):
return {
'items': [
self._build_dataset_item_response()
],
'next_page_token': 'dummy page token'
}
def test_create(self):
mock_api = MagicMock()
mock_api.create_dataset.return_value = self._build_dataset_response()
datasets = Datasets(mock_api, self.organization_id)
dataset = datasets.create(self.name, self.type, self.props)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertDictEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
mock_api.create_dataset.assert_called_once()
def test_get(self):
mock_api = MagicMock()
mock_api.get_dataset.return_value = self._build_dataset_response()
datasets = Datasets(mock_api, self.organization_id)
dataset = datasets.get(self.dataset_id)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertDictEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
mock_api.get_dataset.assert_called_once()
def test_list(self):
mock_api = MagicMock()
mock_api.list_datasets.return_value = [self._build_dataset_response()]
datasets = Datasets(mock_api, self.organization_id)
_datasets = datasets.list()
dataset = _datasets[0]
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertDictEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
mock_api.list_datasets.assert_called_once()
def test_delete(self):
mock_api = MagicMock()
mock_api.delete_dataset.return_value = self._build_dataset_response()
datasets = Datasets(mock_api, self.organization_id)
dataset = datasets.delete(self.dataset_id)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.dataset_id, self.dataset_id)
self.assertEqual(dataset.name, self.name)
self.assertEqual(dataset.type, self.type)
self.assertDictEqual(dataset.props, self.props)
self.assertEqual(dataset.total_count, self.total_count)
self.assertIsInstance(dataset.dataset_items, DatasetItems)
mock_api.delete_dataset.assert_called_once()
|
nilq/baby-python
|
python
|
from tensorflow.keras.layers import Dense  # assumed import; `model` and `total_words` are defined elsewhere
model.add(Dense(total_words, activation='softmax'))
|
nilq/baby-python
|
python
|
from django.urls import path, include
from snippets import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
path('pure/snippets/', views.pure_snippet_list),
path('pure/snippets/<int:pk>/', views.pure_snippet_detail),
path('func/snippets/', views.func_api_view_snippet_list),
path('func/snippets/<int:pk>', views.func_api_view_snippet_detail),
path('class/snippets/', views.ClassSnippetList.as_view()),
path('class/snippets/<int:pk>/', views.ClassSnippetDetail.as_view()),
path('users/', views.UserList.as_view()),
path('users/<int:pk>/', views.UserDetail.as_view()),
]
urlpatterns += [
path('api-auth/', include('rest_framework.urls')),
]
urlpatterns = format_suffix_patterns(urlpatterns)
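# format_suffix_patterns adds an optional format suffix to each route, so clients
# can pick a renderer explicitly, e.g. (illustrative request paths):
#   GET /class/snippets/4.json   -> JSON renderer
#   GET /class/snippets/4.api    -> browsable API renderer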
|
nilq/baby-python
|
python
|
import logging
import sys
from pathlib import Path
import yaml
def get_version() -> str:
"""Checks _version.py or build metadata for package version.
Returns:
str: Version string.
"""
try:
from ._version import version
return version
except ModuleNotFoundError:
logging.debug("No _version.py found")
    # importlib.metadata is only available on Python 3.8+
if sys.version_info >= (3, 8):
# pylint: disable=no-member
import importlib.metadata
try:
return importlib.metadata.version("dbt-metabase")
except importlib.metadata.PackageNotFoundError:
logging.warning("No version found in metadata")
return "0.0.0-UNKONWN"
def load_config() -> dict:
config_data = {}
config_path = Path.home() / ".dbt-metabase"
if (config_path / "config.yml").exists():
with open(config_path / "config.yml", "r", encoding="utf-8") as f:
config_data = yaml.safe_load(f).get("config", {})
elif (config_path / "config.yaml").exists():
with open(config_path / "config.yaml", "r", encoding="utf-8") as f:
config_data = yaml.safe_load(f).get("config", {})
return config_data
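# Illustrative ~/.dbt-metabase/config.yml shape this loader expects: options live
# under a top-level `config` mapping (the option names below are assumptions, not
# defined in this module):
#
#   config:
#     metabase_url: https://metabase.example.com
#     dbt_manifest_path: ./target/manifest.json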
|
nilq/baby-python
|
python
|
from discord.ext import commands
import asyncio
import discord
class Vcwhite(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
        #Enter the IDs of the voice channels to watch for notifications
        allow_01 = self.bot.get_channel(762575797327757322) #dog
        allow_02 = self.bot.get_channel(762576631810228256) #cat
        allow_03 = self.bot.get_channel(780611246155497482) #turtle
        allow_04 = self.bot.get_channel(812312211112198144) #dinosaur
        #Send a message when the channel is one of the targets and its member count is 1.
if after.channel in [allow_01, allow_02, allow_03, allow_04]:
if before.channel is None and after.channel and len(after.channel.members) == 1:
                #ID of the text channel to post the notification to
channel_id = 822096585429090324
text_channel = self.bot.get_channel(channel_id)
await text_channel.send(f"**{member.display_name}** が **{after.channel.name}** をはじめました!")
def setup(bot):
bot.add_cog(Vcwhite(bot))
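# Usage sketch (module path is an assumption): from the bot entry point, load this
# cog with bot.load_extension("cogs.vcwhite") so that on_voice_state_update fires
# when a member joins one of the whitelisted voice channels above.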
|
nilq/baby-python
|
python
|
import unittest
from biolinkml.generators.pythongen import PythonGenerator
from tests.test_issues.environment import env
from tests.utils.python_comparator import validate_python
class Issue39UnitTest(unittest.TestCase):
@unittest.skip("issue_38.yaml clinical profile conflicts with latest Biolink Model")
def test_python_import(self):
""" Import generates for biolink-model """
python = PythonGenerator(env.input_path('issue_38.yaml'),
importmap=env.input_path('biolink-model-importmap.json')).serialize()
msg = validate_python(python, expected_path=env.expected_path('foo.py'))
if msg:
self.fail(msg)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from ....lib.aio import alru_cache
from ....utils import serialize_serializable, deserialize_serializable, extensible
from ...web import web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from .core import AbstractMetaAPI
class MetaWebAPIHandler(MarsServiceWebAPIHandler):
_root_pattern = '/api/session/(?P<session_id>[^/]+)/meta'
@alru_cache(cache_exceptions=False)
async def _get_cluster_api(self):
from ...cluster import ClusterAPI
return await ClusterAPI.create(self._supervisor_addr)
@alru_cache(cache_exceptions=False)
async def _get_oscar_meta_api(self, session_id: str):
from .oscar import MetaAPI
cluster_api = await self._get_cluster_api()
[address] = await cluster_api.get_supervisors_by_keys([session_id])
return await MetaAPI.create(session_id, address)
@web_api('(?P<data_key>[^/]+)', method='get')
async def get_chunk_meta(self, session_id: str, data_key: str):
fields_str = self.get_argument('fields', None)
error = self.get_argument('error', 'raise')
fields = fields_str.split(',') if fields_str else None
oscar_api = await self._get_oscar_meta_api(session_id)
result = await oscar_api.get_chunk_meta(data_key, fields=fields, error=error)
self.write(serialize_serializable(result))
web_handlers = {
MetaWebAPIHandler.get_root_pattern(): MetaWebAPIHandler
}
class WebMetaAPI(AbstractMetaAPI, MarsWebAPIClientMixin):
def __init__(self, session_id: str, address: str):
self._session_id = session_id
self._address = address.rstrip('/')
@extensible
async def get_chunk_meta(self,
object_id: str,
fields: List[str] = None,
error: str = 'raise') -> Optional[Dict]:
req_addr = f'{self._address}/api/session/{self._session_id}/meta/{object_id}' \
f'?error={error}'
if fields:
req_addr += '&fields=' + ','.join(fields)
res = await self._request_url(req_addr)
return deserialize_serializable(res.body)
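# Example round trip (illustrative address and session values): awaiting
#   WebMetaAPI("sess-1", "http://supervisor:7103").get_chunk_meta(
#       "chunk-key", fields=["shape", "dtype"])
# issues
#   GET http://supervisor:7103/api/session/sess-1/meta/chunk-key?error=raise&fields=shape,dtype
# which MetaWebAPIHandler.get_chunk_meta serves by delegating to the oscar MetaAPI.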
|
nilq/baby-python
|
python
|
"""Module for representing, moving, shifting, stretching plotting and otherwise
manipulating line segments in a convenient fashion.
Caleb Levy, 2015.
"""
import numpy as np
from .coordinates import Point, Coordinates
__all__ = ["Line"]
def parabola(sep, h, cut_short=0., n=100):
""" Return the array of x + 1j*f(x) sampled at n evenly spaced points on
the interval [cut_short, sep - cut_short], where f(x) is a parabola
satisfying f(0)=f(sep)=0 and f(sep/2)=h.
Used to construct curved arrows pointing between nodes of a graph. """
k = sep/2.
x_s = cut_short - k
x_f = k - cut_short
x = np.linspace(x_s, x_f, n)
f = -h/(1.*k**2) * (x + k) * (x - k)
z = x + 1j*f
return Coordinates(z + k)
class Line(object):
""" Line segment between two points. May be directed or undirected.
Internally represented as an ordered tuple of points in the complex plane,
(z1, z2); z1 is the tail, z2 is the head. This format is more convenient
for many purposes. """
def __init__(self, p1, p2):
"""input may be two complex numbers, or tuples (x1, y1), (x2, y2). """
self.p1 = Point(p1)
self.p2 = Point(p2)
def __repr__(self):
return self.__class__.__name__+'(p1=%s, p2=%s)' % (self.p1, self.p2)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.p1 == other.p1 and self.p2 == other.p2
return False
def __ne__(self, other):
return not self == other
@property
def vector(self):
"""Displacement vector from beginning of segment to end"""
return self.p2 - self.p1
@property
def length(self):
return self.vector.r
# Slope-intercept form: y = m*x + b
@property
def m(self):
"""Return the slope fo the line segment"""
rise = self.p2.y - self.p1.y
run = self.p2.x - self.p1.x
if run:
return 1.*rise/run
if rise:
return rise/abs(rise)*float('inf')
raise ZeroDivisionError
@property
def b(self):
"""Return the y-intercept of the extended line segment"""
return self.p1.y - self.m * self.p1.x
@property
def midpoint(self):
return (self.p1 + self.p2)/2
@property
def coordinates(self):
return Coordinates([self.p1, self.p2])
def bisecting_line(self):
"""Return a perpendicular line segment with overlapping midpoint"""
lc = self.coordinates
lc.rotate(angle=np.pi/2, origin=self.midpoint)
return self.__class__(lc[0], lc[1])
def projection(self, p):
"""Return projection of point p onto the extended segment"""
if self.length == 0: # p1 == p2 case
return self.p1
# Consider the line extending the segment, parameterized as v+t*(w-v).
# We find projection of point p onto the line.
# It falls where t = [(p-v) . (w-v)] / |w-v|^2
t = -(self.p1 - p)*(self.vector)/self.length**2
if t < 0:
return self.p1 # Beyond the 'p1' end of the segment
elif t > 1:
return self.p2 # Beyond the 'p2' end of the segment
return self.p1 + t*(self.vector)
def shorten(self, r):
"""Shorten the line segment by r/2 on each side."""
self.p1 = self.p1 + Point.from_polar(r/2., self.vector.theta)
self.p2 = self.p2 + Point.from_polar(r/2., self.vector.theta - np.pi)
def draw(self, ax=None):
"""Draw the line segment on the current axis."""
self.coordinates.plot(ax=None, color='blue', zorder=1)
def connecting_parabola(self, d=1./3):
""" Return a parabola sampled at n grid points connecting the end
points of the line segment with peak distance r away from the
connecting line. """
r, theta = self.vector.r, self.vector.theta
parab = parabola(r, d*self.length/2., n=100)
parab.rotate(theta)
return parab + self.p1
def draw_connecting_parabola(self, d=1./3, ax=None):
"""Draw parabola of width d connecting the ends of the line segment"""
self.connecting_parabola(d).plot(ax)
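# Minimal usage sketch (illustrative values; Point and Coordinates come from the
# .coordinates module):
#   seg = Line((0, 0), (4, 0))
#   seg.length                        # 4.0
#   seg.midpoint                      # the point (2, 0)
#   seg.connecting_parabola(d=1./3)   # arc peaking d*length/2 = 2/3 above the chord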
|
nilq/baby-python
|
python
|
from django.db import models
from newsroom.models import Article
from filebrowser.fields import FileBrowseField
from .common import SCHEDULE_RESULTS
# Create your models here.
class TwitterHandle(models.Model):
name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
@staticmethod
def autocomplete_search_fields():
return ("name__icontains",)
class Meta:
verbose_name = "Twitter handle"
verbose_name_plural = "Twitter Handles"
ordering = ['name', ]
def __str__(self):
return self.name
def calc_chars_left(tweet_text, image, tags):
chars_left = 116 - len(tweet_text.strip())
if image:
chars_left = chars_left - 24
for account in tags:
chars_left = chars_left - len(account.strip()) - 2
return chars_left
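# Worked example: calc_chars_left("hello", None, ["@cat"]) -> 116 - 5 - (4 + 2) = 105
# (116 is the text budget used here; an attached image costs 24 characters; each
# tagged account costs its length plus 2).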
class Tweet(models.Model):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
wait_time = models.PositiveIntegerField(help_text="Number of minutes "
"after publication "
"till tweet.")
status = models.CharField(max_length=20,
choices=SCHEDULE_RESULTS,
default="scheduled")
tweet_text = models.CharField(max_length=117, blank=True)
image = FileBrowseField(max_length=200, directory="images/", blank=True)
tag_accounts = models.ManyToManyField(TwitterHandle, blank=True)
characters_left = models.IntegerField(default=116)
class Meta:
ordering = ["article__published", "wait_time", ]
def __str__(self):
return self.article.title + ": " + str(self.wait_time)
def save(self, *args, **kwargs):
super(Tweet, self).save(*args, **kwargs)
twitter_handles = [str(name) for name in self.tag_accounts.all()]
self.characters_left = calc_chars_left(self.tweet_text,
self.image,
twitter_handles)
super(Tweet, self).save(force_update=True, *args, **kwargs)
|
nilq/baby-python
|
python
|
import pytest
import numpy as np
from funkyAD.helpers import count_recursive, unpack, nodify, recursive_append
from funkyAD.base import Node
def test_count_recursive_nparray():
x = np.array([2,3,1,0])
assert count_recursive(x)==4
def test_count_recursive_list():
x = [1,2,3]
assert count_recursive(x)==3
def test_count_recursive_ndarray():
x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
assert count_recursive(x)==6
def test_count_recursive_invalid_input():
x = "text"
with pytest.raises(TypeError):
count_recursive(x)
def test_unpack_1dlist():
x = [1,2]
assert unpack(x) == [1,2]
def test_unpack_2darray():
x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
assert unpack(x)==[1,2,3,4,5,6]
def test_unpack_3darray():
y = np.array([[[1,2],[3,4]],[[5,6],[7,8]],[[9,10],[11,12]]])
assert unpack(y) == [1,2,3,4,5,6,7,8,9,10,11,12]
def test_unpack_ndlist():
x = [[1,2,],[3,4]]
assert unpack(x)==[1,2,3,4]
def test_unpack_invalid_input():
with pytest.raises(TypeError):
unpack("text")
def test_nodify_nparray():
x = np.array([1,2,3])
seed = [1,2,3]
assert nodify(x, seed)==[Node(1,1), Node(2,2), Node(3,3)]
def test_nodify_list():
x = [1,2,3]
seed = [1,2,3]
assert nodify(x, seed)==[Node(1,1), Node(2,2), Node(3,3)]
def test_nodify_invalid_input():
    with pytest.raises(TypeError):
        nodify(3.14, [1])
def test_nodify_text_input():
x = "test"
seed = [1,0,0]
with pytest.raises(TypeError):
nodify(x, seed)
def test_nodify_node_input():
x = Node(1,[1,1])
seed = [1,0]
with pytest.raises(TypeError):
nodify(x,seed)
def test_nodify_ndarray():
x=np.array([np.array([1])])
seed = [1]
assert nodify(x,seed)==[Node(1,1)]
def test_nodify_nested_list():
x=[[1,2],[3,4]]
seed = [1,2,3,4]
assert nodify(x,seed)==[[Node(1,1), Node(2,2)], [Node(3,3), Node(4,4)]]
def test_recursive_append():
x=Node(1,1)
x.parents = [Node(2,1)]
trace = []
recursive_append(x,trace)
assert trace == [Node(1,1),Node(2,1)]
|
nilq/baby-python
|
python
|
# Copyright (c) 2018 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from . import none_or_int, none_or_float, none_or_bool, remove_important, prefer_important
class ExpRunDetails(object):
@classmethod
def compile(cls, config, defaults):
invocations = prefer_important(config.get('invocations'), defaults.invocations)
iterations = prefer_important(config.get('iterations'), defaults.iterations)
warmup = prefer_important(config.get('warmup'), defaults.warmup)
min_iteration_time = none_or_int(config.get('min_iteration_time',
defaults.min_iteration_time))
max_invocation_time = none_or_int(config.get('max_invocation_time',
defaults.max_invocation_time))
ignore_timeouts = none_or_bool(config.get('ignore_timeouts',
defaults.ignore_timeouts))
parallel_interference_factor = none_or_float(config.get(
'parallel_interference_factor', defaults.parallel_interference_factor))
execute_exclusively = none_or_bool(config.get('execute_exclusively',
defaults.execute_exclusively))
retries_after_failure = none_or_int(config.get('retries_after_failure',
defaults.retries_after_failure))
return ExpRunDetails(invocations, iterations, warmup, min_iteration_time,
max_invocation_time, ignore_timeouts, parallel_interference_factor,
execute_exclusively, retries_after_failure,
defaults.invocations_override, defaults.iterations_override)
@classmethod
def empty(cls):
return ExpRunDetails(None, None, None, None, None, None, None, None, None, None, None)
@classmethod
def default(cls, invocations_override, iterations_override):
return ExpRunDetails(1, 1, None, 50, -1, None, None, True, 0,
invocations_override, iterations_override)
def __init__(self, invocations, iterations, warmup, min_iteration_time,
max_invocation_time, ignore_timeouts, parallel_interference_factor,
execute_exclusively, retries_after_failure,
invocations_override, iterations_override):
self.invocations = invocations
self.iterations = iterations
self.warmup = warmup
self.min_iteration_time = min_iteration_time
self.max_invocation_time = max_invocation_time
self.ignore_timeouts = ignore_timeouts
self.parallel_interference_factor = parallel_interference_factor
self.execute_exclusively = execute_exclusively
self.retries_after_failure = retries_after_failure
self.invocations_override = invocations_override
self.iterations_override = iterations_override
def resolve_override_and_important(self):
# resolve overrides
if self.invocations_override is not None:
self.invocations = self.invocations_override
if self.iterations_override is not None:
self.iterations = self.iterations_override
# resolve important tags
self.invocations = remove_important(self.invocations)
self.iterations = remove_important(self.iterations)
self.warmup = remove_important(self.warmup)
def as_dict(self):
return {
'warmup': self.warmup,
'minIterationTime': self.min_iteration_time,
'maxInvocationTime': self.max_invocation_time
}
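# Illustrative ReBench-style config block that `compile` consumes (values assumed;
# every key falls back to `defaults` when absent):
#
#   invocations: 5
#   iterations: 10
#   warmup: 2
#   max_invocation_time: 600
#   execute_exclusively: true
#
# Values marked important (see prefer_important/remove_important) win over
# defaults and are unwrapped later by resolve_override_and_important().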
|
nilq/baby-python
|
python
|
#Create a folder called clases and put the files
# dino.py and persona.py inside it.
# Create a Persona() class whose attributes are nombre, edad
# and profesion. When instantiated, it should greet like the
# dino, stating its attributes.
# Add a cumpleanhos method to the Persona class that increases the person's
# age by one year and returns the new age.
# Add a class attribute to the Persona class that stores a list of hobbies.
# Create a getter method that returns the person's hobbies.
# Create a method that appends hobbies to the list.
class Persona:
lista_hobbies=[]
def __init__(self, un_nombre,una_edad,una_profesion, hobbies=None):
self.nombre=un_nombre
self.edad = una_edad
self.profesion = una_profesion
self.lista_hobbies = hobbies
def __repr__(self):
valor = "<Objeto Persona: " + self.nombre + ">"
return valor
def agregar_hobbies(self, something):
self.lista_hobbies.append(something)
return self.lista_hobbies
        # if type(something)==str or type(something)==list:
        #     self.lista_hobbies.append(something)
        #     return self.lista_hobbies
        # else:
        #     return ("You must pass a list [] or a text string '' ")
#self.agregados = something
def obtener_hobbies(self):
return self.lista_hobbies
def cumpleanhos(self):
self.edad = self.edad + 1
return self.edad
patata = Persona("Guillermo", 27, "Programador", ['comer'])
print(patata.lista_hobbies)
print(patata.obtener_hobbies())
print(patata.agregar_hobbies("dormir"))
# Create a class called Agenda with methods to add
# Persona-type objects to a list (class attribute)
# and to remove people from that list.
# Create 3 people and add them to an Agenda object.
class Agenda:
#contactos = []
def __init__(self):
self.contactos = []
def agregar_persona(self, milanesa):
if type(milanesa)==Persona:
self.contactos.append(milanesa)
else:
print("Necesito una persona")
def eliminar_persona(self, personita):
self.contactos.remove(personita)
agendita = Agenda()
pepito = Persona("Pepe", 27, "EEEEEE", ['comer'])
pepita = Persona("Pepa", 27, "DDDD", ['comer'])
pepite = Persona("Pepx", 27, "Programador", ['comer'])
agendita.agregar_persona(pepito)
agendita.agregar_persona(pepita)
agendita.agregar_persona(pepite)
|
nilq/baby-python
|
python
|
import re
class ReDict(dict):
"""
Special dictionary which expects values to be *set* with regular expressions
(REs) as keys, and expects values to be retreived using input text for an
RE as keys. The value corresponding to the regular expression which matches
the input text will be returned. In the case where the input text matches
multiple REs, one of the matching values will be returned, but precisely
which one is undefined.
Example usage:
>>> d = ReDict()
>>> d['hello( world(!)*)?'] = 1
>>> d['regex|dict key'] = 2
>>> d['hello']
1
>>> d['hello world!!!!']
1
>>> d['regex']
2
>>> d['dict key']
2
"""
def __init__(self, *args, **kwargs):
super(ReDict, self).__init__(*args, **kwargs)
# This *must* be lower than 100
self.groups_per_regex = 75
self.flags = re.IGNORECASE
self.groupid = 1
self.compiled = None
self.patterns = {}
self.subgroups = None
def groups(self):
"""
Return tuple of all subgroups from the last regex match performed
when fetching an item, as returned by re.MatchObject.groups()
:return: tuple of subgroups from last match
:rtype: tuple
"""
return self.subgroups
def _block_to_regexs(self, block):
total_len = len(block)
override_slice = None
num_regexs = 1
start = 0
ret = []
end = 0
i = 0
while True:
slice_size = int(total_len / num_regexs)
while start < total_len:
start = i * slice_size # Slice start index
end = min(total_len, start + slice_size) # Slice end index
blockslice = block[start:end]
regex = '|'.join(blockslice)
try:
compiled = re.compile(regex, flags=self.flags)
except AssertionError:
# Raises AssertionError for too many named groups
if (num_regexs == total_len) or (len(block) == 1):
raise AssertionError("Too many groups in regex '%s'"
% regex)
num_regexs *= 2
i = 0
ret = []
break
i += 1
ret.append(compiled)
if ret:
break
return ret
def compile(self):
"""
Compile all regular expressions in the dictionary
"""
i = 0
ret = []
block = []
self.compiled = []
for groupname in self.patterns:
pattern, _ = self.patterns[groupname]
block.append('(?P<%s>^%s$)' % (groupname, pattern))
i += 1
if i == self.groups_per_regex:
self.compiled.extend(self._block_to_regexs(block))
i = 0
block = []
if block:
self.compiled.extend(self._block_to_regexs(block))
def dump_to_dict(self):
"""
Dump all pattern/value pairs to a regular dict, where the regular
expressions are the keys
:return: dict of pattern/value pairs
:rtype: dict
"""
ret = {}
for pattern, value in self.iteritems():
ret[pattern] = value
return ret
def load_from_dict(self, data):
"""
Load pattern/value pairs from a regular dict. This overwrites any
existing pattern/value pairs
:param dict data: pattern/value pairs to load
"""
self.groupid = 1
self.compiled = None
self.patterns = {}
for pattern in data:
self.__setitem__(pattern, data[pattern])
return self
def _do_match(self, text):
if not self.compiled:
self.compile()
ret = None
m = None
for compiled in self.compiled:
m = compiled.match(text)
if m and m.lastgroup:
ret = m
break
if not ret:
raise KeyError("No patterns matching '%s' in dict" % text)
return ret
def __setitem__(self, pattern, value):
if not pattern:
return
self.patterns["g%d" % self.groupid] = (pattern, value)
self.groupid += 1
self.compiled = None
def __getitem__(self, text):
m = self._do_match(text)
self.subgroups = m.groups()[m.lastindex:]
return self.patterns[m.lastgroup][1]
def __delitem__(self, pattern):
key = None
for groupname in self.patterns:
p, v = self.patterns[groupname]
if p == pattern:
key = groupname
break
if key is None:
raise KeyError("No such pattern in ReDict: '%s'" % pattern)
del self.patterns[key]
self.compiled = None
def __contains__(self, text):
try:
_ = self.__getitem__(text)
except KeyError:
return False
return True
def pop(self, text):
"""
Return and delete the first value associated with a pattern matching
'text'
:param str text: text to match against
:return: value associated with pattern matching 'text' (if any)
"""
m = self._do_match(text)
ret = self.patterns[m.lastgroup][1]
del self.patterns[m.lastgroup]
if self.compiled is not None:
self.compiled = None
return ret
    def items(self):
        """
        Return all pattern/value pairs stored in this dict
        :return: list of (pattern, value) tuples
        :rtype: list
        """
        return [self.patterns[groupname] for groupname in self.patterns]
def values(self):
"""
Return all values stored in this dict
:return: list of values
:rtype: list
"""
return [value for _, value in self.iteritems()]
def keys(self):
"""
Return all keys stored in this dict
:return: list of keys
:rtype: list
"""
return [pattern for pattern, _ in self.iteritems()]
def iteritems(self):
"""
Returns a generator to get all key/value pairs stored in this dict
:return: generator to get pattern/value pairs
"""
for groupname in self.patterns:
yield self.patterns[groupname]
def __str__(self):
return str(self.dump_to_dict())
def __repr__(self):
return repr(self.dump_to_dict())
def __len__(self):
return len(self.patterns)
def clear(self):
"""
Clear all key/value pairs stored in this dict
"""
self.groupid = 1
self.compiled = None
self.patterns.clear()
def copy(self):
"""
Create a new ReDict instance and copy all items in this dict into the
new instance
:return: new ReDict instance containing copied data
:rtype: ReDict
"""
new = ReDict()
for pattern, value in self.iteritems():
new[pattern] = value
return new
def update(self, other):
"""
Add items from 'other' into this dict
:param ReDict other: dict containing items to copy
"""
for pattern, value in other.iteritems():
self.__setitem__(pattern, value)
|
nilq/baby-python
|
python
|
# The MIT License (MIT)
#
# Copyright (c) 2018 UMONS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import os
from tokenizer import repair
from generators import Generator
from analyzers import Analyzer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='directory to store checkpointed models')
parser.add_argument('--model', type=str, default='Verne-french-weighted-1024',
help='which model must be used in the save directory')
parser.add_argument('-n', type=int, default=400,
help='number of tokens to sample')
parser.add_argument('--count', '-c', type=int, default=10,
help='number of samples to print')
parser.add_argument('--prime', type=str, default=' ',
help='prime text')
parser.add_argument('--input_encoding', type=str, default='UTF-8',
help='character encoding of preprocessed files, from '
'https://docs.python.org/3/library/codecs.html#standard-encodings')
parser.add_argument('--pick', type=int, default=2,
help='1 = weighted pick, 2 = beam search pick')
parser.add_argument('--width', type=int, default=4,
help='width of the beam search')
parser.add_argument('--sample', type=int, default=1,
help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on spaces')
parser.add_argument('--quiet', '-q', default=False, action='store_true',
help='suppress printing the prime text (default false)')
parser.add_argument('--suppress_prime', '-s', default=False, action='store_true',
help='suppress the prime text in the returned result (default false))')
args = parser.parse_args()
args.save_dir = os.path.join(os.path.dirname(__file__), args.save_dir)
analyze(args)
def analyze(args):
analyzer = Analyzer(args.save_dir, args.input_encoding, args.model)
print_analyze_data(analyzer)
generator = Generator(args.save_dir, args.input_encoding, args.model)
generator.load()
print("=== Tests by sample ===")
results = []
for i in range(args.count):
result = generator.generate(args.n, args.prime, args.sample, args.pick, args.width, args.quiet,
args.suppress_prime)
results.append(result)
print_sample(i, generator.change_words(result), analyzer)
print_stats(analyzer, result)
print("=== Results for ALL samples ===")
print_global_stats(analyzer)
generator.close()
def print_analyze_data(analyzer):
voc_distrib, data_distrib = analyzer.analyze_data()
print("--- Vocabulary Distribution ---")
for corpus in voc_distrib:
print(corpus + ": " + str(voc_distrib[corpus]) + "%")
print()
print("--- Data Distribution ---")
for corpus in data_distrib:
print(corpus + ": " + str(data_distrib[corpus]) + "%")
print()
def print_sample(i, sample, analyzer):
print("--- Sample N°" + str(i) + " ---")
print(sample)
nice_sample, _, _ = repair(sample, config=analyzer.get_config())
print(nice_sample)
print()
def print_stats(analyzer, sample):
stats = analyzer.analyze_sample(sample)
print_hybridation(stats)
print_RLS(stats)
print_LS(stats)
print_DP(stats)
print_OC(stats)
print_VAR(stats)
def print_hybridation(stats):
if 'Hybrid' not in stats.keys():
return
print("--- Corpus Vocabulary Use ---")
for corpus in stats['Hybrid']['usage']:
print(corpus + ": " + str(stats['Hybrid']['usage'][corpus]) + "%")
if corpus != "Common":
print("\tWords: " + str([word for word in stats['Hybrid']['voc'][corpus]]))
print()
def print_RLS(stats):
if 'RLS' not in stats.keys():
return
print("--- Original Sequences Detector ---")
print("Matched values : ")
print(stats['RLS']['values'])
print("Diff sequence length between data and sample : ")
print([stats['RLS']['data_lengths'][i] - stats['RLS']['sample_lengths'][i]
for i in range(len(stats['RLS']['sample_lengths']))])
print()
def print_LS(stats):
if 'LS' not in stats.keys():
return
print("--- The longest copied sequence with a tolerance window of " + str(stats['LS']['window']) + " ---")
print("Length : " + str(stats['LS']['value']) + " (" + str(stats['LS']['percent']) + "% of sample size)")
print("Sample sequence : " + stats['LS']['sample_seq'].replace("_APPEND_", "_")) # Help readability
print("Original sequence : " + stats['LS']['orig_seq'].replace("_APPEND_", "_"))
print()
def print_DP(stats):
if 'DP' not in stats.keys():
return
print("--- Pattern Detector ---")
if len(stats['DP']['pattern']) == 0:
print("No pattern detected.")
else:
print("Longest pattern (" + str(len(stats['DP']['pattern'])) + " words) found "
+ str(stats['DP']['occur']) + " times: ")
print(" ".join(stats['DP']['pattern']))
print()
def print_OC(stats):
if 'OC' not in stats.keys():
return
print("--- " + str(len(stats['OC']['list'])) + " Most Used Words in samples ---")
for word, value in stats['OC']['list']:
if word == '\n':
print("RETURN LINE: " + str(value) + "%")
else:
print(word + " : " + str(value) + "%")
print("\t> " + str(len(stats['OC']['list'])) + " words represent " +
str(stats['OC']['total']) + "% of all words in the samples.\n")
print()
def print_VAR(stats):
if 'VAR' not in stats.keys():
return
print("--- " + str(len(stats['VAR']['list'])) + " Highest Variations against Data Words Usage ---")
for word, sample_value, data_value in stats['VAR']['list']:
if word == '\n':
print("RETURN LINE: " + str(sample_value) + "% against " + str(data_value) + "%")
else:
print(word + " : " + str(sample_value) + "% against " + str(data_value) + "%")
print("\t> Variation Resolution Percentage: " + str(stats['VAR']['resolution']) + "%")
print()
def print_global_stats(analyzer):
stats = analyzer.analyze_global()
if 'Hybrid' in stats.keys():
print("--- Average Hybridation ---")
for corpus in stats['Hybrid']:
print(corpus + ": " + str(stats['Hybrid'][corpus]) + "%")
if 'Hybrid_Data' in stats.keys():
print("\twith respect to " + str(stats['Hybrid_Data'][corpus]) + " %")
print()
if 'LS' in stats.keys():
print("--- Longest Sequence in average---")
print("Length : " + str(stats['LS']['value']) + " (" + str(stats['LS']['percent']) + "% of sample size)")
print()
if 'DP' in stats.keys():
print("--- Average Pattern Detector ---")
print("Longest pattern (" + str(stats['DP']['length']) + " words) found "
+ str(stats['DP']['occur']) + " times: ")
print()
print_VAR(stats)
print_OC(stats)
if __name__ == '__main__':
main()
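# Example invocation (illustrative; the defaults come from the argparse flags above):
#   python analyze.py --model Verne-french-weighted-1024 -n 400 -c 10 --pick 2 --width 4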
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 13:34:49 2020
@author: lukepinkel
"""
import numba
import numpy as np
import scipy as sp
import scipy.special
SQRT2 = np.sqrt(2)
ROOT2PI = np.sqrt(2.0 * np.pi)
def poisson_logp(x, mu, logp=True):
p = sp.special.xlogy(x, mu) - sp.special.gammaln(x + 1) - mu
if logp==False:
p = np.exp(p)
return p
def log1p(x):
return np.log(1+x)
def norm_cdf(x, mean=0.0, sd=1.0):
z = (x - mean) / sd
p = (sp.special.erf(z/SQRT2) + 1.0) / 2.0
return p
def norm_pdf(x, mean=0.0, sd=1.0):
z = (x - mean) / sd
p = np.exp(-z**2 / 2.0) / (ROOT2PI * sd)
return p
def get_part(arr, sol, size, step, maximum, res):
if step==size:
res.append(sol.copy())
else:
sol[step] = 1
while sol[step]<=maximum:
get_part(arr, sol, size, step+1, maximum, res)
sol[step] += 1
get_part(arr, sol, size, step+1, maximum+1, res)
def partition_set(n):
size = n
arr = np.arange(1, size+1)-1
sol = np.zeros(size, dtype=int)
res = []
get_part(arr, sol, size, 0, 0, res)
return res
@numba.jit(nopython=True)
def soft_threshold(x, t):
y = np.maximum(np.abs(x) - t, 0) * np.sign(x)
return y
@numba.jit(nopython=True)
def expit(x):
u = np.exp(x)
y = u / (1.0 + u)
return y
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import json
import os
import sys
import codecs
import io
import logging
logging.basicConfig(
filename='2_error.log',
filemode='w',
level='INFO',
format='[%(levelname)s] %(asctime)s: %(message)s'
)
asset_folder = 'assets'
def cmd(line):
return ' '+line
def print_console(text):
try:
print text
except:
pass
def exists_in_assets(file):
file_path = os.path.join(asset_folder, file)
if os.path.exists(file_path):
return True
else:
#print '%s not exists' % file_path
logging.error('%s not exists' % file_path)
return False
def exists(rd, file):
file_path = os.path.join(asset_folder, rd, file)
if os.path.exists(file_path):
return True
else:
#print '%s not exists' % file_path
logging.error('%s not exists' % file_path)
return False
def player_name_unification(line):
line = line.replace(u'{{主人公}}', player_name)
return line
# player_name = 'Master'
print "Player's name: ",
player_name = raw_input().decode(sys.stdin.encoding).strip()
data_directory = 'raw_scenario'
dst_folder = 'scenario'
# data_directory = 'nutaku/raw_scenario'
# dst_folder = 'nutaku/scenario'
if not os.path.exists(dst_folder):
os.mkdir(dst_folder)
character_types = os.listdir(data_directory)
dmm_scenarios = []
for character_type in character_types:
lst = os.listdir(os.path.join(data_directory, character_type).decode('utf8'))
for character in lst:
print_console(character)
scenarios = os.listdir(os.path.join(data_directory, character_type, character))
for filename in scenarios:
with open(os.path.join(data_directory, character_type, character, filename)) as file:
data = json.loads(file.read().replace(
' ', '_').replace('\\u00a0', '_'))
story_type = character_type.split('_')[0]
name = character
label = 'story_nutaku_' + story_type + '_' + \
"%03d" % int(filename.split('_')[0])
script = ['label %s:' % label]
chara = {}
name = ' '
# print label
rd = data['resource_directory']
if data.has_key('scene_data'):
dmm_scenarios.append(
dict(character=character, filename=filename, rd=rd))
if data.has_key('scenario'):
# print data
data = data['scenario'].replace('"][', '"]\n[').split('\n')
for command in data:
if command.startswith('*') or command.startswith('#') or command.startswith('Tap to continue'):
continue
if command.startswith('['):
endIdx = command.rfind(']')
command = command[1:endIdx].replace(
'[', '(').replace(']', ')').replace('"', '').split()
if len(command) < 2:
continue
line = dict(cmd=command[0])
for arg in command[1:]:
tmp = arg.split('=')
if len(tmp) == 2:
line[tmp[0]] = tmp[1]
if filename == '5009_harem-character.json' and line.has_key('name') and line['name'] == 'sukunahikona':
# Hot fix for [Masterpiece] Hermes 5009_harem-character.json
line['name'] = 'herumesu2nd'
# print line
if line['cmd'].startswith('chara_new'):
chara[line['name']] = dict(
name=line['jname'].replace('_', ' '))
if line['cmd'].startswith('chara_face'):
if not chara[line['name']].has_key('face'):
chara[line['name']]['face'] = dict()
                                chara[line['name']]['face'][line['face']] = line['storage']
if line['cmd'].startswith('playbgm'):
if exists_in_assets(line['storage']):
script.append(
cmd('play music "nutaku/assets/%s"' % (line['storage'])))
if line['cmd'].startswith('bg'):
                                c = ('show expression (Frame("nutaku/assets/%s"))' % line['storage']
                                     + ' as bg behind char with dissolve')
script.append(cmd(c))
if line['cmd'].startswith('chara_show'):
name = chara[line['name']]['name']
if line['cmd'].startswith('chara_mod'):
sprite = chara[line['name']]['face'][line['face']]
if '.png' not in sprite:
continue
                                c = ('show expression (im.Scale("nutaku/assets/%s",config.screen_height,config.screen_height))' % sprite
                                     + ' as char with dissolve')
script.append(cmd(c))
if line['cmd'].startswith('playse'):
                                if exists(rd, line['storage']):
                                    script.append(
                                        cmd('voice "nutaku/assets/%s/%s"' % (rd, line['storage'])))
if line['cmd'].startswith('chara_hide'):
script.append(cmd('hide char with dissolve'))
name = ' '
elif not command.startswith(';layer') and not command.startswith(u';画面'):
text = command.replace('"', "'").replace('%', '\\%')
text = text.replace('[l]', '').replace(
'[r]', '').replace('[cm]', '')
text = text.replace('%', '%%')
text = text.replace('\n', '').replace('\r', '')
if len(text.replace(' ', '')) < 2:
continue
# print text
text = player_name_unification(text)
script.append(cmd('"%s" "%s"' % (name, text)))
elif data.has_key('scene_data'):
transition = 'dissolve'
for entry in data['scene_data']:
if entry.has_key('bgm'):
if exists(rd, entry['bgm']):
script.append(
cmd('play music "nutaku/assets/%s"' % (entry['bgm'])))
else:
script.append(cmd('play music "nutaku/assets/bgm_h_003.mp3"'))
if entry.has_key('film'):
if entry['film'].startswith('pink'):
continue
fps = float(entry['fps'])
if fps > 1:
c = 'show expression '
c += '(Zoomable(Frame(anim.Filmstrip(im.Rotozoom("nutaku/assets/%s/%s",90,1.0),(900,640),(16,1),%f)))) ' % (
rd, entry['film'], 1/fps)
c += 'at top as cg with dissolve'
script.append(cmd(c))
else:
c = 'show expression '
c += '(Zoomable(Frame(im.Rotozoom("nutaku/assets/%s/%s",90,1.0)))) ' % (
rd, entry['film'])
c += 'at top as cg with dissolve'
script.append(cmd(c))
for line in entry['talk']:
if line.has_key('voice'):
if len(line['voice']):
                                    if exists(rd, line['voice']):
                                        script.append(
                                            cmd('voice "nutaku/assets/%s/%s"' % (rd, line['voice'])))
else:
script.append(cmd('voice sustain'))
if not line.has_key('words') or not len(line['words']):
line['chara'] = ' '
line['words'] = '{i}click to proceed'
line['words'] = line['words'].replace(
'[', '').replace(']', '').replace('"', '')
line['words'] = line['words'].replace('%', '%%')
line['words'] = line['words'].replace(
'\n', '').replace('\r', '')
line['words'] = player_name_unification(line['words'])
script.append(cmd('"%s" "%s"' % (line['chara'].replace('"', "'").replace(
'%', '\\%').replace('[', '(').replace(']', ')'), line['words'])))
script.append(cmd("hide char with dissolve"))
script.append(cmd("hide cg with dissolve"))
script.append(cmd("hide bg with dissolve"))
script.append(cmd("stop music"))
script.append(cmd("jump index"))
if not os.path.exists(dst_folder):
os.mkdir(dst_folder)
# with codecs.open('test.rpy', 'w', 'utf-8') as file:
with codecs.open(os.path.join(dst_folder, '%s.rpy' % label), 'w', 'utf-8') as file:
for line in script:
file.write(line)
file.write('\n')
|
nilq/baby-python
|
python
|
"""
Configuration file for
https://github.com/karlicoss/HPI/
https://github.com/seanbreckenridge/HPI/
[Human Programming Interface]
"""
import sys
import tempfile
from os import environ, path, listdir
from typing import Optional, Callable, List, Sequence
from pathlib import Path
from my.core.common import PathIsh, Paths
# e.g., converts to ~/Repos/name
# ~/Repos/ is where I store a lot of my git repositories
def repo(name: str) -> str:
return path.join(environ["REPOS"], name)
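# Example (illustrative): with REPOS=/home/sean/Repos,
#   repo("HPI") -> "/home/sean/Repos/HPI"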
try:
# https://github.com/seanbreckenridge/reorder_editable
# if my easy-install.pth file was ordered wrong, fix it and exit!
from reorder_editable import Editable
except ImportError:
pass
else:
if Editable().reorder([repo("HPI"), repo("HPI-fork")]):
# this is true if we actually reordered the path, else path was already ordered
print(
"easy-install.pth was ordered wrong! It has been reordered, exiting to apply changes...",
file=sys.stderr,
)
sys.exit(0)
# https://github.com/seanbreckenridge/ipgeocache
try:
from .ipinfo_secret import ACCESS_TOKEN as ipinfo_secret_token
environ["IPINFO_TOKEN"] = ipinfo_secret_token
except ImportError:
pass
class core:
cache_dir: PathIsh = path.join(environ["HOME"], ".cache", "cachew")
tmp_dir: PathIsh = path.join(tempfile.gettempdir(), "HPI-tempdir")
enabled_modules: Sequence[str] = []
disabled_modules: Sequence[str] = [
"my.polar",
"my.stackexchange",
"my.rtm",
"my.media",
"my.google\.takeout", # ignore karlicoss google module
"my.orgmode",
"my.jawbone",
"my.twitter",
"my.vk",
"my.rss",
"my.photos",
"my.location.google",
"my.calendar",
"my.taplog",
"my.runnerup",
"my.rescuetime",
"my.pocket",
"my.lastfm",
"my.kobo",
"my.instapaper",
"my.hypothesis",
"my.foursquare",
"my.fbmessenger",
"my.endomondo",
"my.arbtt",
"my.emfit",
"my.bluemaestro",
"my.zotero", # temporarily? till I start using it
]
def if_exists(p: PathIsh) -> Optional[PathIsh]:
pp = Path(p)
if pp.exists():
return pp
return None
# if the HPIDATA environment variable is set (which points to my data)
# use that. Else, just default to ~/data
prefix: Path = Path(environ.get("HPIDATA", path.join(environ["HOME"], "data")))
# prepend my data directory onto this path
def data(p: PathIsh) -> Path:
return prefix / p
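# Example (illustrative): with HPIDATA unset and HOME=/home/sean,
#   data("github/gdpr") -> Path("/home/sean/data/github/gdpr")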
if "IPGEOCACHE_DIR" not in environ:
environ["IPGEOCACHE_DIR"] = str(data("ipgeocache"))
# combines:
# periodic exports from: https://github.com/karlicoss/ghexport
# github GDPR export
class github:
gdpr_dir: PathIsh = data("github/gdpr")
export_path: Paths = data("github/ghexport")
MAILDIR = Path(
environ.get("MAILDIR", path.join(environ["HOME"], ".local", "share", "mail"))
)
def list_mailboxes(p: Path) -> Sequence[Path]:
dirs: List[Path] = []
    if p.exists():
dirs = [p / f for f in listdir(p) if "@" in f]
return tuple(dirs)
# locally synced IMAP mailboxes using mbsync
class imap:
    # path[s]/glob to the mailboxes/IMAP files
mailboxes = list_mailboxes(MAILDIR)
# combines:
# periodic exports from: https://github.com/karlicoss/rexport/
# comment export from: https://github.com/seanbreckenridge/pushshift_comment_export
class reddit:
class rexport:
export_path: Paths = data("rexport")
class pushshift:
export_path: Paths = data("pushshift")
# prompt me for actions using https://github.com/seanbreckenridge/autotui
# interfaces created by https://github.com/seanbreckenridge/ttally
class body:
datadir: PathIsh = environ["TTALLY_DATA_DIR"]
# parses my zsh history and any backups
class zsh:
export_path: Paths = data("zsh_history")
live_file: Optional[PathIsh] = if_exists(
path.join(environ["ZDOTDIR"], ".zsh_history")
)
# parses bash history
class bash:
export_path: Paths = data("bash_history")
# parses current/finished http://todotxt.org/ using topydo
class todotxt:
export_path: Paths = data("todotxt")
live_file: Optional[PathIsh] = if_exists(
path.join(environ["HPIDATA"], "todo", "todo.txt")
)
# parses the history of me adding/removing rss feeds
class newsboat:
export_path: Paths = data("newsboat")
# parses information from git repositories which match my emails
class commits:
names: List[str] = ["Sean Breckenridge"]
emails: List[str] = [
"seanbrecke@gmail.com",
"sbrecken@ucsc.edu",
"purplepinapplesyt@gmail.com",
]
roots: Paths = [
Path(environ["REPOS"]),
]
# uses my daemon for watching mpv events
# https://github.com/seanbreckenridge/mpv-history-daemon
class mpv:
export_path: Paths = data("mpv/*.json")
# use my active firefox database
from browserexport.browsers.firefox import Firefox
live_dbs: List[Path] = []
try:
live_dbs.append(Firefox.locate_database())
except Exception:
pass
# uses browserexport https://github.com/seanbreckenridge/browserexport
class browsing:
export_path: Paths = data("browsing")
live_databases: Paths = tuple(live_dbs)
# uses lolexport: https://github.com/seanbreckenridge/lolexport
class league_of_legends:
export_path: Paths = data("league_of_legends/parsed*.json")
username = "purplepinapples"
# uses https://github.com/seanbreckenridge/chess_export
class chess:
export_path: Paths = data("chess")
# uses https://github.com/seanbreckenridge/listenbrainz_export
class listenbrainz:
export_path: Paths = data("listenbrainz")
# uses traktexport: https://github.com/seanbreckenridge/traktexport
class trakt:
export_path: Paths = data("trakt")
# uses malexport: https://github.com/seanbreckenridge/malexport
class mal:
export_path: PathIsh = data("malexport")
# uses https://github.com/seanbreckenridge/grouvee_export
class grouvee:
export_path: Paths = data("grouvee")
# uses my personal albums system: https://github.com/seanbreckenridge/albums
class albums:
export_path: Paths = data("albums.json")
# uses https://github.com/seanbreckenridge/steamscraper
class steam:
export_path: Paths = data("steam.json")
# https://github.com/seanbreckenridge/blizzard_gdpr_parser
class blizzard:
export_path: Paths = data("blizzard/parsed.json")
environ["OLD_FORUMS_SELECTORS"] = str(data("old_forum_selectors.json"))
# https://github.com/seanbreckenridge/old_forums
class old_forums:
# path[s]/glob to the folder which contains JSON/HTML files
export_path: Paths = data("old_forums")
class project_euler:
# path[s]/glob to the .txt export files
export_path: Paths = data("project_euler")
# parses the GDPR export
class skype:
export_path: Paths = data("skype.json")
# parses the GDPR export
class facebook:
gdpr_dir: PathIsh = data("facebook_gdpr")
# parses the GDPR export
class spotify:
gdpr_dir: PathIsh = data("spotify")
class twitch:
# my chatlogs from the overrustle_logs dump
# https://github.com/seanbreckenridge/overrustle_parser
class overrustle:
export_path: Paths = data("twitch/overrustle_logs.json")
# parses the privacy request
class gdpr:
gdpr_dir: PathIsh = data("twitch/gdpr")
# parses backups of my ipython history
class ipython:
export_path: Paths = data("ipython/*.sqlite")
# parses https://takeout.google.com using https://github.com/seanbreckenridge/google_takeout_parser
class google:
takeout_path: Paths = data("google_takeout/*.zip")
# https://github.com/seanbreckenridge/ttt
class ttt:
export_path: Paths = data("ttt/*.csv")
# https://github.com/seanbreckenridge/aw-watcher-window
class window_watcher:
export_path: Paths = data("window_watcher/*.csv")
force_individual: Optional[List[str]] = ["Alacritty"]
class smscalls:
export_path: Paths = data("SMSBackups")
class photos:
paths: List[PathIsh] = ["~/Pictures/iCloudPhotos/", data("google_takeout")]
    # don't ignore anything
ignored: Callable[[Path], bool] = lambda p: False
# parses the GDPR export
class apple:
gdpr_dir: PathIsh = data("apple")
# parses the GDPR export
class discord:
export_path: Paths = data("discord/*.zip")
class runelite:
export_path: Paths = data("runelite_screenshots")
# .gpx files from https://github.com/mendhak/gpslogger
class gpslogger:
export_path: Paths = data("gpslogger")
class pdfs:
paths: Paths = [
"~/Documents/Books/",
"~/Documents/Notes/",
]
class stackexchange:
export_path: PathIsh = ""
from typing import Sequence, Union, Tuple
from datetime import datetime, date
DateIsh = Union[datetime, date, str]
LatLon = Tuple[float, float]
class location:
try:
from .locations_secret import home
    except ImportError:
pass
class time:
class tz:
policy = "convert"
# class stackexchange:
# export_path: Paths = "~/data/stexport"
# stexport: Optional[PathIsh] = repo("stexport")
|
nilq/baby-python
|
python
|
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Copyright (c) 2018 by SN Systems Ltd., Sony Interactive Entertainment Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import Optional
class ValueIR:
"""Data class to store the result of an expression evaluation."""
def __init__(self,
expression: str,
value: str,
type_name: str,
could_evaluate: bool,
                 error_string: Optional[str] = None,
is_optimized_away: bool = False,
is_irretrievable: bool = False):
self.expression = expression
self.value = value
self.type_name = type_name
self.could_evaluate = could_evaluate
self.error_string = error_string
self.is_optimized_away = is_optimized_away
self.is_irretrievable = is_irretrievable
def __str__(self):
prefix = '"{}": '.format(self.expression)
if self.error_string is not None:
return prefix + self.error_string
if self.value is not None:
return prefix + '({}) {}'.format(self.type_name, self.value)
return (prefix +
'could_evaluate: {}; irretrievable: {}; optimized_away: {};'
.format(self.could_evaluate, self.is_irretrievable,
self.is_optimized_away))
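# Example (illustrative): a successful evaluation renders as
# '"<expression>": (<type>) <value>':
#   str(ValueIR('x + 1', '42', 'int', True))  ->  '"x + 1": (int) 42'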
|
nilq/baby-python
|
python
|
from krogon.config import Config
from datetime import datetime
from datetime import timedelta
import krogon.yaml as yaml
import krogon.either as E
from base64 import b64decode
import json
import re
from krogon.k8s.providers.k8s_provider import K8sProvider
class GKEProvider(K8sProvider):
def __init__(self,
project_id: str,
service_account_b64: str,
config: Config):
self._conf = config
self._project_id = _get_project_id(project_id, config)
self._service_account_info = _get_service_account_info(service_account_b64, config)
self._scripts_dir = config.scripts_dir
self._cache_dir = config.cache_dir
self._file = config.fs
self._run = lambda cmd: config.os.run(cmd, config.log)
self._is_macos = config.os.is_macos
self._log = config.log
self._service_account_file = config.cache_dir + '/service_account.json'
def get_project_id(self):
return self._project_id
def get_service_account_info(self):
return self._service_account_info
def get_clusters(self, by_regex: str):
def _parse_cluster_names(cluster_names: str):
            names = [name.strip() for name in cluster_names.split('\n')]
final_names = set()
matching_clusters = list(filter(lambda name: re.search(by_regex, name) is not None, names))
final_names.update(matching_clusters)
return list(final_names)
return self._get_all_clusters() | E.then | _parse_cluster_names
def kubectl(self, command: str, cluster_name: str):
kubeconfig_file = self._kubeconfig_file_path(cluster_name)
return self._gen_kubeconfig(cluster_name) \
| E.on | (dict(whatever=lambda _x, _y: self._log.info("\n\n==========kubectl: {}==========".format(cluster_name)))) \
| E.then | (lambda _: self._run('{cache_dir}/kubectl --kubeconfig {kubeconfig_file} {command}'
.format(cache_dir=self._cache_dir,
kubeconfig_file=kubeconfig_file,
command=command))) \
| E.on | (dict(whatever=lambda _x, _y: self._log.info("\n==========kubectl: {} END==========\n".format(cluster_name))))
def _get_all_clusters(self):
return self._configure_auth() \
| E.then | (lambda _: self._run("{cache_dir}/google-cloud-sdk/bin/gcloud "
"container clusters list --format=\"value(name)\""
.format(cache_dir=self._cache_dir)))
def _gen_kubeconfig(self, cluster_name: str):
if self._is_kubeconfig_valid(cluster_name):
return E.success()
kubeconfig_file = self._kubeconfig_file_path(cluster_name)
self._log.info("\n\n==========KUBECONFIG SETUP==========")
return self._configure_auth() \
| E.then | (lambda _: self._run('{scripts_dir}/create-kube-config.sh {cluster_name} '
'{cache_dir} {key_file} "{kubeconfig_file}" {project}'
.format(scripts_dir=self._scripts_dir,
cluster_name=cluster_name,
cache_dir=self._cache_dir,
kubeconfig_file=kubeconfig_file,
key_file=self._service_account_file,
project=self._project_id))) \
| E.on | (dict(whatever=lambda _x, _y: self._log.info("\n==========KUBECONFIG SETUP END==========\n")))
def _configure_auth(self):
return self._install_google_cloud_sdk() \
| E.then | (lambda _: self._install_kubectl()) \
| E.then | (lambda _: self._write_service_account_file()) \
| E.then | (lambda _: self._run("{cache_dir}/google-cloud-sdk/bin/gcloud "
"config set project {project}"
.format(cache_dir=self._cache_dir,
project=self._project_id)
)) \
| E.then | (lambda _: self._run("{cache_dir}/google-cloud-sdk/bin/gcloud "
"auth activate-service-account --key-file {key_file}"
.format(cache_dir=self._cache_dir,
key_file=self._service_account_file)))
def _cleanup(self, cluster_name: str):
return self._delete_service_account_file() \
| E.then | (lambda _: self._delete_kubeconfig(cluster_name))
def _delete_kubeconfig(self, cluster_name: str):
kubeconfig_file = self._kubeconfig_file_path(cluster_name)
return self._run('rm -f {}'.format(kubeconfig_file))
def _delete_service_account_file(self):
        return self._file.delete(self._service_account_file)
def _write_service_account_file(self):
self._file.write(self._service_account_file,
json.dumps(self._service_account_info, ensure_ascii=False))
def _install_kubectl(self):
if self._file.exists("{cache_dir}/kubectl".format(cache_dir=self._cache_dir)):
return E.success()
cur_os = 'darwin' if self._is_macos() else 'linux'
self._log.info("INSTALLING DEPENDENCY: Installing kubectl...")
self._run("curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt") \
| E.then | (lambda kube_version:
self._run("curl -L https://storage.googleapis.com/kubernetes-release/release"
"/{kube_version}/bin/{os}/amd64/kubectl > {cache_dir}/kubectl "
"&& chmod u+x {cache_dir}/kubectl"
.format(os=cur_os, kube_version=kube_version, cache_dir=self._cache_dir)))
def _install_google_cloud_sdk(self):
if self._file.exists("{cache_dir}/google-cloud-sdk".format(cache_dir=self._cache_dir)):
return E.success()
self._log.info("INSTALLING DEPENDENCY: Installing google-cloud-sdk...")
cur_os = 'darwin' if self._is_macos() else 'linux'
gcloud_version = self._conf.get_arg('KG_GCLOUD_VERSION', default='284.0.0')
google_sdk_url = ("https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/"
"google-cloud-sdk-{gcloud_version}-{os}-x86_64.tar.gz"
.format(os=cur_os, gcloud_version=gcloud_version))
return self._run("cd {cache_dir} && curl -L {url} | tar zx"
.format(cache_dir=self._cache_dir, url=google_sdk_url))
def _is_kubeconfig_valid(self, cluster_name: str):
kubeconfig_file = self._kubeconfig_file_path(cluster_name)
def is_valid(expiry: str) -> bool:
return datetime.fromisoformat(expiry) > (datetime.utcnow() + timedelta(minutes=10))
def parse_kubeconfig_expiry() -> str:
kubeconfig = yaml.load(self._file.read(kubeconfig_file))
return kubeconfig['users'][0]['user']['auth-provider']['config']['expiry'].replace('Z', '')
if self._file.exists(kubeconfig_file):
return E.try_catch(lambda: parse_kubeconfig_expiry()) \
| E.then | (lambda expiry: is_valid(expiry)) \
| E.on | dict(failure=lambda e: self._log.warn('Failed to parse kubeconfig at: {}. {}'
.format(kubeconfig_file, e))) \
| E.from_either | dict(if_success=lambda valid: valid,
if_failure=lambda _: False)
return False
def _kubeconfig_file_path(self, cluster_name: str):
return '{cache_dir}/{cluster_name}-kubeconfig.yaml' \
.format(cache_dir=self._cache_dir, cluster_name=cluster_name)
def _get_project_id(project_id, config: Config):
return config.get_arg('KG_PROJECT_ID', project_id, ensure=True)
def _get_service_account_info(service_account_b64: str, config: Config):
service_account_b64 = config.get_arg('KG_SERVICE_ACCOUNT_B64', service_account_b64, ensure=True)
return json.loads(b64decode(service_account_b64).decode("utf-8"))
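# Usage sketch (hypothetical wiring; the Config object comes from the
# surrounding krogon setup):
#   provider = GKEProvider(project_id='my-project',
#                          service_account_b64='<base64-encoded JSON key>',
#                          config=config)
#   provider.get_clusters(by_regex='^prod-') | E.then | print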
|
nilq/baby-python
|
python
|
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
import pytest
from ote_sdk.entities.url import URL
from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent
from ote_sdk.tests.constants.requirements import Requirements
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestURL:
@pytest.mark.priority_medium
@pytest.mark.component
@pytest.mark.reqids(Requirements.REQ_1)
def test_annotation_repo(self):
"""
<b>Description:</b>
Check that URL can correctly decode a given URL
<b>Input data:</b>
Some URL strings
<b>Expected results:</b>
Test passes if the correct scheme and path can be retrieved from the url
<b>Steps</b>
1. Create URL instances
2. Check scheme of URL
3. Check path of URL
"""
test_url = URL("binaryrepo:/images/file_%20_whatever.jpg")
test_url2 = URL(
"binaryrepo://intel.com/images/file_%20_whatever.jpg?blaat=none"
)
assert test_url.scheme == "binaryrepo"
assert test_url2.scheme == "binaryrepo"
assert test_url.path == "/images/file_%20_whatever.jpg"
assert test_url2.path == "/images/file_%20_whatever.jpg"
|
nilq/baby-python
|
python
|
from typing import Optional
from django.utils.crypto import get_random_string
from django.db import transaction
from rest_framework_simplejwt.tokens import RefreshToken
from treeckle.common.constants import REFRESH, ACCESS, TOKENS, USER
from users.models import User
from users.logic import requester_to_json
from .models import PasswordAuthentication, PasswordAuthenticationData
def get_tokens(user: User) -> dict:
refreshToken = RefreshToken.for_user(user)
return {
REFRESH: str(refreshToken),
ACCESS: str(refreshToken.access_token),
}
def get_authenticated_data(user: User) -> dict:
data = requester_to_json(user)
tokens = get_tokens(user)
return {USER: data, TOKENS: tokens}
@transaction.atomic
def reset_password(user: User) -> Optional[str]:
random_password = get_random_string(length=8)
PasswordAuthentication.objects.filter(user=user).delete()
    # Try to create a new password auth method for the user.
password_authentication = PasswordAuthentication.create(
user=user,
auth_data=PasswordAuthenticationData(
name="", email="", auth_id=random_password
),
check_alt_methods=False,
)
return random_password if password_authentication is not None else None
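# Usage sketch (hypothetical; e.g. from an admin-facing view):
#   new_password = reset_password(user)
#   if new_password is not None:
#       send_password_email(user, new_password)  # hypothetical helper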
|
nilq/baby-python
|
python
|
#!/home/jepoy/anaconda3/bin/python
# The interpreter path above was found with `which python` at the terminal.
import platform
print("This is python version {}".format(platform.python_version()))
|
nilq/baby-python
|
python
|
"""Test the interactive test runner."""
import six
if six.PY2:
import mock
else:
from unittest import mock
import pytest
from testplan import defaults
from testplan import report
from testplan import runners
from testplan import runnable
from testplan.common import entity
from testplan.testing import filtering
from testplan.testing import multitest
from testplan.testing import ordering
from testplan.runnable.interactive import base
from testplan.testing.multitest import driver
from testplan.common.utils.path import default_runpath
@multitest.testsuite
class Suite(object):
"""Test suite."""
@multitest.testcase
def case(self, env, result):
"""Testcase."""
del env # unused
result.true(True)
@multitest.testcase(parameters=[1, 2, 3])
def parametrized(self, env, result, val):
"""Parametrized testcase."""
del env # unused
result.gt(val, 0)
def test_startup():
"""Test initializing and running the interactive runner."""
target = runnable.TestRunner(name="TestRunner")
mock_server = mock.MagicMock()
with mock.patch(
"cheroot.wsgi.Server", return_value=mock_server
), mock.patch(
"testplan.runnable.interactive.reloader.ModuleReloader"
) as MockReloader:
MockReloader.return_value = None
irunner = base.TestRunnerIHandler(target)
irunner.setup()
assert irunner.target.runpath == default_runpath(target)
mock_server.prepare.assert_called_once()
mock_server.bind_addr = ("hostname", 1234)
assert irunner.http_handler_info == mock_server.bind_addr
irunner.run()
mock_server.serve.assert_called_once()
irunner.teardown()
@pytest.fixture
def irunner():
"""Set up an irunner instance for testing."""
target = runnable.TestRunner(name="TestRunner")
local_runner = runners.LocalRunner()
test_uids = ["test_1", "test_2", "test_3"]
test_objs = [
multitest.MultiTest(
name=uid,
suites=[Suite()],
test_filter=filtering.Filter(),
test_sorter=ordering.NoopSorter(),
stdout_style=defaults.STDOUT_STYLE,
environment=[driver.Driver(name="mock_driver")],
)
for uid in test_uids
]
for test in test_objs:
local_runner.add(test, test.uid())
target.resources.add(local_runner)
with mock.patch("cheroot.wsgi.Server"), mock.patch(
"testplan.runnable.interactive.reloader.ModuleReloader"
) as MockReloader:
MockReloader.return_value = None
irunner = base.TestRunnerIHandler(target)
irunner.setup()
yield irunner
irunner.teardown()
@pytest.mark.parametrize("sync", [True, False])
def test_run_all_tests(irunner, sync):
"""Test running all tests."""
_check_initial_report(irunner.report)
ret = irunner.run_all_tests(await_results=sync)
# If the tests were run asynchronously, await the results.
if not sync:
assert ret.result() is None
# The report tree should have been updated as a side-effect.
assert irunner.report.passed
assert len(irunner.report.entries) == 3
for test_report in irunner.report:
assert test_report.passed
@pytest.mark.parametrize("sync", [True, False])
def test_run_test(irunner, sync):
"""Test running a single test."""
ret = irunner.run_test("test_1", await_results=sync)
if not sync:
assert ret.result() is None
# The test report should have been updated as a side effect.
assert irunner.report["test_1"].passed
@pytest.mark.parametrize("sync", [True, False])
def test_run_suite(irunner, sync):
"""Test running a single test suite."""
ret = irunner.run_test_suite("test_1", "Suite", await_results=sync)
if not sync:
assert ret.result() is None
# The test report should have been updated as a side effect.
assert irunner.report["test_1"]["Suite"].passed
@pytest.mark.parametrize("sync", [True, False])
def test_run_testcase(irunner, sync):
"""Test running a single testcase."""
ret = irunner.run_test_case("test_1", "Suite", "case", await_results=sync)
if not sync:
assert ret.result() is None
# The test report should have been updated as a side effect.
assert irunner.report["test_1"]["Suite"]["case"].passed
@pytest.mark.parametrize("sync", [True, False])
def test_run_parametrization(irunner, sync):
"""Test running a single parametrization of a testcase."""
ret = irunner.run_test_case(
"test_1", "Suite", "parametrized__val_1", await_results=sync
)
if not sync:
assert ret.result() is None
# The test report should have been updated as a side effect.
assert irunner.report["test_1"]["Suite"]["parametrized"][
"parametrized__val_1"
].passed
@pytest.mark.parametrize("sync", [True, False])
def test_environment_control(irunner, sync):
"""Test starting and stopping test environments."""
test = irunner.test("test_1")
assert irunner.report["test_1"].env_status == entity.ResourceStatus.STOPPED
# Start the environment and check it has the expected status.
start_results = irunner.start_test_resources("test_1", await_results=sync)
    # If the environment was started asynchronously, wait for all of the
    # operations to complete before continuing.
if not sync:
start_results.result()
assert test.resources.all_status(entity.ResourceStatus.STARTED)
assert (
test.resources.mock_driver.status.tag == entity.ResourceStatus.STARTED
)
assert irunner.report["test_1"].env_status == entity.ResourceStatus.STARTED
# Stop the environment and check it has the expected status.
stop_results = irunner.stop_test_resources("test_1", await_results=sync)
# Again, await the async operation results if testing async.
if not sync:
stop_results.result()
assert test.resources.all_status(entity.ResourceStatus.STOPPED)
assert (
test.resources.mock_driver.status.tag == entity.ResourceStatus.STOPPED
)
assert irunner.report["test_1"].env_status == entity.ResourceStatus.STOPPED
def _check_initial_report(initial_report):
"""
Check that the initial report tree is generated correctly.
First, check that there are three top-level Test reports.
"""
assert initial_report.status == report.Status.UNKNOWN
assert initial_report.runtime_status == report.RuntimeStatus.READY
assert len(initial_report.entries) == 3
for test_report in initial_report:
# Each Test contains one suite.
assert test_report.status == report.Status.UNKNOWN
assert test_report.runtime_status == report.RuntimeStatus.READY
assert len(test_report.entries) == 1
for suite_report in test_report:
        # Each suite contains two testcases.
assert suite_report.status == report.Status.UNKNOWN
assert suite_report.runtime_status == report.RuntimeStatus.READY
assert len(suite_report.entries) == 2
# The first entry in the suite report is a regular testcase.
testcase_report = suite_report.entries[0]
assert isinstance(testcase_report, report.TestCaseReport)
assert len(testcase_report.entries) == 0
# The second entry in the suite report is a parametrized testcase.
param_report = suite_report.entries[1]
assert isinstance(param_report, report.TestGroupReport)
assert len(param_report.entries) == 3
|
nilq/baby-python
|
python
|