hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57c55b3ccdc500623a508207c07880063721a0b5 | 766 | py | Python | nngeometry/utils.py | OtUmm7ojOrv/nngeometry | ce184345258a7eb79ce78c14becce047a1785a48 | [
"MIT"
] | 7 | 2020-10-04T22:04:10.000Z | 2022-03-05T14:20:18.000Z | nngeometry/utils.py | OtUmm7ojOrv/nngeometry | ce184345258a7eb79ce78c14becce047a1785a48 | [
"MIT"
] | null | null | null | nngeometry/utils.py | OtUmm7ojOrv/nngeometry | ce184345258a7eb79ce78c14becce047a1785a48 | [
"MIT"
] | 3 | 2020-10-04T22:04:17.000Z | 2022-03-05T14:19:48.000Z | import torch
import torch.nn.functional as F
def get_n_parameters(model):
    """Count the total number of scalar parameters held by *model*."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
def per_example_grad_conv(mod, x, gy):
    """Compute per-example weight gradients for a 2d convolution layer.

    :param mod: conv layer providing weight/stride/padding/dilation
        (assumed torch.nn.Conv2d-like with groups == 1 -- TODO confirm).
    :param x: input minibatch to the layer, shape (bs, C_in, H, W).
    :param gy: gradient of the loss w.r.t. the layer output,
        shape (bs, C_out, H_out, W_out).
    :return: tensor of shape (bs, C_out, C_in * kh * kw), one weight
        gradient per example.
    """
    # Spatial kernel size read off the weight tensor (out, in, kh, kw).
    ks = (mod.weight.size(2), mod.weight.size(3))
    gy_s = gy.size()
    bs = gy_s[0]
    # unfold extracts the sliding local blocks so the conv becomes a matmul.
    x_unfold = F.unfold(x, kernel_size=ks, stride=mod.stride,
                        padding=mod.padding, dilation=mod.dilation)
    x_unfold_s = x_unfold.size()
    # Batched matmul: (bs, C_out, L) x (bs, L, C_in*kh*kw).
    return torch.bmm(gy.view(bs, gy_s[1], -1),
                     x_unfold.view(bs, x_unfold_s[1], -1).permute(0, 2, 1))
def display_correl(M, axis):
    """Render the absolute correlation matrix of *M* onto *axis*.

    :param M: matrix object exposing get_dense_tensor() -- presumably an
        nngeometry PMat; verify against callers.
    :param axis: matplotlib axes used for the imshow rendering.
    """
    M = M.get_dense_tensor()
    diag = torch.diag(M)
    # Add 1% of the mean diagonal before sqrt to avoid division by ~0.
    dM = (diag + diag.mean() / 100) **.5
    # Normalize |M[i, j]| by dM[i] * dM[j] to get a correlation-like map.
    correl = torch.abs(M) / dM.unsqueeze(0) / dM.unsqueeze(1)
    axis.imshow(correl.cpu()) | 28.37037 | 75 | 0.613577 |
b15d7afc5191535668d5a0a98668cc8b41fdaa5f | 1,030 | py | Python | system/imports/bd_SQlite3/main.py | ryanprogrammer/Sistema-de-cadastro | de1f1e2332650e7ba1dc43eb7daeafe2e5753b75 | [
"MIT"
] | 4 | 2021-12-23T22:56:42.000Z | 2022-01-01T06:00:38.000Z | system/imports/bd_SQlite3/main.py | ryanprogrammer/registration-system | de1f1e2332650e7ba1dc43eb7daeafe2e5753b75 | [
"MIT"
] | null | null | null | system/imports/bd_SQlite3/main.py | ryanprogrammer/registration-system | de1f1e2332650e7ba1dc43eb7daeafe2e5753b75 | [
"MIT"
] | null | null | null | import sqlite3
class BancoDeDados():
    """Thin wrapper around a SQLite connection for the 'clientes' table."""

    def __init__(self, conexao):
        # Keep the connection (for commits) and one cursor (for queries).
        self._conexao = conexao
        self._cursor = self._conexao.cursor()

    def inserir_cadastro(self, nome, email, senha):
        """Insert one client row and commit immediately."""
        self._cursor.execute(
            'INSERT INTO clientes (nome, email, senha) VALUES(?,?,?)',
            (nome, email, senha))
        self._conexao.commit()

    def contas_cadastradas(self):
        """Return every stored account as a list of email/senha dicts."""
        self._cursor.execute('SELECT * FROM clientes')
        # Row layout assumed to be (nome, email, senha, id) -- TODO confirm
        # against the actual CREATE TABLE statement.
        return [{'email': registro[1], 'senha': registro[2]}
                for registro in self._cursor.fetchall()]

    def fechar(self):
        """Release the cursor and close the underlying connection."""
        self._cursor.close()
        self._conexao.close()
if __name__ == '__main__':
    # Ad-hoc manual check: list the accounts stored in the local db file.
    bd = BancoDeDados(sqlite3.connect('users.db'))
    #bd.inserir_cadastro('RyanTeste', 't@gmail.com', '11111')
    v = bd.contas_cadastradas()
    bd.fechar()
| 29.428571 | 76 | 0.601942 |
04a1a24460bbf8ce064e279e392c108bef117796 | 218 | py | Python | nodes/output_node.py | PhillsProgrammingExperiments/flowbasedpython | 1e1f1cb3eed0ab1241cc1e2dba5ff4e9151b696e | [
"BSD-3-Clause"
] | 1 | 2021-05-29T07:01:32.000Z | 2021-05-29T07:01:32.000Z | nodes/output_node.py | PhillsProgrammingExperiments/flowbasedpython | 1e1f1cb3eed0ab1241cc1e2dba5ff4e9151b696e | [
"BSD-3-Clause"
] | null | null | null | nodes/output_node.py | PhillsProgrammingExperiments/flowbasedpython | 1e1f1cb3eed0ab1241cc1e2dba5ff4e9151b696e | [
"BSD-3-Clause"
] | null | null | null | from fbpy.node_interface import Node
class OutputNode(Node):
    """Sink node: pulls one value from its first input and prints it."""

    def pull(self):
        # Cache the upstream value for the later push() stage.
        source = self.input[0]
        self.data = source.get()

    def body(self):
        # An output-only node performs no transformation step.
        pass

    def push(self):
        print("I got number", self.data)
36f6f54d9e4a08d716211c5ad5818162267d470c | 974 | py | Python | test/test_destination_end_point_destination_list.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | test/test_destination_end_point_destination_list.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | test/test_destination_end_point_destination_list.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import vtpl_api
from vtpl_api.models.destination_end_point_destination_list import DestinationEndPointDestinationList # noqa: E501
from vtpl_api.rest import ApiException
class TestDestinationEndPointDestinationList(unittest.TestCase):
    """Unit test stubs for the DestinationEndPointDestinationList model."""

    def setUp(self):
        # No fixtures are required yet for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testDestinationEndPointDestinationList(self):
        """Test DestinationEndPointDestinationList construction."""
        # FIXME: construct the model with its mandatory attributes filled
        # in with example values, e.g.
        # vtpl_api.models.destination_end_point_destination_list.DestinationEndPointDestinationList()
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 24.35 | 123 | 0.743326 |
131fd3af49876418c4bbf15a7f329d3f898cc2c0 | 762 | py | Python | morpfw/authn/pas/user/path.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 8 | 2018-12-08T01:41:58.000Z | 2020-12-21T15:30:12.000Z | morpfw/authn/pas/user/path.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 17 | 2019-02-05T15:01:32.000Z | 2020-04-28T16:17:42.000Z | morpfw/authn/pas/user/path.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 2 | 2018-12-08T05:03:37.000Z | 2019-03-20T07:15:21.000Z | import morepath
from ..app import App
from .model import CurrentUserModel, UserCollection, UserModel, UserSchema
def get_user(request: morepath.Request, identifier) -> UserModel:
    """Resolve a user model by its public identifier."""
    return get_user_collection(request).get(identifier)
def get_user_collection(request: morepath.Request) -> UserCollection:
    """Build the user collection backed by the app's UserModel storage."""
    storage = request.app.get_storage(UserModel, request)
    return UserCollection(request, storage=storage)
def get_current_user(request: morepath.Request) -> UserModel:
    """Return the user matching the request's authenticated identity."""
    return get_user_collection(request).get_by_userid(request.identity.userid)
def refresh_nonce_handler(request, userid):
    """Fetch the stored nonce used to validate/refresh auth tokens."""
    user = get_user_collection(request).get_by_userid(userid)
    return user["nonce"]
| 30.48 | 87 | 0.788714 |
537330fd2e9e07d92f06ef636ba55fa6cdd39d72 | 3,273 | py | Python | Final_project/PostBoard/Board/views.py | GregTMJ/django-files | dfd2c8da596522b77fb3dfc8089f0d287a94d53b | [
"MIT"
] | 1 | 2021-05-29T21:17:56.000Z | 2021-05-29T21:17:56.000Z | Final_project/PostBoard/Board/views.py | GregTMJ/django-files | dfd2c8da596522b77fb3dfc8089f0d287a94d53b | [
"MIT"
] | null | null | null | Final_project/PostBoard/Board/views.py | GregTMJ/django-files | dfd2c8da596522b77fb3dfc8089f0d287a94d53b | [
"MIT"
] | 1 | 2021-06-30T12:43:39.000Z | 2021-06-30T12:43:39.000Z | from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Post, Author, Category, Comments, User
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from .forms import PostForm, EditForm, CommentForm, CommentEditForm
from .filters import SearchFilter
# Create your views here.
class Post_list(LoginRequiredMixin, ListView):
    # Paginated listing of all posts; login required.
    model = Post
    template_name = 'Post/list_of_posts.html'
    context_object_name = 'Posts'
    paginate_by = 5
    # Descending id so the most recently created posts come first.
    queryset = Post.objects.order_by('-id')
class Post_details(LoginRequiredMixin, DetailView):
    # Detail page for a single post; login required.
    model = Post
    template_name = 'Post/post_details.html'
    context_object_name = 'Post'
class Post_create(LoginRequiredMixin, CreateView):
    # Post creation form; login required.
    model = Post
    template_name = 'Post/post_create.html'
    form_class = PostForm
class Post_update(LoginRequiredMixin, UpdateView):
    """Edit an existing post; login required."""
    template_name = 'Post/post_update.html'
    form_class = EditForm
    context_object_name = 'Post'

    def get_object(self, **kwargs):
        # Look the post up by the primary key captured from the URL.
        return Post.objects.get(pk=self.kwargs.get('pk'))
class Post_delete(LoginRequiredMixin, DeleteView):
    # Confirmation + deletion of a post; redirects to the post list.
    template_name = 'Post/post_delete.html'
    queryset = Post.objects.all()
    context_object_name = 'Post'
    success_url = '/Posts'
class Add_comment(LoginRequiredMixin, CreateView):
    # Create a comment attached to the post identified by the URL's pk.
    model = Comments
    template_name = 'Post/add_comment.html'
    form_class = CommentForm
    success_url = '/Posts'
    def form_valid(self, form):
        # Bind the new comment to the post whose pk is in the URL kwargs.
        form.instance.comment_id = self.kwargs['pk']
        return super().form_valid(form)
class List_of_Comments(LoginRequiredMixin, ListView):
    # Listing of all comments with a search filter; login required.
    model = Comments
    template_name = 'Post/list_of_comments.html'
    context_object_name = 'comments'
    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(**kwargs)
        available_comments = Comments.objects.all()
        # Current logged-in user, exposed to the template as 'author_id'.
        user = User.objects.get(id=self.request.user.id)
        # Filter form bound to the GET parameters over the list queryset.
        context['filter'] = SearchFilter(self.request.GET,
                                         queryset=self.get_queryset())
        context['available_comments'] = available_comments
        context['author_id'] = user
        return context
class CommentEdit(LoginRequiredMixin, UpdateView):
    """Edit an existing comment; login required."""
    template_name = 'Post/edit_comment.html'
    form_class = CommentEditForm
    context_object_name = 'comments'

    def get_object(self, **kwargs):
        # Look the comment up by the primary key captured from the URL.
        return Comments.objects.get(pk=self.kwargs.get('pk'))
class CommentDelete(LoginRequiredMixin, DeleteView):
    # Confirmation + deletion of a comment; redirects to the post list.
    model = Comments
    template_name = 'Post/comment_delete.html'
    queryset = Comments.objects.all()
    context_object_name = 'comments'
    success_url = '/Posts'
def AcceptedView(request, pk):
    """Toggle a comment's 'accepted' flag, then return to the comments list.

    The ``pk`` URL argument is not used for the lookup: the comment id is
    read from the POST payload key 'comment.id' (unusual key name --
    NOTE(review): verify the template actually posts this key).
    """
    comment = get_object_or_404(Comments, id=request.POST.get('comment.id'))
    # Flip the flag in one expression instead of the if/else ladder.
    comment.accepted = not comment.accepted
    # Bug fix: the toggled value was never persisted -- without save() the
    # change was discarded when the request ended.
    comment.save()
    return HttpResponseRedirect(reverse('list_of_comments'))
| 30.877358 | 89 | 0.711885 |
2a3c1bd67189d73924d6d85bdd1ffda9b7793fd6 | 3,229 | py | Python | data_sources/semantic_scholar/main.py | JDRanpariya/ReScholar | 692fb9ee3c82b5abe95a953beaafd1e1b8e19652 | [
"BSD-3-Clause"
] | 1 | 2021-03-14T20:55:40.000Z | 2021-03-14T20:55:40.000Z | data_sources/semantic_scholar/main.py | pranavAbe/ReScholar | 692fb9ee3c82b5abe95a953beaafd1e1b8e19652 | [
"BSD-3-Clause"
] | 8 | 2021-03-14T16:54:54.000Z | 2021-04-23T03:35:57.000Z | data_sources/semantic_scholar/main.py | pranavAbe/ReScholar | 692fb9ee3c82b5abe95a953beaafd1e1b8e19652 | [
"BSD-3-Clause"
] | null | null | null | from scrapy import Spider
import scrapy
from urllib.parse import urlencode
from urllib.parse import urlparse
from selenium import webdriver
from scrapy.contracts import Contract
from scrapy_selenium import SeleniumRequest
# Site root; not referenced anywhere else in this file's visible code.
base_uri = "https://www.semanticscholar.org"
# class WithSelenium(Contract):
#     """ Contract to set the request class to be SeleniumRequest for the current call back method to test
#     @with_selenium
#     """
#     name = 'with_selenium'
#     request_cls = SeleniumRequest
def SemanticScholar(request):
    """Define a Scrapy/Selenium spider for Semantic Scholar search results.

    NOTE(review): this function only *defines* the spider class -- it never
    returns it or schedules a crawl, and the ``request`` argument is unused.
    Confirm how callers are expected to retrieve/run the spider.
    """
    class SemanticScholarSearchResultsSpider(Spider):
        name = "SemanticScholarSearchResultsSpider"
        allowed_domains = ["semanticscholar.org"]
        custom_settings = {
            'USER_AGENT': 'googlebot',
            'DOWNLOADER_MIDDLEWARES':
            {
                'scrapy_selenium.SeleniumMiddleware': 400,
            }
        }
        def start_requests(self):
            # One search request per query string; rendered via Selenium
            # because the results page is JavaScript-driven.
            queries = ['Residual learning']
            for query in queries:
                url = 'https://www.semanticscholar.org/search?' + \
                    urlencode({'q': query})
                # print(url)
                yield SeleniumRequest(url=url, callback=self.parse)
        def parse(self, response):
            # Scrape the first 10 result rows (xpath positions are 1-based).
            for i in range(1, 11):
                title = "".join(response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/a/div//text()").extract())
                link = response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/div[@class='cl-paper__bulleted-row cl-paper-controls']/div[2]/div[1]/a/@href").extract()[0]
                authors = "".join(response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/ul/li[1]//text()").extract())
                # Replace the unicode ellipsis with ASCII dots.
                journal = response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/ul/li[3]//text()").extract()[0].replace("\u2026", "...")
                domains = "".join(response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/ul/li[2]//text()").extract())
                year = response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/ul/li[4]//text()").extract()[-1]
                snippet = "".join(response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/div/div/span[1]//text()").extract())
                citations = response.xpath(
                    f"//div[@class='cl-paper-row serp-papers__paper-row paper-row-normal'][{i}]/div[@class='cl-paper__bulleted-row cl-paper-controls']/div[1]/ul/li[1]/div//text()").extract()[0]
                # NOTE(review): the 'domains ' key contains a trailing space;
                # downstream consumers may depend on it, so it is kept as-is.
                item = {
                    'title': title,
                    'link': link,
                    'authors': authors,
                    'journal': journal,
                    'year': year,
                    'domains ': domains,
                    'citations': citations,
                    'snippet': snippet,
                }
                yield item
| 46.797101 | 193 | 0.550635 |
c84b0c0137cf8c4a971d3cea9572c78989f3519d | 1,441 | py | Python | algorithms/tree/bst/height.py | zhengli0817/algorithms | 3c98813f0329d9a5fff1107dbcd40e7f38d2275d | [
"MIT"
] | 22,426 | 2017-01-17T04:01:44.000Z | 2022-03-31T12:06:16.000Z | algorithms/tree/bst/height.py | zhengli0817/algorithms | 3c98813f0329d9a5fff1107dbcd40e7f38d2275d | [
"MIT"
] | 523 | 2017-04-18T12:05:11.000Z | 2022-03-20T11:10:41.000Z | algorithms/tree/bst/height.py | zhengli0817/algorithms | 3c98813f0329d9a5fff1107dbcd40e7f38d2275d | [
"MIT"
] | 4,900 | 2017-01-19T23:47:05.000Z | 2022-03-31T10:00:47.000Z | """
Write a function height returns the height of a tree. The height is defined to
be the number of levels. The empty tree has height 0, a tree of one node has
height 1, a root node with one or two leaves as children has height 2, and so on
For example: height of tree is 4
9
/ \
6 12
/ \ / \
3 8 10 15
/ \
7 18
height = 4
"""
import unittest
from bst import Node
from bst import bst
def height(root):
    """Return the number of levels in the tree rooted at *root* (0 if empty)."""
    if root is None:
        return 0
    return 1 + max(height(root.left), height(root.right))
"""
The tree is created for testing:
9
/ \
6 12
/ \ / \
3 8 10 15
/ \
7 18
count_left_node = 4
"""
class TestSuite(unittest.TestCase):
    """Checks height() against the example tree from the module docstring."""

    def setUp(self):
        self.tree = bst()
        # Same insertion order as the original test, producing height 4.
        for value in (9, 6, 12, 3, 8, 10, 15, 7, 18):
            self.tree.insert(value)

    def test_height(self):
        self.assertEqual(4, height(self.tree.root))
if __name__ == '__main__':
    # Run the height() regression test directly.
    unittest.main()
| 23.622951 | 80 | 0.470507 |
186bb579f96eb1534adcfeb9c2bb02a4030279f2 | 5,361 | py | Python | analyses/seasonality_paper_st/lagged_fapar_only/specific.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/lagged_fapar_only/specific.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/lagged_fapar_only/specific.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import warnings
from pathlib import Path
# Experiment-specific setup: paths, figure savers, memoization caches and
# SHAP job-array bookkeeping for this analysis directory.
PROJECT_DIR = Path(__file__).resolve().parent
if str(PROJECT_DIR.parent) not in sys.path:
    sys.path.insert(0, str(PROJECT_DIR.parent))
warnings.filterwarnings(
    "ignore", category=FutureWarning, module="sklearn.utils.deprecation"
)
from common import *
warnings.filterwarnings(
    "always", category=FutureWarning, module="sklearn.utils.deprecation"
)
# Figure output directories are namespaced by parent/child directory names.
figure_saver = PaperFigureSaver(
    directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name,
    debug=True,
)
map_figure_saver = figure_saver(**map_figure_saver_kwargs)
for fig_saver in (figure_saver, map_figure_saver):
    fig_saver.experiment = PROJECT_DIR.name
memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100)
CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name
data_split_cache = SimpleCache("data_split", cache_dir=CACHE_DIR)
# Pre-bind the shared plotting helpers to this experiment's figure savers.
save_ale_2d_and_get_importance = partial(
    save_ale_2d_and_get_importance, figure_saver=figure_saver
)
save_pdp_plot_2d = partial(save_pdp_plot_2d, figure_saver=figure_saver)
save_ale_plot_1d_with_ptp = partial(
    save_ale_plot_1d_with_ptp, figure_saver=figure_saver
)
save_pdp_plot_1d = partial(
    save_pdp_plot_1d, CACHE_DIR=CACHE_DIR, figure_saver=figure_saver
)
multi_ale_plot_1d = partial(multi_ale_plot_1d, figure_saver=figure_saver)
# Number of SHAP jobs.
try:
    X_train, X_test, y_train, y_test = data_split_cache.load()
    # Maximum job array index (inclusive).
    shap_params["max_index"] = math.floor(X_train.shape[0] / shap_params["job_samples"])
    # Upper bound only.
    shap_params["total_samples"] = (shap_params["max_index"] + 1) * shap_params[
        "job_samples"
    ]
except NoCachedDataError:
    # Without the cached train/test split the SHAP job sizing is skipped.
    warnings.warn(
        "Processed data not found, not calculating 'max_index' or 'total_samples'."
    )
# Upper bound only.
shap_interact_params["total_samples"] = (
    shap_interact_params["max_index"] + 1
) * shap_interact_params["job_samples"]
# SHAP cache.
shap_cache = SimpleCache("shap_cache", cache_dir=CACHE_DIR / Path("shap"))
shap_interact_cache = SimpleCache(
    "shap_interact_cache", cache_dir=CACHE_DIR / Path("shap_interaction")
)
interact_data_cache = SimpleCache("SHAP_interact_data", cache_dir=CACHE_DIR)
# Redefine the common functionality for our use-case - no shifted variables.
_common_get_data = get_data
_common_get_offset_data = get_offset_data
# The fixed feature subset used by this experiment (lagged FAPAR only).
selected_features = (
    "Dry Day Period",
    "Max Temp",
    "TreeAll",
    "SWI(1) 50P 4k",
    "pftHerb",
    "Diurnal Temp Range",
    "ShrubAll",
    "AGB Tree",
    "pftCrop",
    "lightning",
    "FAPAR 50P 4k",
    "FAPAR 50P 4k -1 Month",
    "FAPAR 50P 4k -3 Month",
    "FAPAR 50P 4k -6 Month",
    "FAPAR 50P 4k -9 Month",
)
assert len(selected_features) == 15
# Build the 'offset' names: lags of 12+ months are rewritten so the label
# shows both the raw offset and its within-year remainder.
offset_selected_features = []
for column in selected_features:
    # Match a 1-2 digit negative lag such as "-1" or "-12" in the name.
    match = re.search(r"-\d{1,2}", column)
    if match:
        span = match.span()
        # Change the string to reflect the shift.
        original_offset = int(column[slice(*span)])
        if original_offset > -12:
            # Only shift months that are 12 or more months before the current month.
            offset_selected_features.append(column)
            continue
        # Within-year remainder of the lag, kept negative (e.g. -14 -> -2).
        comp = -(-original_offset % 12)
        new_column = " ".join(
            (
                column[: span[0] - 1],
                f"{original_offset} - {comp}",
                column[span[1] + 1 :],
            )
        )
        offset_selected_features.append(new_column)
    else:
        offset_selected_features.append(column)
@wraps(_common_get_data)
def get_data(*args, **kwargs):
    """Wrap the shared get_data, restricting it to the selected features."""
    data = list(_common_get_data(*args, **kwargs))
    # Positions: 0 endog, 1 exog, 2 master_mask, 3 filled, 4 masked, 5 land.
    data[1] = data[1][list(selected_features)]
    data[3] = data[3].select_variables(selected_features)
    data[4] = data[4].select_variables(selected_features)
    return tuple(data)
@wraps(_common_get_offset_data)
def get_offset_data(*args, **kwargs):
    """Wrap the shared get_offset_data, restricted to the selected features."""
    (
        endog_data,
        exog_data,
        master_mask,
        filled_datasets,
        masked_datasets,
        land_mask,
    ) = _common_get_offset_data(*args, **kwargs)
    # We need to subset exog_data, filled_datasets, and masked_datasets.
    # NOTE(review): exog_data uses offset_selected_features but the dataset
    # objects use selected_features -- confirm this asymmetry is intended.
    exog_data = exog_data[list(offset_selected_features)]
    filled_datasets = filled_datasets.select_variables(selected_features)
    masked_datasets = masked_datasets.select_variables(selected_features)
    return (
        endog_data,
        exog_data,
        master_mask,
        filled_datasets,
        masked_datasets,
        land_mask,
    )
def get_model(X_train=None, y_train=None):
    """Fetch (or fit) the cached model for this experiment's cache dir."""
    kwargs = dict(cache_dir=CACHE_DIR, X_train=X_train, y_train=y_train)
    return common_get_model(**kwargs)
# Disk-backed cache so the (slow) model scoring runs at most once.
model_score_cache = SimpleCache("model_scores", cache_dir=CACHE_DIR)
@model_score_cache
def get_model_scores(rf=None, X_test=None, X_train=None, y_test=None, y_train=None):
    # Delegate to the shared scoring helper; results are memoised on disk.
    return common_get_model_scores(rf, X_test, X_train, y_test, y_train)
| 29.618785 | 88 | 0.697631 |
731cdb347795195945eaddedf9b9a89fa0ffbd9b | 8,675 | py | Python | modular.py | yotamitai/AuToPN | e1f8563c2a9ff7abb9079193c6994452a3d57309 | [
"MIT"
] | null | null | null | modular.py | yotamitai/AuToPN | e1f8563c2a9ff7abb9079193c6994452a3d57309 | [
"MIT"
] | null | null | null | modular.py | yotamitai/AuToPN | e1f8563c2a9ff7abb9079193c6994452a3d57309 | [
"MIT"
] | null | null | null | from get_plans import *
from pythonpddl.pythonpddl import pddl
from facts_actions import *
from divesrity_score import *
from swappable import *
from create_plans import *
def parsing(domain_path, problem_path):
    """Parse the PDDL domain/problem pair and extract facts and actions."""
    parsed = pddl.parseDomainAndProblem(domain_path, problem_path)
    # Hand the parsed domain and problem to the fact/action collector.
    return collect_facts_actions(*parsed[:2])
def load_diverse_plans(p_type, k):
    """Load up to *k* diverse plans of the given type ('Temporal' or not).

    Returns (k_plans, PlanSet_Diversity, MaxDiveristyBefore, status_msg);
    the first three are None when no plans were found.
    """
    if p_type == 'Temporal':
        all_plans = load_temporal_plans()
    else:
        all_plans = load_classical_plans()
    if not all_plans:
        return None, None, None, 'No plans Found'
    print('DIVERSE PLANS FOUND: %d' % len(all_plans))
    # Cap k at the number of available plans.
    if k > len(all_plans):
        k = len(all_plans)
    print('GENERATING FOR K=%d PLANS' % k)
    if OPTIMIZATIONS['PLANSET_DIV'] == 'Max':
        print('OBTAINING MAX DIVERSITY PLANSET')
        plan_numbers, PlanSet_Diversity, MaxDiveristyBefore = get_plan_set(all_plans, k)
    else:  # K-First
        print('OBTAINING K-FIRST DIVERSITY PLANSET')
        # Simply take the first k plan ids and score that fixed set.
        plan_numbers = sorted(list(all_plans.keys())[:k])
        k_first_plans = dict((k, all_plans[k]) for k in plan_numbers)
        _, PlanSet_Diversity, MaxDiveristyBefore = get_plan_set(k_first_plans, k)
    # """load these plans"""
    if p_type == 'Temporal':
        k_plans = dict((k, all_plans[k]) for k in range(len(plan_numbers)))
    else:
        k_plans = load_plans(all_plans, plan_numbers)
    return k_plans, PlanSet_Diversity, MaxDiveristyBefore, 'plans found'
def get_full_plans(A, I, G, p, p_type):
    """Build one Full{Temporal,Classical}Plan wrapper per raw plan in *p*."""
    # The wrapper class is the same for every plan, so pick it once.
    plan_cls = FullTemporalPlan if p_type == 'Temporal' else FullClassicalPlan
    plans = []
    for index in range(len(p)):
        structure = plan_cls(A, I, G, p[index], index)
        structure.construct_plan()
        plans.append(structure)
    return plans
def get_compatible_nodes(plans):
    """Find swappable node pairs and the sets of nodes they involve.

    Returns (compatibility_dict, node_dict) where node_dict maps 'Full' and
    'Semi' to the set of node ids taking part in merges of that kind.
    """
    print('OBTAINING SWAPPABLE NODE PAIRS')
    logging.info('OBTAINING SWAPPABLE NODE PAIRS')
    compatibility_dict = swappable(plans)
    node_dict = {}
    for config in ['Full', 'Semi']:
        # Union of both endpoints of every compatible pair for this config.
        node_dict[config] = set([tuple(x[0]) for x in compatibility_dict[config]]).union(
            set([tuple(x[1]) for x in compatibility_dict[config]]))
        print(f'{str(len(compatibility_dict[config]))} {str(config)} compatible merges found')
        logging.info(f'{str(len(compatibility_dict[config]))} {str(config)} compatible merges found')
    if len(compatibility_dict.keys()) < 2:
        print('0 Semi compatible merges found')
        logging.info('0 Semi compatible merges found')
    return compatibility_dict, node_dict
def optimizations(prob_type, mergeable, plans):
    """Apply the enabled pre-compilation optimizations.

    Returns (compilation_output, enabled, [remaining_merges, remaining_nodes,
    plan_space_done], planSpace_actions); *enabled* is False when an
    optimization already produced a complete compilation.
    """
    enabled = True
    plan_space_done = False
    remaining_nodes = []
    remaining_merges = []
    compilation_output = []
    planSpace_actions = []
    if OPTIMIZATIONS['STATE_SPACE']:
        """merge nodes with the same state space"""
        enabled = False
        compilation_output = state_space_opt(plans, mergeable)
        if compilation_output:
            # State-space merging covered everything; nothing left to do.
            return compilation_output, enabled, [], []
        else:
            enabled = True
    if OPTIMIZATIONS['PLAN_SPACE'] and enabled:
        """merge nodes that initiate the same actions from the initial node or from the terminal node"""
        if prob_type == 'Temporal':
            remaining_merges, remaining_nodes, planSpace_actions = temporal_planSpace_merge(mergeable, plans)
        else:
            remaining_merges, remaining_nodes, planSpace_actions = classical_planSpace_merge(mergeable, plans)
        plan_space_done = True
        if not remaining_merges:  # all merges were made
            compilation_output = planSpace_actions
            enabled = False
            print('PLAN SPACE COMPILATION - COMPLETE')
            print(40 * '-')
    return compilation_output, enabled, [remaining_merges, remaining_nodes, plan_space_done], planSpace_actions
def combine_planspace_actions(compilation_output, planSpace_actions):
    """Merge plan-space merge actions with the main compilation output.

    For the Python compilation the output is already tuples; otherwise the
    planner's textual output is parsed into ('new', ...) / ('add', ...)
    tuples first. planSpace merge indices are shifted past the merges the
    main compilation produced.
    """
    if OPTIMIZATIONS['COMPILATION'] == 'Python':
        # Each ('new', ...) tuple opens one merge group.
        merge_num = sum([1 for x in compilation_output if x[0] == 'new'])
    else:
        # pairs appear only with the 'Mid' compilation
        pairs = [x for x in compilation_output if 'pair' in x.lower()]
        compilation_output = [x for x in compilation_output if x not in pairs]
        merges = [x for x in compilation_output if ' merge' in x.lower()]
        lonely = [x for x in compilation_output if 'unmerged_node' in x.lower()]
        assert merges or planSpace_actions or pairs, 'Failed to find a non trivial plan'
        """get list of merged nodes"""
        node2merge = defaultdict(list)
        for line in merges:
            words = line.lower().split()
            # The merge id position depends on the action name on the line.
            if 'create_merge' in words:
                merge_num = int(words[2][5:])
            else:
                merge_num = int(words[3][5:])
            node_num = tuple(int_id(words[1][4:]))
            node2merge[merge_num].append(node_num)
        """deal with pairs(Mid compilation)"""
        # looks like: add_pair_to_merge node11 node12 node21 node22 merge0
        for line in pairs:
            words = line.lower().split()
            merge_num = int(words[5][5:])
            node1_num = tuple(int_id(words[3][4:]))
            node2_num = tuple(int_id(words[4][4:]))
            node2merge[merge_num].append(node1_num)
            node2merge[merge_num].append(node2_num)
        # Renumber merge groups densely from 0.
        node2merge = dict(enumerate(node2merge.values()))
        compilation_output = []
        for merge in node2merge:
            # First two nodes create the merge; the rest are added to it.
            compilation_output.append(('new', node2merge[merge][0], node2merge[merge][1]))
            for i in range(2, len(node2merge[merge])):
                compilation_output.append(('add', merge, node2merge[merge][i]))
        if lonely:
            compilation_output.append(('lonely', len(lonely)))
        merge_num = len(node2merge)
    # Shift plan-space 'add' merge indices past the merges created above.
    new_ps_actions = []
    for a in planSpace_actions:
        if a[0] == 'add':
            temp_a = list(a)
            temp_a[1] += merge_num
            a = tuple(temp_a)
        new_ps_actions.append(a)
    return compilation_output + new_ps_actions
def state_space_opt(p, possible_merges):
    """Run the state-space optimization; return its actions on full coverage.

    Returns False when some possible merges were not achieved, so the caller
    falls through to the other compilation strategies.
    """
    achieved_merges, achieved_actions = stateSpace_merge(p, possible_merges)
    # Complete only when the achieved merges equal the possible ones.
    if set(possible_merges) != set(achieved_merges):
        return False
    print('STATE SPACE COMPILATION - COMPLETE')
    print(40 * '-')
    return achieved_actions
def compile_python(plan_space_done, remaining_nodes, remaining_merges, planSpace_actions, swappable_merges):
    """Run the pure-Python merge compilation and report its runtime.

    When the plan-space optimization already ran, only the remaining nodes
    and merges are compiled and its actions are folded into the output.
    """
    print('PYTHON COMPILING...')
    if plan_space_done:
        print('WITH PLAN SPACE OPTIMIZATION')
        compilation_output, compilation_time = get_merges(remaining_nodes, remaining_merges)
        compilation_output = combine_planspace_actions(compilation_output, planSpace_actions)
    else:
        compilation_output, compilation_time = get_merges(remaining_nodes, swappable_merges)
    print('...DONE!')
    print('COMPILATION TIME: %.02f' % compilation_time)
    print(40 * '-')
    return compilation_output
def compilation_search(compilation, remaining, planSpace_actions, swappable_merges, domain_path, d_name, p_name, mergeable_nodes):
    """Dispatch to the Python compiler or generate PDDL files for a planner.

    Returns ([domain_path, problem_path], []) for planner-based compilation
    or ([], compilation_output) for the Python path.
    """
    remaining_merges, remaining_nodes, plan_space_done = remaining
    if compilation == 'Python':
        return [], compile_python(plan_space_done, remaining_nodes, remaining_merges, planSpace_actions, swappable_merges)
    else:
        if plan_space_done:
            print('WITH PLAN SPACE OPTIMIZATION')
            n_merges = int(len(remaining_nodes) / 2)
            d_nodes = get_domain_literals(remaining_nodes)
            p_obj, p_init = get_problem_literals(domain_path, remaining_nodes, n_merges, remaining_merges)
        else:
            n_merges = int(len(mergeable_nodes) / 2)  # worst case
            d_nodes = get_domain_literals(mergeable_nodes)
            p_obj, p_init = get_problem_literals(domain_path, mergeable_nodes, n_merges, swappable_merges)
        # Regenerate the compilation PDDL directory from scratch.
        compilation_output_path = DIRECTORIES["COMPILATION_PDDL"]
        cleanup(compilation_output_path)
        path_new_domain = create_domain(d_name)
        new_p_name = d_name + "_" + p_name + ".pddl"
        path_new_problem = create_new_problem(p_obj, p_init, new_p_name, d_nodes)
        print('DOMAIN AND PROBLEM FILES GENERATED')
        print(OPTIMIZATIONS['COMPILATION'] + ' COMPILING...')
        return [path_new_domain, path_new_problem], []
| 38.727679 | 130 | 0.663055 |
a1d2f404d015d51a33083eb4d41890bd5efa4b3b | 335 | py | Python | OOP/Exams/Exam_23_august_2021/project/planet/planet.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | 2 | 2022-03-05T13:17:12.000Z | 2022-03-05T13:17:16.000Z | OOP/Exams/Exam_23_august_2021/project/planet/planet.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | null | null | null | OOP/Exams/Exam_23_august_2021/project/planet/planet.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | null | null | null | class Planet:
def __init__(self,name):
self.name = name
self.items=[]
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
if value.strip()=="":
raise ValueError("Planet name cannot be empty string or whitespace!")
self.__name=value
| 22.333333 | 81 | 0.58209 |
42b6c9927fbdb38d66151159123b6bfe5219abff | 7,392 | py | Python | nova/tests/virt/test_virt_disk_vfs_guestfs.py | bopopescu/nova-34 | b037993984229bb698050f20e8719b8c06ff2be3 | [
"Apache-2.0"
] | null | null | null | nova/tests/virt/test_virt_disk_vfs_guestfs.py | bopopescu/nova-34 | b037993984229bb698050f20e8719b8c06ff2be3 | [
"Apache-2.0"
] | null | null | null | nova/tests/virt/test_virt_disk_vfs_guestfs.py | bopopescu/nova-34 | b037993984229bb698050f20e8719b8c06ff2be3 | [
"Apache-2.0"
] | 1 | 2020-07-24T08:52:14.000Z | 2020-07-24T08:52:14.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from nova import exception
from nova import test
from nova.tests import fakeguestfs
from nova.virt.disk.vfs import guestfs as vfsimpl
class VirtDiskVFSGuestFSTest(test.TestCase):
    def setUp(self):
        super(VirtDiskVFSGuestFSTest, self).setUp()
        # Swap the real libguestfs bindings for the in-memory fake so the
        # VFS implementation can be exercised without a guest appliance.
        sys.modules['guestfs'] = fakeguestfs
        vfsimpl.guestfs = fakeguestfs
    def test_appliance_setup_inspect(self):
        """partition=-1 inspects the OS and mounts all its filesystems."""
        vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
                                 imgfmt="qcow2",
                                 partition=-1)
        vfs.setup()
        self.assertEqual(vfs.handle.running, True)
        # The fake inspection reports /, /boot and /home mounts.
        self.assertEqual(len(vfs.handle.mounts), 3)
        self.assertEqual(vfs.handle.mounts[0][1],
                         "/dev/mapper/guestvgf-lv_root")
        self.assertEqual(vfs.handle.mounts[1][1], "/dev/vda1")
        self.assertEqual(vfs.handle.mounts[2][1],
                         "/dev/mapper/guestvgf-lv_home")
        self.assertEqual(vfs.handle.mounts[0][2], "/")
        self.assertEqual(vfs.handle.mounts[1][2], "/boot")
        self.assertEqual(vfs.handle.mounts[2][2], "/home")
        # Keep a reference so we can check state after teardown clears it.
        handle = vfs.handle
        vfs.teardown()
        self.assertEqual(vfs.handle, None)
        self.assertEqual(handle.running, False)
        self.assertEqual(handle.closed, True)
        self.assertEqual(len(handle.mounts), 0)
def test_appliance_setup_inspect_no_root_raises(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
imgfmt="qcow2",
partition=-1)
# call setup to init the handle so we can stub it
vfs.setup()
def fake_inspect_os():
return []
self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
def test_appliance_setup_inspect_multi_boots_raises(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
imgfmt="qcow2",
partition=-1)
# call setup to init the handle so we can stub it
vfs.setup()
def fake_inspect_os():
return ['fake1', 'fake2']
self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
def test_appliance_setup_static_nopart(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
imgfmt="qcow2",
partition=None)
vfs.setup()
self.assertEqual(vfs.handle.running, True)
self.assertEqual(len(vfs.handle.mounts), 1)
self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda")
self.assertEqual(vfs.handle.mounts[0][2], "/")
handle = vfs.handle
vfs.teardown()
self.assertEqual(vfs.handle, None)
self.assertEqual(handle.running, False)
self.assertEqual(handle.closed, True)
self.assertEqual(len(handle.mounts), 0)
def test_appliance_setup_static_part(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
imgfmt="qcow2",
partition=2)
vfs.setup()
self.assertEqual(vfs.handle.running, True)
self.assertEqual(len(vfs.handle.mounts), 1)
self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda2")
self.assertEqual(vfs.handle.mounts[0][2], "/")
handle = vfs.handle
vfs.teardown()
self.assertEqual(vfs.handle, None)
self.assertEqual(handle.running, False)
self.assertEqual(handle.closed, True)
self.assertEqual(len(handle.mounts), 0)
def test_makepath(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.make_path("/some/dir")
vfs.make_path("/other/dir")
self.assertTrue("/some/dir" in vfs.handle.files)
self.assertTrue("/other/dir" in vfs.handle.files)
self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
vfs.teardown()
def test_append_file(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.append_file("/some/file", " Goodbye")
self.assertTrue("/some/file" in vfs.handle.files)
self.assertEqual(vfs.handle.files["/some/file"]["content"],
"Hello World Goodbye")
vfs.teardown()
def test_replace_file(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.replace_file("/some/file", "Goodbye")
self.assertTrue("/some/file" in vfs.handle.files)
self.assertEqual(vfs.handle.files["/some/file"]["content"],
"Goodbye")
vfs.teardown()
def test_read_file(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
self.assertEqual(vfs.read_file("/some/file"), "Hello World")
vfs.teardown()
def test_has_file(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.read_file("/some/file")
self.assertTrue(vfs.has_file("/some/file"))
self.assertFalse(vfs.has_file("/other/file"))
vfs.teardown()
def test_set_permissions(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.read_file("/some/file")
self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0o700)
vfs.set_permissions("/some/file", 0o7777)
self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0o7777)
vfs.teardown()
def test_set_ownership(self):
vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
vfs.setup()
vfs.read_file("/some/file")
self.assertEquals(vfs.handle.files["/some/file"]["uid"], 100)
self.assertEquals(vfs.handle.files["/some/file"]["gid"], 100)
vfs.set_ownership("/some/file", "fred", None)
self.assertEquals(vfs.handle.files["/some/file"]["uid"], 105)
self.assertEquals(vfs.handle.files["/some/file"]["gid"], 100)
vfs.set_ownership("/some/file", None, "users")
self.assertEquals(vfs.handle.files["/some/file"]["uid"], 105)
self.assertEquals(vfs.handle.files["/some/file"]["gid"], 500)
vfs.set_ownership("/some/file", "joe", "admins")
self.assertEquals(vfs.handle.files["/some/file"]["uid"], 110)
self.assertEquals(vfs.handle.files["/some/file"]["gid"], 600)
vfs.teardown()
| 35.710145 | 78 | 0.60809 |
fe56f669ed8e51b8d4946dc85fde58a05828f7b1 | 7,719 | py | Python | tests/test_environment.py | dulacp/pytest-django | b3d7fd66416250f1c333dce49685e96bb89eabc4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_environment.py | dulacp/pytest-django | b3d7fd66416250f1c333dce49685e96bb89eabc4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_environment.py | dulacp/pytest-django | b3d7fd66416250f1c333dce49685e96bb89eabc4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import with_statement
import os
import pytest
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_models
from django.core import mail
from django.db import connection
from django.test import TestCase
from pytest_django.lazy_django import get_django_version
from pytest_django_test.app.models import Item
# It doesn't matter which order all the _again methods are run, we just need
# to check the environment remains constant.
# This is possible with some of the testdir magic, but this is the lazy way
# to do it.
@pytest.mark.parametrize('subject', ['subject1', 'subject2'])
def test_autoclear_mailbox(subject):
    """The Django mail outbox must be empty at the start of every test;
    the two parametrized runs prove the previous run's message was cleared."""
    assert len(mail.outbox) == 0
    mail.send_mail(subject, 'body', 'from@example.com', ['to@example.com'])
    assert len(mail.outbox) == 1
    m = mail.outbox[0]
    assert m.subject == subject
    assert m.body == 'body'
    assert m.from_email == 'from@example.com'
    assert m.to == ['to@example.com']
class TestDirectAccessWorksForDjangoTestCase(TestCase):
    """Outbox auto-clearing must also apply to plain django.test.TestCase
    subclasses, not only to pytest-style test functions."""
    def _do_test(self):
        # Runs twice (test_one/test_two); a leaked message from the first
        # run would make the initial assertion fail in the second.
        assert len(mail.outbox) == 0
        mail.send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
        assert len(mail.outbox) == 1
    def test_one(self):
        self._do_test()
    def test_two(self):
        self._do_test()
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable(django_testdir):
    """With ``--fail-on-template-vars`` a view rendering a template that
    references an undefined variable must fail ("F"), while a test marked
    ``ignore_template_errors`` must still pass (".")."""
    django_testdir.create_app_file("""
        from django.conf.urls import url
        from pytest_django_test.compat import patterns
        from tpkg.app import views
        urlpatterns = patterns(
            '',
            url(r'invalid_template/', views.invalid_template),
        )
        """, 'urls.py')
    django_testdir.create_app_file("""
        from django.shortcuts import render
        def invalid_template(request):
            return render(request, 'invalid_template.html', {})
        """, 'views.py')
    django_testdir.create_app_file(
        "<div>{{ invalid_var }}</div>",
        'templates/invalid_template.html'
    )
    django_testdir.create_test_module('''
        import pytest
        def test_for_invalid_template(client):
            client.get('/invalid_template/')
        @pytest.mark.ignore_template_errors
        def test_ignore(client):
            client.get('/invalid_template/')
    ''')
    result = django_testdir.runpytest_subprocess('-s', '--fail-on-template-vars')
    result.stdout.fnmatch_lines_random([
        "tpkg/test_the_test.py F.",
        "Undefined template variable 'invalid_var' in 'invalid_template.html'",
    ])
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable_opt_in(django_testdir):
    """Without ``--fail-on-template-vars`` (the feature is opt-in) the very
    same undefined-variable template must NOT fail either test ("..")."""
    django_testdir.create_app_file("""
        from django.conf.urls import url
        from pytest_django_test.compat import patterns
        from tpkg.app import views
        urlpatterns = patterns(
            '',
            url(r'invalid_template/', views.invalid_template),
        )
        """, 'urls.py')
    django_testdir.create_app_file("""
        from django.shortcuts import render
        def invalid_template(request):
            return render(request, 'invalid_template.html', {})
        """, 'views.py')
    django_testdir.create_app_file(
        "<div>{{ invalid_var }}</div>",
        'templates/invalid_template.html'
    )
    django_testdir.create_test_module('''
        import pytest
        def test_for_invalid_template(client):
            client.get('/invalid_template/')
        @pytest.mark.ignore_template_errors
        def test_ignore(client):
            client.get('/invalid_template/')
    ''')
    result = django_testdir.runpytest_subprocess('-s')
    result.stdout.fnmatch_lines_random([
        "tpkg/test_the_test.py ..",
    ])
@pytest.mark.django_db
def test_database_rollback():
    """A row created inside a django_db test must be rolled back afterwards."""
    assert Item.objects.count() == 0
    Item.objects.create(name='blah')
    assert Item.objects.count() == 1
@pytest.mark.django_db
def test_database_rollback_again():
    """Re-running the same body proves the previous test's row was rolled back."""
    test_database_rollback()
@pytest.mark.django_db
def test_database_name():
    """The configured test database must be an in-memory SQLite database or
    carry the ``test_`` prefix Django gives to throwaway test databases."""
    # os.path.basename replaces the previous os.path.split whose first
    # element (the directory) was assigned but never used.
    name = os.path.basename(connection.settings_dict['NAME'])
    assert 'file:memorydb' in name or name == ':memory:' or name.startswith('test_')
def test_database_noaccess():
    """Without the django_db marker, any database access must be blocked
    by pytest-django and reported as a test failure."""
    with pytest.raises(pytest.fail.Exception):
        Item.objects.count()
class TestrunnerVerbosity:
    """Test that Django's code to setup and teardown the databases uses
    pytest's verbosity level."""
    @pytest.fixture
    def testdir(self, django_testdir):
        """A django_testdir pre-populated with one trivial django_db test.

        A leftover debug ``print("testdir")`` call was removed from this
        fixture; it only polluted captured output.
        """
        django_testdir.create_test_module('''
            import pytest
            @pytest.mark.django_db
            def test_inner_testrunner():
                pass
            ''')
        return django_testdir
    def test_default(self, testdir):
        """Not verbose by default."""
        result = testdir.runpytest_subprocess('-s')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py ."])
    def test_vq_verbosity_0(self, testdir):
        """-v and -q results in verbosity 0."""
        result = testdir.runpytest_subprocess('-s', '-v', '-q')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py ."])
    def test_verbose_with_v(self, testdir):
        """Verbose output with '-v'."""
        result = testdir.runpytest_subprocess('-s', '-v')
        result.stdout.fnmatch_lines_random([
            "tpkg/test_the_test.py:*",
            "*PASSED*",
            "*Destroying test database for alias 'default'...*"])
    def test_more_verbose_with_vv(self, testdir):
        """More verbose output with '-v -v'."""
        result = testdir.runpytest_subprocess('-s', '-v', '-v')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py:*Creating test database for alias*",
            '*Operations to perform:*',
            "*Apply all migrations:*",
            "*PASSED*Destroying test database for alias 'default' ('*')...*"])
    def test_more_verbose_with_vv_and_reusedb(self, testdir):
        """More verbose output with '-v -v', and --create-db."""
        result = testdir.runpytest_subprocess('-s', '-v', '-v', '--create-db')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py:*Creating test database for alias*",
            "*PASSED*"])
        # --create-db implies the database is kept for reuse, so the
        # "Destroying" message must NOT appear.
        assert ("*Destroying test database for alias 'default' ('*')...*"
                not in result.stdout.str())
@pytest.mark.skipif(
    get_django_version() < (1, 8),
    reason='Django 1.7 requires settings.SITE_ID to be set, so this test is invalid'
)
@pytest.mark.django_db
@pytest.mark.parametrize('site_name', ['site1', 'site2'])
def test_clear_site_cache(site_name, rf, monkeypatch):
    """Site.objects.get_current must resolve to the Site created in THIS
    parametrized run; a stale entry in Django's site cache from the previous
    run would return the wrong name."""
    request = rf.get('/')
    monkeypatch.setattr(request, 'get_host', lambda: 'foo.com')
    Site.objects.create(domain='foo.com', name=site_name)
    assert Site.objects.get_current(request=request).name == site_name
@pytest.mark.django_db
@pytest.mark.parametrize('site_name', ['site1', 'site2'])
def test_clear_site_cache_check_site_cache_size(site_name, settings):
    """SITE_CACHE must start empty on every run and hold exactly the one
    Site looked up by get_current() afterwards."""
    assert len(site_models.SITE_CACHE) == 0
    site = Site.objects.create(domain='foo.com', name=site_name)
    settings.SITE_ID = site.id
    assert Site.objects.get_current() == site
    assert len(site_models.SITE_CACHE) == 1
288a419f92d6a1c6a242bd98badcb6f54f3e4841 | 325 | py | Python | locale/pot/api/core/_autosummary/pyvista-ExplicitStructuredGrid-active_normals-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/core/_autosummary/pyvista-UnstructuredGrid-active_normals-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/core/_autosummary/pyvista-StructuredGrid-active_normals-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Compute normals on an example sphere mesh and return the
# active normals for the dataset. Show that this is the same size
# as the number of points.
#
import pyvista
# Build the example sphere mesh; compute_normals returns a new mesh with
# the point normals attached and active.
mesh = pyvista.Sphere()
mesh = mesh.compute_normals()
# active_normals is one 3-vector per point, so its first dimension equals
# the point count shown below.
normals = mesh.active_normals
normals.shape
# Expected:
## (842, 3)
mesh.n_points
# Expected:
## 842
| 21.666667 | 66 | 0.744615 |
47932624e5873eca0420770a789482c8c50612ea | 2,856 | py | Python | tests/test_falconx_sandbox.py | EdgeSync/falconpy | a2c026721259137dbbaa647aa719007ad20d9353 | [
"Unlicense"
] | null | null | null | tests/test_falconx_sandbox.py | EdgeSync/falconpy | a2c026721259137dbbaa647aa719007ad20d9353 | [
"Unlicense"
] | 10 | 2021-05-31T06:39:18.000Z | 2022-03-21T23:04:29.000Z | tests/test_falconx_sandbox.py | EdgeSync/falconpy | a2c026721259137dbbaa647aa719007ad20d9353 | [
"Unlicense"
] | null | null | null | # test_falconx_sandbox.py
# This class tests the falconx_sandbox service class
import os
import sys
import pytest
# Authentication via the test_authorization.py
from tests import test_authorization as Authorization
# Import our sibling src folder into the path
sys.path.append(os.path.abspath('src'))
# Classes to test - manually imported from sibling folder
from falconpy import falconx_sandbox as FalconXSandbox
auth = Authorization.TestAuthorization()
auth.serviceAuth()
falcon = FalconXSandbox.FalconX_Sandbox(access_token=auth.token)
AllowedResponses = [200, 429] # Adding rate-limiting as an allowed response for now
class TestFalconX:
def serviceFalconX_QueryReports(self):
if falcon.QueryReports(parameters={"limit": 1})["status_code"] in AllowedResponses:
return True
else:
return False
def serviceFalconX_QuerySubmissions(self):
if falcon.QuerySubmissions(parameters={"limit": 1})["status_code"] in AllowedResponses:
return True
else:
return False
def serviceFalconX_GetSummaryReports(self):
if falcon.GetSummaryReports(
ids=falcon.QueryReports(
parameters={"limit": 1}
)["body"]["resources"]
)["status_code"] in AllowedResponses:
return True
else:
return False
def serviceFalconX_GenerateErrors(self):
falcon.base_url = "nowhere"
errorChecks = True
commandList = [
["GetArtifacts", "parameters={}"],
["GetSummaryReports", "ids='12345678'"],
["GetReports", "ids='12345678'"],
["DeleteReport", "ids='12345678'"],
["GetSubmissions", "ids='12345678'"],
["Submit", "body={}"],
["QueryReports", ""],
["QuerySubmissions", ""],
["GetSampleV2", "ids='12345678'"],
["UploadSampleV2", "body={}, parameters={}"],
["DeleteSampleV2", "ids='12345678'"],
["QuerySampleV1", "ids='12345678'"]
]
for cmd in commandList:
if eval("falcon.{}({})['status_code']".format(cmd[0], cmd[1])) != 500:
errorChecks = False
return errorChecks
def test_QueryReports(self):
assert self.serviceFalconX_QueryReports() == True
def test_QuerySubmissions(self):
assert self.serviceFalconX_QuerySubmissions() == True
@pytest.mark.skipif(falcon.QueryReports(parameters={"limit": 1})["status_code"] == 429, reason="API rate limit reached")
def test_GetSummaryReports(self):
assert self.serviceFalconX_GetSummaryReports() == True
def test_Logout(self):
assert auth.serviceRevoke() == True
def test_Errors(self):
assert self.serviceFalconX_GenerateErrors() == True
| 34.409639 | 124 | 0.627451 |
91192ead973df835eb449110c41813b4200ca221 | 2,482 | py | Python | esque/validation/yamale_validators.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 29 | 2019-05-10T21:12:38.000Z | 2021-08-24T08:09:49.000Z | esque/validation/yamale_validators.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 103 | 2019-05-17T07:21:41.000Z | 2021-12-02T08:29:00.000Z | esque/validation/yamale_validators.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 2 | 2019-05-28T06:45:14.000Z | 2019-11-21T00:33:15.000Z | import re
from functools import wraps
from yamale.util import isstr
from yamale.validators import Boolean, DefaultValidators, Enum, Integer, Number, Validator
def all_validators() -> dict:
    """Return yamale's default validators extended with the esque-specific ones."""
    custom_validators = (StringBool, StringInt, StringFloat, ReplicaList, StringEnum)
    validators = DefaultValidators.copy()
    for validator in custom_validators:
        validators[validator.tag] = validator
    return validators
def catch_value_errors(validate):
    """Decorator for validator ``validate`` methods: a ValueError raised by a
    failed cast becomes a yamale-style error list instead of escaping."""
    @wraps(validate)
    def wrapper(self, value):
        try:
            outcome = validate(self, value)
        except ValueError:
            # self.tag looks like "s_int"; drop the "s_" prefix for the message.
            return [f"'{value}' could not be casted to {self.tag[2:]}"]
        return outcome
    return wrapper
class StringBool(Boolean):
    """Boolean validator that also accepts the strings "true"/"false" (any case)."""
    tag = "s_bool"
    @catch_value_errors
    def validate(self, value):
        text = str(value).lower()
        if text not in ("false", "true"):
            raise ValueError
        # Pass the boolean the string actually denotes.  The previous
        # bool(value) was always True for any non-empty string; validity
        # was unaffected (Boolean only type-checks), but the cast was wrong.
        return super().validate(text == "true")
class StringInt(Integer):
    """Integer validator that also accepts values supplied as strings."""
    tag = "s_int"
    @catch_value_errors
    def validate(self, value):
        # int() raises ValueError for non-numeric input, which the decorator
        # converts into a validation error.
        as_int = int(value)
        return super().validate(as_int)
class StringFloat(Number):
    """Number validator that also accepts values supplied as strings."""
    tag = "s_float"
    @catch_value_errors
    def validate(self, value):
        # float() raises ValueError for non-numeric input, which the decorator
        # converts into a validation error.
        as_float = float(value)
        return super().validate(as_float)
class StringEnum(Enum):
    """Enum validator that compares the lower-cased input string against the
    allowed values.

    NOTE(review): ``validate`` lower-cases the input unconditionally, so with
    the default ``case_sensitive=True`` an enum containing upper-case values
    can never match - confirm whether that asymmetry is intended.
    """
    tag = "s_enum"
    def __init__(self, *args, case_sensitive: bool = True, **kwargs):
        if not case_sensitive:
            args = [arg.lower() for arg in args]
        super().__init__(*args, **kwargs)
    @catch_value_errors
    def validate(self, value):
        # TypeError (unlike ValueError) deliberately escapes the
        # catch_value_errors decorator, which only converts ValueError.
        if not isinstance(value, str):
            raise TypeError(f"Value {value} has to be a string, but is {type(value).__name__}")
        return super().validate(value.lower())
class ReplicaList(Validator):
    """
    Validates a list of replicas in the form of '<broker_id>:<partition>' (e.g. `'0:0,1:1,2:2'`).
    Empty string for empty list or '*' for all replicas are also valid values.
    """
    tag = "replica_list"
    def _is_valid(self, value) -> bool:
        # This module is Python 3 only (it uses f-strings), so a plain
        # isinstance check replaces the py2-compat yamale.util.isstr helper.
        if not isinstance(value, str):
            return False
        if value in ("", "*"):
            return True
        return all(re.fullmatch(r"\d+:\d+", pair) for pair in value.split(","))
    def fail(self, value):
        # The previous message ("could not build dict from this") was a
        # copy-paste leftover and did not describe the expected format.
        return f"'{value}' is not a valid replica list"
f45b8f90b20ad0872d57b676942ed1ba28e73e0b | 1,074 | py | Python | distribution.py | vlasenkoalexey/tensorflow_serving_benchmark | c8b9c26ab6026cb91bf4a5183e0f4bd182b1888f | [
"MIT"
] | null | null | null | distribution.py | vlasenkoalexey/tensorflow_serving_benchmark | c8b9c26ab6026cb91bf4a5183e0f4bd182b1888f | [
"MIT"
] | null | null | null | distribution.py | vlasenkoalexey/tensorflow_serving_benchmark | c8b9c26ab6026cb91bf4a5183e0f4bd182b1888f | [
"MIT"
] | null | null | null | import numpy as np
class Distribution(object):
@staticmethod
def factory(dist, rate):
assert dist in Distribution.distributions()
return globals()[dist.capitalize()](rate)
@staticmethod
def distributions():
return [cls.__name__.lower() for cls in Distribution.__subclasses__()]
class Uniform(Distribution):
def __init__(self, rate):
self.rate = rate
def next(self):
return 1.0 / self.rate
class Poisson(Distribution):
def __init__(self, rate):
self.rate = rate
def next(self):
return np.random.exponential(1 / self.rate)
class Pareto(Distribution):
def __init__(self, rate):
self.rate = rate
def next(self):
# We chose the constant 2, so that the mean is (1/rate)
return np.random.pareto(2) / self.rate
def test():
p = 0
u = 0
e = 0
rate = 100 # qps
uniform = Uniform(rate)
exp = Poisson(rate)
pareto = Pareto(rate)
for _ in range(100):
u = uniform.next()
e = exp.next()
p = pareto.next()
print("uniform: {:.2f} poisson: {:.2f} pareto: {:.2f}".format(u, e, p))
| 18.20339 | 75 | 0.643389 |
e4842efcf7e314be204e745ef48923feb79d1a03 | 9,113 | py | Python | update_cpes.py | dabdine-r7/recog | 4ec39a98b72170aeb9c2d89ef0b70ff58aadc41d | [
"BSD-2-Clause"
] | 1 | 2021-01-01T21:32:31.000Z | 2021-01-01T21:32:31.000Z | update_cpes.py | dabdine-r7/recog | 4ec39a98b72170aeb9c2d89ef0b70ff58aadc41d | [
"BSD-2-Clause"
] | null | null | null | update_cpes.py | dabdine-r7/recog | 4ec39a98b72170aeb9c2d89ef0b70ff58aadc41d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import json
import logging
import re
import sys
from lxml import etree
def parse_r7_remapping(file):
    """Load the Rapid7 -> CPE remapping JSON.

    Returns r7_vendor -> {'cpe_vendor': <cpe_vendor>,
                          'products': {r7_product: cpe_product, ...}}.
    Raises ValueError if the same r7_vendor appears twice.
    """
    with open(file) as remap_file:
        remappings = json.load(remap_file)["remappings"]
    remap = {}
    for entry in remappings:
        r7_vendor = entry['r7_vendor']
        if r7_vendor in remap:
            raise ValueError("R7 vendor {} duplicated in {}".format(r7_vendor, file))
        remap[r7_vendor] = {
            'cpe_vendor': entry['cpe_vendor'],
            'products': entry.get('products', {}),
        }
    return remap
def parse_cpe_vp_map(file):
    """Parse the official CPE 2.3 XML dictionary.

    Returns cpe_type ('a'/'h'/'o') -> vendor -> set of products.
    CPE names that do not match the expected shape are logged and skipped.
    """
    vp_map = {}
    xml_parser = etree.XMLParser(remove_comments=False)
    doc = etree.parse(file, xml_parser)
    namespaces = {
        'ns': 'http://cpe.mitre.org/dictionary/2.0',
        'meta': 'http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2',
    }
    name_pattern = re.compile('^cpe:/([aho]):([^:]+):([^:]+)')
    for cpe_name in doc.xpath("//ns:cpe-list/ns:cpe-item/@name", namespaces=namespaces):
        match = name_pattern.match(cpe_name)
        if not match:
            logging.error("Unexpected CPE %s", cpe_name)
            continue
        cpe_type, vendor, product = match.group(1, 2, 3)
        vp_map.setdefault(cpe_type, {}).setdefault(vendor, set()).add(product)
    return vp_map
def main():
    """CLI entry point: load both mappings and rewrite the recog XML in place.

    argv: <recog XML file> <CPE 2.3 XML dictionary> <JSON remapping>.
    """
    if len(sys.argv) != 4:
        logging.critical("Expecting exactly 3 arguments; recog XML file, CPE 2.3 XML dictionary, JSON remapping, got %s", (len(sys.argv) - 1))
        exit(1)
    recog_xml, cpe_dict_xml, remap_json = sys.argv[1], sys.argv[2], sys.argv[3]
    cpe_vp_map = parse_cpe_vp_map(cpe_dict_xml)
    if not cpe_vp_map:
        # An empty CPE dictionary makes every lookup fail, so bail out.
        logging.critical("No CPE vendor => product mappings read from CPE 2.3 XML dictionary %s", sys.argv[2])
        exit(1)
    r7_vp_map = parse_r7_remapping(remap_json)
    if not r7_vp_map:
        # Remapping is optional; warn and continue with an empty map.
        logging.warning("No Rapid7 vendor/product => CPE mapping read from %s", sys.argv[3])
    update_cpes(recog_xml, cpe_vp_map, r7_vp_map)
def update_cpes(xml_file, cpe_vp_map, r7_vp_map):
    """Annotate every fingerprint in a recog XML file with a CPE param.

    For each ``<fingerprint>``: stale ``*.cpe``/``*.cpeNN`` params are
    dropped, hw/os/service param groups are collected, and - when the
    (possibly remapped) vendor/product pair exists in the CPE dictionary -
    a new ``<fp_type>.cpe23`` param is inserted.  The file is rewritten
    in place.

    xml_file   -- recog fingerprint XML file path (modified in place)
    cpe_vp_map -- cpe_type -> vendor -> set(products), from parse_cpe_vp_map
    r7_vp_map  -- r7_vendor -> remap dict, from parse_r7_remapping
    """
    parser = etree.XMLParser(remove_comments=False)
    doc = etree.parse(xml_file, parser)
    for fingerprint in doc.xpath('//fingerprint'):
        # collect all the params, grouping by os and service params that could be used to compute a CPE
        params = {}
        for param in fingerprint.xpath('./param'):
            name = param.attrib['name']
            # remove any existing CPE params
            if re.match(r'^.*\.cpe\d{0,2}$', name):
                param.getparent().remove(param)
                continue
            match = re.search(r'^(?P<fp_type>hw|os|service(?:\.component)?)\.', name)
            if match:
                fp_type = match.group('fp_type')
                if not fp_type in params:
                    params[fp_type] = {}
                if name in params[fp_type]:
                    raise ValueError('Duplicated fingerprint named {} in {}'.format(name, fingerprint.attrib['pattern']))
                params[fp_type][name] = param
        # for each of the applicable os/service param groups, build a CPE
        for fp_type in params:
            if fp_type == 'os':
                cpe_type = 'o'
            elif fp_type.startswith('service'):
                cpe_type = 'a'
            elif fp_type == 'hw':
                cpe_type = 'h'
            else:
                raise ValueError('Unhandled param type {}'.format(fp_type))
            # extract the vendor/product/version values from each os/service group,
            # using the static value ('Apache', for example) when pos is 0, and
            # otherwise use a value that contains interpolation markers such that
            # products/projects that use recog content can insert the value
            # extracted from the banner/other data via regex capturing groups
            fp_data = {
                'vendor': None,
                'product': None,
                'version': '-',
            }
            for fp_datum in fp_data:
                fp_datum_param_name = "{}.{}".format(fp_type, fp_datum)
                if fp_datum_param_name in params[fp_type]:
                    fp_datum_e = params[fp_type][fp_datum_param_name]
                    if fp_datum_e.attrib['pos'] == '0':
                        fp_data[fp_datum] = fp_datum_e.attrib['value']
                    else:
                        fp_data[fp_datum] = "{{{}}}".format(fp_datum_e.attrib['name'])
            vendor = fp_data['vendor']
            product = fp_data['product']
            version = fp_data['version']
            # build a reasonable looking CPE value from the vendor/product/version,
            # lowercasing, replacing whitespace with _, and more
            if vendor and product:
                if not cpe_type in cpe_vp_map:
                    logging.error("Didn't find CPE type '%s' for '%s' '%s'", cpe_type, vendor, product)
                    continue
                vendor = vendor.lower().replace(' ', '_').replace(',', '')
                product = product.lower().replace(' ', '_').replace(',', '')
                if 'unknown' in [vendor, product]:
                    continue
                # skip values that are pure interpolation markers ({...});
                # they carry no static vendor/product to validate against.
                if (vendor.startswith('{') and vendor.endswith('}')) or (product.startswith('{') and product.endswith('}')):
                    continue
                remapped_vendor = False
                og_vendor = vendor
                if not vendor in cpe_vp_map[cpe_type]:
                    if vendor in r7_vp_map:
                        vendor = r7_vp_map[vendor]['cpe_vendor']
                        remapped_vendor = True
                        if not vendor in cpe_vp_map[cpe_type]:
                            logging.error("Remapped vendor %s (remapped from %s) invalid for CPE %s (product %s)", vendor, og_vendor, cpe_type, product)
                            continue
                    else:
                        logging.error("Vendor %s invalid for CPE %s and no remapping (product %s)", vendor, cpe_type, product)
                        continue
                # if the product as specified is not found in the CPE dictionary for this vendor
                if not product in cpe_vp_map[cpe_type][vendor]:
                    # if this vendor has a remapping from R7
                    if og_vendor in r7_vp_map:
                        # if this product has a remapping for this vendor from R7
                        if product in r7_vp_map[og_vendor]['products']:
                            og_product = product
                            product = r7_vp_map[og_vendor]['products'][product]
                            # ensure that the remapped product is valid for the given vendor in CPE
                            if not product in cpe_vp_map[cpe_type][vendor]:
                                logging.error("Remapped product %s (remapped from %s) from vendor %s invalid for CPE %s", product, og_product, vendor, cpe_type)
                                continue
                        else:
                            if remapped_vendor:
                                logging.error("Product %s from vendor %s (remapped from %s) invalid for CPE %s and no mapping", product, vendor, og_vendor, cpe_type)
                            else:
                                logging.error("Product %s from vendor %s invalid for CPE %s and no mapping", product, vendor, cpe_type)
                            continue
                    else:
                        if remapped_vendor:
                            logging.error("Vendor %s (remapped from %s) is valid for CPE %s but product %s not valid and no mapping", vendor, og_vendor, cpe_type, product)
                        else:
                            logging.error("Vendor %s is valid for CPE %s but product %s not valid and no mapping", vendor, cpe_type, product)
                        continue
                # building the CPE string
                cpe_value = 'cpe:/{}:{}:{}'.format(cpe_type, vendor, product)
                if version:
                    cpe_value += ":{}".format(version)
                cpe_param = etree.Element('param')
                cpe_param.attrib['pos'] = '0'
                cpe_param.attrib['name'] = '{}.cpe23'.format(fp_type)
                cpe_param.attrib['value'] = cpe_value
                # insert the CPE param right after the last param of this group
                for param_name in params[fp_type]:
                    param = params[fp_type][param_name]
                    parent = param.getparent()
                    index = parent.index(param) + 1
                    parent.insert(index, cpe_param)
    root = doc.getroot()
    with open(xml_file, 'wb') as xml_out:
        xml_out.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding=doc.docinfo.encoding))
if __name__ == '__main__':
    try:
        # sys.exit instead of the site-provided exit() builtin, which is
        # unavailable under `python -S` or when the script is frozen.
        sys.exit(main())
    except KeyboardInterrupt:
        # Ctrl-C is an expected way to stop the rewrite; suppress traceback.
        pass
| 44.891626 | 171 | 0.548667 |
00015200c571aeb4f8ff46d65b56febeec9c6a4e | 4,463 | py | Python | session/speaker-overlap/finetune-speakernet.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | session/speaker-overlap/finetune-speakernet.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | session/speaker-overlap/finetune-speakernet.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | 1 | 2021-08-19T02:34:41.000Z | 2021-08-19T02:34:41.000Z | import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../gcs/mesolitica-storage.json'
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import malaya_speech
import malaya_speech.train as train
import malaya_speech.train.model.speakernet as speakernet
import tensorflow as tf
import malaya_speech.config
# SpeakerNet featurizer: start from the stock config, overridden to 20 ms
# frames with a 0.5 ms stride.
config = malaya_speech.config.speakernet_featurizer_config
new_config = {'frame_ms': 20, 'stride_ms': 0.5}
featurizer = malaya_speech.featurization.SpeakerNetFeaturizer(
    **{**config, **new_config}
)
# Per-frame feature dimension produced by the featurizer (used to reshape
# the tensors coming back from numpy_function below).
DIMENSION = 64
def calc(v):
    """Run the module-level SpeakerNet featurizer on one raw audio array.

    Kept as a plain named function so tf numpy_function can call it.
    """
    return featurizer(v)
def preprocess_inputs(example):
    """Featurize one example in-graph and attach its frame count.

    Replaces example['inputs'] with a (time, DIMENSION) float32 feature
    matrix and adds example['inputs_length'] as a length-1 int32 tensor.
    """
    # numpy_function drops back into Python to run the featurizer.
    s = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)
    s = tf.reshape(s, (-1, DIMENSION))
    length = tf.cast(tf.shape(s)[0], tf.int32)
    length = tf.expand_dims(length, 0)
    example['inputs'] = s
    example['inputs_length'] = length
    return example
def parse(serialized_example):
    """Decode one serialized TFRecord example and featurize it.

    Keeps only the keys consumed by the model: inputs, inputs_length,
    targets.
    """
    data_fields = {
        'inputs': tf.VarLenFeature(tf.float32),
        'targets': tf.VarLenFeature(tf.int64),
    }
    features = tf.parse_single_example(
        serialized_example, features = data_fields
    )
    # Densify the sparse VarLen features.
    for k in features.keys():
        features[k] = features[k].values
    features = preprocess_inputs(features)
    # Drop any keys the training loop does not need (snapshot the key list
    # first because the dict is mutated while iterating).
    keys = list(features.keys())
    for k in keys:
        if k not in ['inputs', 'inputs_length', 'targets']:
            features.pop(k, None)
    return features
def get_dataset(files, batch_size = 32, shuffle_size = 1024, thread_count = 24):
    """Return a zero-arg closure that builds the training tf.data pipeline.

    The pipeline reads TFRecords, parses/featurizes each example, shuffles,
    pads each batch to its longest example and repeats indefinitely.
    """
    def get():
        dataset = tf.data.TFRecordDataset(files)
        dataset = dataset.map(parse, num_parallel_calls = thread_count)
        dataset = dataset.shuffle(shuffle_size)
        # Pad per-batch to the longest sequence; zeros are the pad values.
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes = {
                'inputs': tf.TensorShape([None, DIMENSION]),
                'inputs_length': tf.TensorShape([None]),
                'targets': tf.TensorShape([None]),
            },
            padding_values = {
                'inputs': tf.constant(0, dtype = tf.float32),
                'inputs_length': tf.constant(0, dtype = tf.int32),
                'targets': tf.constant(0, dtype = tf.int64),
            },
        )
        dataset = dataset.repeat()
        return dataset
    return get
def model_fn(features, labels, mode, params):
    """tf.estimator model_fn: SpeakerNet fine-tuned for 2-class overlap.

    Builds the SpeakerNet graph, computes softmax cross-entropy loss,
    restores every variable from the pretrained checkpoint except those
    matching 'dense_2' (presumably the new classification head - confirm
    against the speakernet model definition), and returns a TRAIN or EVAL
    EstimatorSpec.
    """
    learning_rate = 1e-5
    init_checkpoint = '../speakernet/model.ckpt'
    # Targets arrive padded as a (batch, 1) tensor; take the first column.
    Y = tf.cast(features['targets'][:, 0], tf.int32)
    model = speakernet.Model(
        features['inputs'],
        features['inputs_length'][:, 0],
        num_class = 2,
        mode = 'train',
    )
    logits = model.logits
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits = logits, labels = Y
        )
    )
    # Named identities so the LoggingTensorHook can find these tensors.
    tf.identity(loss, 'train_loss')
    accuracy = tf.metrics.accuracy(
        labels = Y, predictions = tf.argmax(logits, axis = 1)
    )
    tf.identity(accuracy[1], name = 'train_accuracy')
    # Warm-start from the pretrained checkpoint, skipping 'dense_2' vars.
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    variables = [v for v in variables if 'dense_2' not in v.name]
    assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
        variables, init_checkpoint
    )
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step = global_step)
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL,
            loss = loss,
            eval_metric_ops = {'accuracy': accuracy},
        )
    return estimator_spec
# Log loss/accuracy every step via the named identity tensors in model_fn.
train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter = 1
    )
]
# Training shards live in GCS.
files = tf.io.gfile.glob(
    'gs://mesolitica-general/speaker-overlap/data/*.tfrecords'
)
train_dataset = get_dataset(files)
save_directory = 'output-speakernet-speaker-overlap'
# Kick off single-GPU training for 300k steps, checkpointing every 25k.
train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 25000,
    max_steps = 300000,
    train_hooks = train_hooks,
)
| 27.549383 | 90 | 0.643961 |
47afacfa05e92549da7145d8fb20f7ba68aedca6 | 695 | py | Python | src/stay_classification/bounding_box_classifier/bounding_box_functions.py | m-salewski/stay_classification | e3f9deadf51c97029a0f9a4bb669a5af68abf7c6 | [
"MIT"
] | null | null | null | src/stay_classification/bounding_box_classifier/bounding_box_functions.py | m-salewski/stay_classification | e3f9deadf51c97029a0f9a4bb669a5af68abf7c6 | [
"MIT"
] | null | null | null | src/stay_classification/bounding_box_classifier/bounding_box_functions.py | m-salewski/stay_classification | e3f9deadf51c97029a0f9a4bb669a5af68abf7c6 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def bbox(t_arr, x_arr):
    """Return [t_min, t_max, x_min, x_max] for the given coordinate arrays."""
    t_min, t_max = t_arr.min(), t_arr.max()
    x_min, x_max = x_arr.min(), x_arr.max()
    return np.array([t_min, t_max, x_min, x_max])
def plot_box(bbox, ax=None, plot_dict=None):
    """Draw the outline of a bounding box.

    Parameters
    ----------
    bbox : sequence of 4 floats, [t_min, t_max, x_min, x_max] (as produced
        by ``bbox`` above)
    ax : matplotlib axes to draw on; when None, plot on the current figure
        and return the result of ``plt.plot``
    plot_dict : keyword arguments forwarded to the plot call; defaults to a
        dashed grey line
    """
    # Idiom fix: compare to None with `is`, not `==`.
    if plot_dict is None:
        plot_dict = {"linestyle": '--',
                     "dashes": [4, 2],
                     "color": "grey",
                     "linewidth": 2}

    # Corner order traces the rectangle and closes it:
    # (t_min,x_min) -> (t_min,x_max) -> (t_max,x_max) -> (t_max,x_min) -> start.
    t_idx = [0, 0, 1, 1, 0]
    x_idx = [2, 3, 3, 2, 2]
    ts = [bbox[i] for i in t_idx]
    xs = [bbox[j] for j in x_idx]

    if ax is None:
        return plt.plot(ts, xs, **plot_dict)
    ax.plot(ts, xs, **plot_dict)
    return None
d4643b07613b74b7bcb73638bfaa5ac4a534b911 | 136 | py | Python | Modules/github/__init__.py | TigerGraph-OSS/DashboardPython | a7b9a3ee7c7f968ece8541d473b09a37484cf31c | [
"MIT"
] | null | null | null | Modules/github/__init__.py | TigerGraph-OSS/DashboardPython | a7b9a3ee7c7f968ece8541d473b09a37484cf31c | [
"MIT"
] | null | null | null | Modules/github/__init__.py | TigerGraph-OSS/DashboardPython | a7b9a3ee7c7f968ece8541d473b09a37484cf31c | [
"MIT"
] | null | null | null | def conf():
    """Return the static configuration of the github c360 component."""
    return {
        "id":"github",
        "description":"this is the github c360 component",
        "enabled":True,
    }
d1a101051afb4cc91ee9d387673501478066c84a | 238 | py | Python | powerline-shell.py | ashwinjv/powerline-shell | 6222532f61878a5eb76cbdf6e2eb96037650217f | [
"MIT"
] | null | null | null | powerline-shell.py | ashwinjv/powerline-shell | 6222532f61878a5eb76cbdf6e2eb96037650217f | [
"MIT"
] | null | null | null | powerline-shell.py | ashwinjv/powerline-shell | 6222532f61878a5eb76cbdf6e2eb96037650217f | [
"MIT"
] | null | null | null | #!/usr/local/opt/python@2/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from powerline_shell import main
if __name__ == '__main__':
    # setuptools console entry points on Windows invoke "<name>-script.py" or
    # "<name>.exe"; strip that suffix so argv[0] is the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| 21.636364 | 69 | 0.621849 |
2c6083df79039d0d2460e942a2f2e1db38574a9c | 14,068 | py | Python | libqtile/scratchpad.py | kapilpokhrel/qtile | c9eac5aed41c22d87e22e7bc4b7b7fb64e863c47 | [
"MIT"
] | null | null | null | libqtile/scratchpad.py | kapilpokhrel/qtile | c9eac5aed41c22d87e22e7bc4b7b7fb64e863c47 | [
"MIT"
] | null | null | null | libqtile/scratchpad.py | kapilpokhrel/qtile | c9eac5aed41c22d87e22e7bc4b7b7fb64e863c47 | [
"MIT"
] | null | null | null | # Copyright (c) 2017, Dirk Hartmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Dict, List
from libqtile import config, group, hook
from libqtile.backend.base import FloatStates
class WindowVisibilityToggler:
    """
    WindowVisibilityToggler is a wrapper for a window, used in ScratchPad group
    to toggle visibility of a window by toggling the group it belongs to.
    The window is either sent to the named ScratchPad, which is by default
    invisible, or the current group on the current screen.
    With this functionality the window can be shown and hidden by a single
    keystroke (bound to command of ScratchPad group).
    By default, the window is also hidden if it loses focus.
    """
    def __init__(self, scratchpad_name, window, on_focus_lost_hide, warp_pointer):
        """
        Initialize the WindowVisibilityToggler.

        Parameters:
        ===========
        scratchpad_name: string
            The name (not label) of the ScratchPad group used to hide the window
        window: window
            The window to toggle
        on_focus_lost_hide: bool
            if True the associated window is hidden if it loses focus
        warp_pointer: bool
            if True the mouse pointer is warped to center of associated window
            if shown. Only used if on_focus_lost_hide is True
        """
        self.scratchpad_name = scratchpad_name
        self.window = window
        self.on_focus_lost_hide = on_focus_lost_hide
        self.warp_pointer = warp_pointer
        # determine current status based on visibility
        self.shown = False
        self.show()

    def info(self):
        # Serializable summary consumed by qtile's introspection commands.
        return dict(window=self.window.info(),
                    scratchpad_name=self.scratchpad_name,
                    visible=self.visible,
                    on_focus_lost_hide=self.on_focus_lost_hide,
                    warp_pointer=self.warp_pointer)

    @property
    def visible(self):
        """
        Determine if associated window is currently visible.
        That is the window is on a group different from the scratchpad
        and that group is the current visible group.
        """
        if self.window.group is None:
            return False
        return (self.window.group.name != self.scratchpad_name and
                self.window.group is self.window.qtile.current_group)

    def toggle(self):
        """
        Toggle the visibility of associated window. Either show() or hide().
        """
        if (not self.visible or not self.shown):
            self.show()
        else:
            self.hide()

    def show(self):
        """
        Show the associated window on top of current screen.
        The window is moved to the current group as floating window.

        If 'warp_pointer' is True the mouse pointer is warped to center of the
        window if 'on_focus_lost_hide' is True.
        Otherwise, if pointer is moved manually to window by the user
        the window might be hidden again before actually reaching it.
        """
        if (not self.visible) or (not self.shown):
            win = self.window
            # always set the floating state before changing group
            # to avoid disturbance of tiling layout
            win._float_state = FloatStates.TOP
            # add to group and bring it to front.
            win.togroup()
            win.cmd_bring_to_front()
            # toggle internal flag of visibility
            self.shown = True
            # add hooks to determine if focus get lost
            if self.on_focus_lost_hide:
                if self.warp_pointer:
                    win.qtile.core.warp_pointer(win.x + win.width // 2, win.y + win.height // 2)
                hook.subscribe.client_focus(self.on_focus_change)
                hook.subscribe.setgroup(self.on_focus_change)

    def hide(self):
        """
        Hide the associated window. That is, send it to the scratchpad group.
        """
        if self.visible or self.shown:
            # unsubscribe the hook methods, since the window is not shown
            if self.on_focus_lost_hide:
                hook.unsubscribe.client_focus(self.on_focus_change)
                hook.unsubscribe.setgroup(self.on_focus_change)
            self.window.togroup(self.scratchpad_name)
            self.shown = False

    def unsubscribe(self):
        """unsubscribe all hooks"""
        # Hooks are only registered while shown with on_focus_lost_hide set,
        # so mirror that condition here.
        if self.on_focus_lost_hide and (self.visible or self.shown):
            hook.unsubscribe.client_focus(self.on_focus_change)
            hook.unsubscribe.setgroup(self.on_focus_change)

    def on_focus_change(self, *args, **kwargs):
        """
        hook method which is called on window focus change and group change.
        Depending on 'on_focus_lost_xxx' arguments, the associated window may
        get hidden (by call to hide) or even killed.
        """
        if self.shown:
            current_group = self.window.qtile.current_group
            if (self.window.group is not current_group or
                    self.window is not current_group.current_window):
                if self.on_focus_lost_hide:
                    self.hide()
class DropDownToggler(WindowVisibilityToggler):
    """
    Specialized WindowVisibilityToggler which places the associated window
    each time it is shown at desired location.
    For example this can be used to create a quake-like terminal.
    """
    def __init__(self, window, scratchpad_name, ddconfig):
        # Geometry is stored as fractions of the screen and resolved in show().
        self.name = ddconfig.name
        self.x = ddconfig.x
        self.y = ddconfig.y
        self.width = ddconfig.width
        self.height = ddconfig.height
        window.opacity = ddconfig.opacity
        WindowVisibilityToggler.__init__(
            self, scratchpad_name, window, ddconfig.on_focus_lost_hide, ddconfig.warp_pointer
        )

    def info(self):
        info = WindowVisibilityToggler.info(self)
        info.update(dict(name=self.name,
                         x=self.x,
                         y=self.y,
                         width=self.width,
                         height=self.height))
        return info

    def show(self):
        """
        Like WindowVisibilityToggler.show, but before showing the window,
        its floating x, y, width and height is set.
        """
        if (not self.visible) or (not self.shown):
            # SET GEOMETRY
            win = self.window
            screen = win.qtile.current_screen
            # calculate windows floating position and width/height
            # these may differ for screens, and thus always recalculated.
            x = int(screen.dx + self.x * screen.dwidth)
            y = int(screen.dy + self.y * screen.dheight)
            win.float_x = x
            win.float_y = y
            width = int(screen.dwidth * self.width)
            height = int(screen.dheight * self.height)
            win.place(
                x, y, width, height, win.borderwidth, win.bordercolor, respect_hints=True
            )
            # Toggle the dropdown
            WindowVisibilityToggler.show(self)
class ScratchPad(group._Group):
    """
    Specialized group which is by default invisible and can be configured, to
    spawn windows and toggle its visibility (in the current group) by command.

    The ScratchPad group acts as a container for windows which are currently
    not visible but associated to a DropDownToggler and can toggle their
    group by command (of ScratchPad group).
    The ScratchPad, by default, has no label and thus is not shown in
    GroupBox widget.
    """
    def __init__(self, name='scratchpad', dropdowns: List[config.DropDown] = None, label=''):
        group._Group.__init__(self, name, label=label)
        # Static configuration by dropdown name.
        self._dropdownconfig = {dd.name: dd for dd in dropdowns} if dropdowns is not None else {}
        # Live togglers, keyed by dropdown name, created once a window appears.
        self.dropdowns: Dict[str, DropDownToggler] = {}
        # pid -> dropdown name of processes spawned but not yet mapped to a window.
        self._spawned: Dict[int, str] = {}
        # Dropdowns to hide as soon as their window shows up (state restore).
        self._to_hide: List[str] = []

    def _check_unsubscribe(self):
        # Drop the window-tracking hooks once no dropdown windows remain.
        if not self.dropdowns:
            hook.unsubscribe.client_killed(self.on_client_killed)
            hook.unsubscribe.float_change(self.on_float_change)

    def _spawn(self, ddconfig):
        """
        Spawn a process by defined command.
        Method is only called if no window is associated. This is either on the
        first call to show or if the window was killed.
        The process id of spawned process is saved and compared to new windows.
        In case of a match the window gets associated to this DropDown object.
        """
        name = ddconfig.name
        if name not in self._spawned.values():
            if not self._spawned:
                # First pending spawn: start watching for new clients.
                hook.subscribe.client_new(self.on_client_new)
            cmd = self._dropdownconfig[name].command
            pid = self.qtile.cmd_spawn(cmd)
            self._spawned[pid] = name

    def on_client_new(self, client, *args, **kwargs):
        """
        hook method which is called on new windows.
        This method is subscribed if the given command is spawned
        and unsubscribed immediately if the associated window is detected.
        """
        client_pid = client.get_pid()
        if client_pid in self._spawned:
            name = self._spawned.pop(client_pid)
            if not self._spawned:
                # No spawns pending anymore: stop watching new clients.
                hook.unsubscribe.client_new(self.on_client_new)
            self.dropdowns[name] = DropDownToggler(client, self.name,
                                                  self._dropdownconfig[name])
            if name in self._to_hide:
                self.dropdowns[name].hide()
                self._to_hide.remove(name)
            if len(self.dropdowns) == 1:
                # First live dropdown: start tracking kills and float changes.
                hook.subscribe.client_killed(self.on_client_killed)
                hook.subscribe.float_change(self.on_float_change)

    def on_client_killed(self, client, *args, **kwargs):
        """
        hook method which is called if a client is killed.
        If the associated window is killed, reset internal state.
        """
        name = None
        for name, dd in self.dropdowns.items():
            if dd.window is client:
                dd.unsubscribe()
                del self.dropdowns[name]
                break
        self._check_unsubscribe()

    def on_float_change(self, *args, **kwargs):
        """
        hook method which is called if window float state is changed.
        If the current associated window is not floated (any more) the window
        and process is detached from DRopDown, thus the next call to Show
        will spawn a new process.
        """
        name = None
        for name, dd in self.dropdowns.items():
            if not dd.window.floating:
                if dd.window.group is not self:
                    dd.unsubscribe()
                    del self.dropdowns[name]
                    break
        self._check_unsubscribe()

    def cmd_dropdown_toggle(self, name):
        """
        Toggle visibility of named DropDown.
        """
        if name in self.dropdowns:
            self.dropdowns[name].toggle()
        else:
            # No window yet: spawn the configured command; the toggler is
            # created in on_client_new once the window appears.
            if name in self._dropdownconfig:
                self._spawn(self._dropdownconfig[name])

    def cmd_dropdown_reconfigure(self, name, **kwargs):
        """
        reconfigure the named DropDown configuration.
        Note that changed attributes only have an effect on spawning the window.
        """
        if name not in self._dropdownconfig:
            return
        dd = self._dropdownconfig[name]
        for attr, value in kwargs.items():
            if hasattr(dd, attr):
                setattr(dd, attr, value)

    def cmd_dropdown_info(self, name=None):
        """
        Get information on configured or currently active DropDowns.
        If name is None, a list of all dropdown names is returned.
        """
        if name is None:
            return {'dropdowns': [ddname for ddname in self._dropdownconfig]}
        elif name in self.dropdowns:
            return self.dropdowns[name].info()
        elif name in self._dropdownconfig:
            return self._dropdownconfig[name].info()
        else:
            raise ValueError('No DropDown named "%s".' % name)

    def get_state(self):
        """
        Get the state of existing dropdown windows. Used for restoring state across
        Qtile restarts.
        """
        state = []
        for name, dd in self.dropdowns.items():
            pid = dd.window.get_pid()
            state.append((name, pid, dd.visible))
        return state

    def restore_state(self, state):
        """
        Restore the state of existing dropdown windows. Used for restoring state across
        Qtile restarts.

        Returns the pids of windows whose dropdown config no longer exists
        (orphans), so the caller can deal with them.
        """
        orphans = []
        for name, pid, visible in state:
            if name in self._dropdownconfig:
                self._spawned[pid] = name
                if not visible:
                    # Hide the window as soon as it is re-associated.
                    self._to_hide.append(name)
            else:
                orphans.append(pid)
        if self._spawned:
            # Re-adopt the surviving processes' windows as they are detected.
            hook.subscribe.client_new(self.on_client_new)
        return orphans
| 40.079772 | 97 | 0.621268 |
64472399dbea5c2fc0477b781032e28abf0f3438 | 778 | py | Python | ringcentral_bot_framework/core/user_oauth.py | zxdong262/ringcentral-chatbot-python | 4ca6feb4f686705a75501239c13a1869e617d2a3 | [
"MIT"
] | 4 | 2018-11-14T08:10:55.000Z | 2019-05-15T10:40:00.000Z | ringcentral_bot_framework/core/user_oauth.py | ringcentral/ringcentral-chatbot-python | 4ca6feb4f686705a75501239c13a1869e617d2a3 | [
"MIT"
] | 52 | 2018-11-14T08:10:37.000Z | 2019-04-19T00:43:59.000Z | ringcentral_bot_framework/core/user_oauth.py | zxdong262/ringcentral-chatbot-python | 4ca6feb4f686705a75501239c13a1869e617d2a3 | [
"MIT"
] | 2 | 2019-02-11T18:10:06.000Z | 2019-04-03T15:12:21.000Z | """
user auth
"""
import time
from .common import result, getQueryParam
from pydash import get
def initUserAuth(
    conf,
    Bot,
    getBot,
    User,
    dbAction
):
    """Build the user-OAuth callback handler.

    Returns a ``userAuth(event)`` function which exchanges the OAuth ``code``
    query parameter for a token, attaches the user to the group/bot encoded
    in ``state``, and renders the configured success page.
    """
    def userAuth(event):
        # Exchange the authorization code for an access token.
        user = User()
        code = getQueryParam(event, 'code')
        user.auth(code)
        # 'state' carries "<groupId>,<botId>" set when the auth flow started;
        # default ',' yields empty group/bot ids.
        state = getQueryParam(event, 'state') or ','
        arr = state.split(',')
        groupId = get(arr, '[0]')
        botId = get(arr, '[1]')
        bot = getBot(botId)
        user.addGroup(groupId, botId)
        # Let the bot-framework configuration react to the successful auth.
        conf.userAuthSuccessAction(bot, groupId, user.id, dbAction)
        conf.userAddGroupInfoAction(user, bot, groupId, dbAction)
        return result(
            conf.userAuthSuccessHtml(user, bot),
            200,
            {
                'headers': {
                    'Content-Type': 'text/html; charset=UTF-8'
                }
            }
        )
    return userAuth
| 20.473684 | 63 | 0.609254 |
8c60f9c94573d6e8101ed953f19a0b2e2f69b1d1 | 689 | py | Python | gen/custom.py | acdiaconu/metagol | e2cd4840a9e4c25c4bef349ae12369c71497ab6e | [
"BSD-3-Clause"
] | null | null | null | gen/custom.py | acdiaconu/metagol | e2cd4840a9e4c25c4bef349ae12369c71497ab6e | [
"BSD-3-Clause"
] | null | null | null | gen/custom.py | acdiaconu/metagol | e2cd4840a9e4c25c4bef349ae12369c71497ab6e | [
"BSD-3-Clause"
] | null | null | null | def create_ex(lines):
    """Convert lines of the form ``<input><sep><output>`` into metagol ``f/2``
    example terms (e.g. ``f(['a','b'],['c','d']),``) and print them.

    Comment lines (starting with '%') and blank lines are passed through
    unchanged.  NOTE(review): ``i += 3`` assumes the separator between input
    and output is exactly three characters wide -- confirm against the input
    file format.
    """
    final = ""
    for line in lines:
        # Keep Prolog comments and blank lines verbatim.
        if line[0] == '%' or line == '\n':
            final = final + '\n' + line
            continue
        ex = 'f(['
        i = 0
        # One quoted element ("'x',", 4 chars) per character up to the first
        # '='.  The ex[:-5] below drops the last element plus its leading
        # comma, i.e. the separator character emitted just before '='.
        while line[i] != '=':
            ex += '\'' + line[i] + '\'' + ','
            i += 1
        ex = ex[:-5] + "],["
        i += 3  # skip the separator between input and output
        while i < len(line):
            ex += '\'' + line[i] + '\'' + ','
            i += 1
        # Again drop the last emitted element (the trailing newline char).
        ex = ex[:-5] + "]),"
        final = final + '\n' + ex
    final = final[:-1]  # strip the final trailing character (comma/newline)
    print(final)
# Driver: read the raw example file and print metagol facts.
# NOTE(review): hard-coded absolute Windows path; the file handle is never
# closed (acceptable for a one-shot script, but worth fixing).
file1 = open('C:\\Users\\andre\\OneDrive\\Documente\\metagol\\gen\\ex', 'r')
Lines = file1.readlines()
create_ex(Lines)
99c155575b4da82b23051691a1be020af5c826e3 | 5,440 | py | Python | xlsxwriter/test/worksheet/test_sparkline03.py | totdiao/XlsxWriter | 3d65858d8933bddb8262d500bcc2005f28fde645 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/worksheet/test_sparkline03.py | totdiao/XlsxWriter | 3d65858d8933bddb8262d500bcc2005f28fde645 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/worksheet/test_sparkline03.py | totdiao/XlsxWriter | 3d65858d8933bddb8262d500bcc2005f28fde645 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.

    """
    def test_assemble_xml_file(self):
        """Test writing a worksheet with two rows of data and two sparklines."""
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()
        worksheet.name = 'Sheet1'
        # Sparklines require the Excel 2010 extension namespace.
        worksheet.excel_version = 2010

        data = [-2, 2, 3, -1, 0]
        worksheet.write_row('A1', data)
        worksheet.write_row('A2', data)

        # Set up sparklines.
        worksheet.add_sparkline('F1', {'range': 'Sheet1!A1:E1'})
        worksheet.add_sparkline('F2', {'range': 'Sheet1!A2:E2'})

        worksheet._assemble_xml_file()

        # Note: sparkline groups are emitted in reverse insertion order
        # (F2's group before F1's), matching Excel's own output.
        exp = _xml_to_list("""
            <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
            <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
              <dimension ref="A1:E2"/>
              <sheetViews>
                <sheetView tabSelected="1" workbookViewId="0"/>
              </sheetViews>
              <sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
              <sheetData>
                <row r="1" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A1">
                    <v>-2</v>
                  </c>
                  <c r="B1">
                    <v>2</v>
                  </c>
                  <c r="C1">
                    <v>3</v>
                  </c>
                  <c r="D1">
                    <v>-1</v>
                  </c>
                  <c r="E1">
                    <v>0</v>
                  </c>
                </row>
                <row r="2" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A2">
                    <v>-2</v>
                  </c>
                  <c r="B2">
                    <v>2</v>
                  </c>
                  <c r="C2">
                    <v>3</v>
                  </c>
                  <c r="D2">
                    <v>-1</v>
                  </c>
                  <c r="E2">
                    <v>0</v>
                  </c>
                </row>
              </sheetData>
              <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
              <extLst>
                <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
                  <x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                    <x14:sparklineGroup displayEmptyCellsAs="gap">
                      <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                      <x14:colorNegative theme="5"/>
                      <x14:colorAxis rgb="FF000000"/>
                      <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                      <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                      <x14:colorLast theme="4" tint="0.39997558519241921"/>
                      <x14:colorHigh theme="4"/>
                      <x14:colorLow theme="4"/>
                      <x14:sparklines>
                        <x14:sparkline>
                          <xm:f>Sheet1!A2:E2</xm:f>
                          <xm:sqref>F2</xm:sqref>
                        </x14:sparkline>
                      </x14:sparklines>
                    </x14:sparklineGroup>
                    <x14:sparklineGroup displayEmptyCellsAs="gap">
                      <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                      <x14:colorNegative theme="5"/>
                      <x14:colorAxis rgb="FF000000"/>
                      <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                      <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                      <x14:colorLast theme="4" tint="0.39997558519241921"/>
                      <x14:colorHigh theme="4"/>
                      <x14:colorLow theme="4"/>
                      <x14:sparklines>
                        <x14:sparkline>
                          <xm:f>Sheet1!A1:E1</xm:f>
                          <xm:sqref>F1</xm:sqref>
                        </x14:sparkline>
                      </x14:sparklines>
                    </x14:sparklineGroup>
                  </x14:sparklineGroups>
                </ext>
              </extLst>
            </worksheet>
            """)

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
| 42.170543 | 337 | 0.419853 |
514b11ab0d87b8aa43d386322a70afff640d749d | 1,956 | py | Python | trRosetta/coords6d.py | NatureGeorge/trRosetta2 | dba6078ebda9f2429264ace3deaffe50d9899def | [
"MIT"
] | 73 | 2021-05-21T07:03:24.000Z | 2022-03-31T14:11:28.000Z | trRosetta/coords6d.py | partrita/trRosetta2 | 7036f81cdcfac6adcfebdc1ee917f46d8345229a | [
"MIT"
] | 14 | 2021-05-20T21:35:30.000Z | 2022-02-05T15:38:55.000Z | trRosetta/coords6d.py | partrita/trRosetta2 | 7036f81cdcfac6adcfebdc1ee917f46d8345229a | [
"MIT"
] | 19 | 2021-05-24T10:26:13.000Z | 2022-03-02T14:11:45.000Z | import numpy as np
import scipy
import scipy.spatial
# calculate dihedral angles defined by 4 sets of points
def get_dihedrals(a, b, c, d):
    """Dihedral angle (radians) for each quadruple a[i]-b[i]-c[i]-d[i].

    All inputs are (N, 3) float arrays; the result is an (N,) array.
    """
    bond1 = a - b                 # same as -(b - a)
    axis = c - b
    bond2 = d - c
    axis = axis / np.linalg.norm(axis, axis=-1)[:, None]
    # Project the outer bonds onto the plane orthogonal to the central axis.
    v = bond1 - np.sum(bond1 * axis, axis=-1)[:, None] * axis
    w = bond2 - np.sum(bond2 * axis, axis=-1)[:, None] * axis
    cos_part = np.sum(v * w, axis=-1)
    sin_part = np.sum(np.cross(axis, v) * w, axis=-1)
    return np.arctan2(sin_part, cos_part)
# calculate planar angles defined by 3 sets of points
def get_angles(a, b, c):
    """Planar angle at b[i] (radians) for each triple a[i]-b[i]-c[i].

    All inputs are (N, 3) float arrays; the result is an (N,) array.
    """
    u = a - b
    u = u / np.linalg.norm(u, axis=-1)[:, None]
    w = c - b
    w = w / np.linalg.norm(w, axis=-1)[:, None]
    return np.arccos(np.sum(u * w, axis=1))
# get 6d coordinates from x,y,z coords of N,Ca,C atoms
def get_coords6d(xyz, dmax):
    """Compute trRosetta-style 6D inter-residue coordinates.

    xyz: backbone coordinates; xyz[0], xyz[1], xyz[2] are the N, Ca and C
        atom coordinates of each residue (second axis indexes residues).
    dmax: distance cutoff -- only Cb-Cb pairs closer than dmax are filled in.

    Returns (dist6d, omega6d, theta6d, phi6d), each of shape (nres, nres).
    Entries for pairs beyond dmax keep the fill values (999.9 for distance,
    0 for the angle matrices).
    """
    nres = xyz.shape[1]
    # three anchor atoms
    N = xyz[0]
    Ca = xyz[1]
    C = xyz[2]
    # recreate Cb given N,Ca,C (ideal virtual C-beta position)
    b = Ca - N
    c = C - Ca
    a = np.cross(b, c)
    Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
    # fast neighbors search to collect all
    # Cb-Cb pairs within dmax
    kdCb = scipy.spatial.cKDTree(Cb)
    indices = kdCb.query_ball_tree(kdCb, dmax)
    # indices of contacting residues (self-pairs excluded)
    idx = np.array([[i,j] for i in range(len(indices)) for j in indices[i] if i != j]).T
    idx0 = idx[0]
    idx1 = idx[1]
    # Cb-Cb distance matrix
    dist6d = np.full((nres, nres),999.9)
    dist6d[idx0,idx1] = np.linalg.norm(Cb[idx1]-Cb[idx0], axis=-1)
    # matrix of Ca-Cb-Cb-Ca dihedrals
    omega6d = np.zeros((nres, nres))
    omega6d[idx0,idx1] = get_dihedrals(Ca[idx0], Cb[idx0], Cb[idx1], Ca[idx1])
    # matrix of polar coord theta
    theta6d = np.zeros((nres, nres))
    theta6d[idx0,idx1] = get_dihedrals(N[idx0], Ca[idx0], Cb[idx0], Cb[idx1])
    # matrix of polar coord phi
    phi6d = np.zeros((nres, nres))
    phi6d[idx0,idx1] = get_angles(Ca[idx0], Cb[idx0], Cb[idx1])
    return dist6d, omega6d, theta6d, phi6d
| 25.076923 | 88 | 0.593047 |
9372530efcdcd679c66b49892accc7018da3c507 | 2,830 | py | Python | setup.py | eserie/optax | 28f9627092ffa08f0637422b90d62b83239d728e | [
"Apache-2.0"
] | null | null | null | setup.py | eserie/optax | 28f9627092ffa08f0637422b90d62b83239d728e | [
"Apache-2.0"
] | null | null | null | setup.py | eserie/optax | 28f9627092ffa08f0637422b90d62b83239d728e | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
    """Read __version__ out of optax/__init__.py without importing the package."""
    init_file = os.path.join(_CURRENT_DIR, 'optax', '__init__.py')
    with open(init_file) as fp:
        for line in fp:
            if not line.startswith('__version__') or '=' not in line:
                continue
            candidate = line[line.find('=') + 1:].strip(' \'"\n')
            if candidate:
                return candidate
    raise ValueError('`__version__` not defined in `optax/__init__.py`')
def _parse_requirements(path):
    """Return the non-blank, non-comment lines of a requirements file."""
    requirements = []
    with open(os.path.join(_CURRENT_DIR, path)) as f:
        for line in f:
            if line.isspace() or line.startswith('#'):
                continue
            requirements.append(line.rstrip())
    return requirements
# Package metadata; version and dependency lists are read from the checkout
# at build time by the helpers above.
setup(
    name='optax',
    version=_get_version(),
    url='https://github.com/deepmind/optax',
    license='Apache 2.0',
    author='DeepMind',
    description=('A gradient processing and optimisation library in JAX.'),
    long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
    long_description_content_type='text/markdown',
    author_email='optax-dev@google.com',
    keywords='reinforcement-learning python machine learning',
    packages=find_namespace_packages(exclude=['*_test.py']),
    install_requires=_parse_requirements(
        os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
    tests_require=_parse_requirements(
        os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
    zip_safe=False,  # Required for full installation.
    python_requires='>=3.7',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| 37.236842 | 80 | 0.668905 |
34a7f9f909437529acb26ffde6a9a00861e82c0b | 123 | py | Python | bookscrape/exceptions.py | clemfromspace/pybook | ed16c24a3d1caeab07b5111812c8eb07ba598b8a | [
"WTFPL"
] | 12 | 2018-01-20T06:17:46.000Z | 2022-02-01T02:04:07.000Z | bookscrape/exceptions.py | clemfromspace/pybook | ed16c24a3d1caeab07b5111812c8eb07ba598b8a | [
"WTFPL"
] | 6 | 2021-03-18T20:40:35.000Z | 2022-03-11T23:26:11.000Z | bookscrape/exceptions.py | clemfromspace/pybook | ed16c24a3d1caeab07b5111812c8eb07ba598b8a | [
"WTFPL"
] | 1 | 2020-06-02T18:16:12.000Z | 2020-06-02T18:16:12.000Z | """This module contains the exceptions for the ``bookscrape`` package"""
class BookScrapeException(Exception):
    """Base exception for errors raised by the ``bookscrape`` package."""
| 17.571429 | 72 | 0.739837 |
c662e946519cecc5af94158fec3dd58c64be35c1 | 454 | py | Python | semantic_release/__init__.py | dialoguemd/python-semantic-release | 76123f410180599a19e7c48da413880185bbea20 | [
"MIT"
] | null | null | null | semantic_release/__init__.py | dialoguemd/python-semantic-release | 76123f410180599a19e7c48da413880185bbea20 | [
"MIT"
] | null | null | null | semantic_release/__init__.py | dialoguemd/python-semantic-release | 76123f410180599a19e7c48da413880185bbea20 | [
"MIT"
] | null | null | null | """Semantic Release
"""
__version__ = '4.1.1'
from .errors import (SemanticReleaseBaseError, ImproperConfigurationError, # noqa
UnknownCommitMessageStyleError) # noqa
def setup_hook(argv: list):
    """
    A hook to be used in setup.py to enable `python setup.py publish`.

    :param argv: sys.argv
    """
    recognised = ('version', 'publish', 'changelog')
    if len(argv) < 2 or argv[1] not in recognised:
        return
    from .cli import main
    main()
| 23.894737 | 82 | 0.625551 |
695ade895283f30163898e5bffcc6749d0184f5c | 1,547 | py | Python | user/tasks.py | enjoy-binbin/Django-blog | 0fcf3709fabeee49874343b3a4ab80582698c466 | [
"MIT"
] | 111 | 2019-06-01T06:40:36.000Z | 2021-11-14T19:57:22.000Z | user/tasks.py | enjoy-binbin/binblog-Django | 0fcf3709fabeee49874343b3a4ab80582698c466 | [
"MIT"
] | 19 | 2019-06-11T00:45:54.000Z | 2022-03-11T23:47:53.000Z | user/tasks.py | enjoy-binbin/Django-blog | 0fcf3709fabeee49874343b3a4ab80582698c466 | [
"MIT"
] | 24 | 2019-06-01T06:40:39.000Z | 2020-10-11T14:03:08.000Z | import hashlib
import time
from celery import shared_task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.mail import send_mail
logger = get_task_logger(__name__)
@shared_task
def send_email_task(email, code_str, send_type):
    """Send an email asynchronously via celery.

    :param email: recipient address
    :param code_str: verification code / token embedded in the mail
    :param send_type: one of 'register', 'forget', 'change'
    :return: dict with 'status' ('success'/'fail') plus detail fields
    """
    if send_type == 'register':
        subject = '彬彬博客注册激活链接'
        message = '请点击下面的链接激活您的账号: http://127.0.0.1:8000/active/{0}'.format(code_str)
    elif send_type == 'forget':
        subject = '彬彬博客忘记密码连接'
        # Hash code+email+timestamp so the reset link can be verified later.
        timestamp = int(time.time())
        md5 = hashlib.md5()
        # Bug fix: drop the useless assignment -- md5.update() returns None.
        md5.update((code_str + email + str(timestamp)).encode('utf8'))
        hash_str = md5.hexdigest()
        message = '请点击下面的链接修改你的密码: http://127.0.0.1:8000/reset?timestamp={0}&hash={1}&email={2}'.format(
            timestamp, hash_str, email)
    elif send_type == 'change':
        subject = '彬彬博客修改邮箱连接'
        message = '你的邮箱验证码为: {0}'.format(code_str)
    else:
        # Bug fix: the original string had no placeholder, so the offending
        # address was never interpolated into the log message.
        logger.error('{0}非法的发送类型'.format(email))
        return {'status': 'fail', 'error': 'illegal send_type'}

    status = send_mail(subject, message, settings.EMAIL_FROM, [email])  # ,html_message=
    if status:
        logger.info('{0}邮件发送成功'.format(email))
        return {'status': 'success', 'email': email}
    else:
        logger.error('{0}邮件发送失败'.format(email))
        return {'status': 'fail', 'email': email}
8b03b838354c2ff643fd15af5466aece41c2e822 | 4,165 | py | Python | MAG-related/Tara_Ocean_trace_bins_depth_size_fraction.py | carleton-spacehogs/transposase-deep-ocean | cf782acec39f902c563ff83f6e74c2200bf7f743 | [
"MIT"
] | null | null | null | MAG-related/Tara_Ocean_trace_bins_depth_size_fraction.py | carleton-spacehogs/transposase-deep-ocean | cf782acec39f902c563ff83f6e74c2200bf7f743 | [
"MIT"
] | null | null | null | MAG-related/Tara_Ocean_trace_bins_depth_size_fraction.py | carleton-spacehogs/transposase-deep-ocean | cf782acec39f902c563ff83f6e74c2200bf7f743 | [
"MIT"
] | null | null | null | import csv
import sys
import pandas as pd
# Per-sample RPKM coverages for all TOBG MAGs; first row is the sample header.
bin_coverages = list(csv.reader(open('../Figure Generating/data/TOBG-Combined.RPKM.csv', 'r')))
master_bin = list(csv.reader(open('../Figure Generating/data/bin_taxon.csv', 'r')))[1:]
bin_of_interested = [row[0] for row in master_bin]
# RPKM (reads per kilobase pair MAG per Million pair metagenome)
read_count_file = './Table1_ReadsPrimaryContigs_modifiedfrom_Tully_et_al_2018.csv'
read_counts = list(csv.reader(open(read_count_file, 'r')))[1:]
# sample id (column 3) -> number of metagenome reads (column 4)
read_counts_dict = {row[3]:int(row[4]) for row in read_counts}
cal_depth_reads = pd.read_csv(read_count_file)
# Total reads per depth layer / per size fraction, used as normalizers below.
depth_sum = cal_depth_reads.groupby(["depth"])["num_reads"].agg('sum')
normaling_factor = [depth_sum["SRF"], depth_sum["DCM"], depth_sum["MES"]]
size_sum = cal_depth_reads.groupby(["size_fraction"])["num_reads"].agg('sum')
# viral (<0.22 μm), girus (0.22–0.8 μm), bacterial (0.22–1.6 μm), and protistan (0.8–5.0 μm)
# Thresholds on the particle/planktonic ratio used by get_size().
upper_cut, lower_cut = 2, 0.5
nor_fac_size = [size_sum["prot"], size_sum["bact"]]
samples = bin_coverages[0]
out_file = [["bin", "depth", "size_fraction", "sum_SRF", "sum_DCM", "sum_MES", "sum_particle", "sum_planktonic"]]
# Global tallies updated by get_depth/get_depth2/get_size below.
par_count, plank_count, SRF_count, DCM_count, MES_count = 0,0,0,0,0
def get_depth(depth_r):
    """Classify a MAG's depth layer from its normalized coverage sums.

    depth_r: [SRF, DCM, MES] coverage; a layer "wins" when it exceeds the
    other two layers combined, otherwise "unsure" is returned.

    Bug fix: the SRF and DCM branches previously incremented each other's
    counters (DCM_count for "SRF" and SRF_count for "DCM"); the tallies now
    match the returned labels.
    """
    global SRF_count
    global DCM_count
    global MES_count
    # depth_r = [RPKM SRF, DCM, MES]
    if depth_r[0] + depth_r[1] < depth_r[2]:
        MES_count += 1
        return "MES"
    elif depth_r[1] + depth_r[2] < depth_r[0]:
        SRF_count += 1
        return "SRF"
    elif depth_r[0] + depth_r[2] < depth_r[1]:
        DCM_count += 1
        return "DCM"
    else:
        return "unsure"
def get_depth2(sum_row_depth, normaling_factor):
    """Classify a MAG's depth layer from raw read sums.

    A layer wins when its normalized (RPKM-like) coverage is more than twice
    the pooled normalized coverage of the other two layers; otherwise the MAG
    is labelled "unsure".  The matching global counter is bumped on a win.
    """
    global SRF_count
    global DCM_count
    global MES_count

    def dominates(read_sums, scales, i):
        # Normalized coverage of layer i vs. the pooled remaining layers.
        own = read_sums[i] / scales[i]
        rest = sum(read_sums[:i] + read_sums[i + 1:]) / sum(scales[:i] + scales[i + 1:])
        return own / rest > 2

    if dominates(sum_row_depth, normaling_factor, 0):
        SRF_count += 1
        return "SRF"
    if dominates(sum_row_depth, normaling_factor, 1):
        DCM_count += 1
        return "DCM"
    if dominates(sum_row_depth, normaling_factor, 2):
        MES_count += 1
        return "MES"
    return "unsure"
def get_size(size_r):
    """Classify a MAG's size-fraction lifestyle.

    size_r -- [normalised particle ("prot") RPKM, normalised planktonic ("bact") RPKM].
    Returns "particle" / "planktonic" / "mixed" based on the global
    upper_cut / lower_cut ratio thresholds, updating the global counters.
    """
    global par_count
    global plank_count
    # bug fix: use the size_r argument instead of reaching for the global
    # sum_nor_size (the caller happened to pass that same list, which masked
    # the bug, but the function was unusable with any other input).
    if size_r[0] / size_r[1] > upper_cut:
        par_count += 1
        return "particle"
    elif size_r[0] / size_r[1] < lower_cut:
        plank_count += 1
        return "planktonic"
    else:
        return "mixed"
# get MAG's depth: accumulate per-depth / per-size read contributions for each
# bin of interest, classify the bin, and collect one output row per bin.
for MAG_row in bin_coverages[1:]:
    bin_name = MAG_row[0]
    # Skip bins that are not listed in the master taxon table.
    if bin_name not in bin_of_interested: continue
    out_row = [bin_name]
    sum_row_depth = [0, 0, 0] # SRF, DCM, MES
    sum_row_size = [0, 0] # particle, planktonic
    # print(MAG_row)
    for col in range(1, len(samples)):
        sample = samples[col] # ie. sample = tara151_prot_DCM
        # Sample names encode "<station>_<size fraction>_<depth>".
        depth, size_type = sample.split("_")[2], sample.split("_")[1]
        # De-normalise RPKM back to an absolute read contribution per sample.
        coverage = float(MAG_row[col])*read_counts_dict[sample]
        # print(f"cannot find number of reads for {sample}, using average instead")
        if depth == "SRF": sum_row_depth[0] += coverage
        elif depth == "DCM": sum_row_depth[1] += coverage
        elif depth == "MES": sum_row_depth[2] += coverage
        elif depth in ["mixed", "epi"]: pass
        else: sys.exit(f"error! got depth of {depth}")
        if size_type in ["prot"]: sum_row_size[0] += coverage
        elif size_type in ["bact"]: sum_row_size[1] += coverage
    # Re-normalise the sums by the total reads sequenced in each category.
    sum_normalized_depth = [a / b for a, b in zip(sum_row_depth, normaling_factor)]
    sum_nor_size = [a / b for a, b in zip(sum_row_size, nor_fac_size)]
    # out_row.append(get_depth(sum_normalized_depth))
    out_row.append(get_depth2(sum_row_depth, normaling_factor))
    out_row.append(get_size(sum_nor_size))
    out_row.extend(sum_normalized_depth)
    out_row.extend(sum_nor_size)
    out_file.append(out_row)
# Write the per-bin classification table and print summary tallies.
with open("Tara_bins_origin.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(out_file)
print("total:", len(bin_of_interested))
print("SRF:", SRF_count, ", DCM:", DCM_count, ", MES", MES_count)
print("with upper cutoff of:", upper_cut, ", lower cutoff of:", lower_cut)
print(par_count, "MAGs have particle lifestyle,", plank_count, "are plankontic")
| 33.58871 | 113 | 0.711405 |
6a10f21b2d10a0623780d5cf71561e62cfad967f | 6,009 | py | Python | paxes_cinder/db/api.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/db/api.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/db/api.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | #
#
# =================================================================
# =================================================================
"""P Database API Class for providing access to the Cinder Database"""
from cinder.openstack.common.db import api as common_db
_BACKEND_MAPPING = {'sqlalchemy': 'paxes_cinder.db.sqlalchemy.api'}
IMPL = common_db.DBAPI(backend_mapping=_BACKEND_MAPPING)
##################################################
##########        HMC DB API Definition        ###
##################################################
# Every function in this section is a thin pass-through to the configured
# database backend (IMPL); no logic lives at this layer.
def ibm_hmc_get_all(context):
    """Retrieve all of the HMCs that are stored in the database."""
    return IMPL.ibm_hmc_get_all(context)
def ibm_hmc_get_all_by_cluster(context, host_name):
    """Retrieve the HMCs associated with the given host from the database."""
    return IMPL.ibm_hmc_get_all_by_cluster(context, host_name)
def ibm_hmc_get_by_uuid(context, hmc_uuid):
    """Retrieve the HMC with the given UUID from the database."""
    return IMPL.ibm_hmc_get_by_uuid(context, hmc_uuid)
def ibm_hmc_create(context, values):
    """Create a new HMC instance in the database from a values dict."""
    return IMPL.ibm_hmc_create(context, values)
def ibm_hmc_update(context, hmc_uuid, values):
    """Update an existing HMC instance (looked up by UUID) in the database."""
    return IMPL.ibm_hmc_update(context, hmc_uuid, values)
def ibm_hmc_delete(context, hmc_uuid):
    """Remove an existing HMC instance from the database."""
    return IMPL.ibm_hmc_delete(context, hmc_uuid)
def ibm_hmc_clusters_get_by_uuid(context, hmc_uuid):
    """Retrieve the HMC/Cluster mapping for a given UUID from the DB."""
    return IMPL.ibm_hmc_clusters_get_by_uuid(context, hmc_uuid)
def ibm_hmc_clusters_create(context, hmc_uuid, host_name):
    """Associate a new host with the HMC in the database."""
    return IMPL.ibm_hmc_clusters_create(context, hmc_uuid, host_name)
def ibm_hmc_clusters_delete(context, hmc_uuid, host_name):
    """Dissociate an existing cluster from an HMC in the database."""
    return IMPL.ibm_hmc_clusters_delete(context, hmc_uuid, host_name)
##################################################
######## Storage Node DB API Definition ##########
##################################################
# Thin pass-throughs to the configured database backend (IMPL).
def storage_node_get_by_host(context, host):
    """Retrieve the Storage Node from the DB that maps to the given host."""
    return IMPL.storage_node_get_by_host(context, host)
def storage_node_get_all(context):
    """Retrieve all of the Storage Nodes that are in the database."""
    return IMPL.storage_node_get_all(context)
def storage_node_get(context, storage_id):
    """Retrieve an individual Storage Node (by id) from the database."""
    return IMPL.storage_node_get(context, storage_id)
def storage_node_create(context, values):
    """Create a new Storage Node instance in the database from a values dict."""
    return IMPL.storage_node_create(context, values)
def storage_node_update(context, storage_id, values):
    """Update an existing Storage Node in the database."""
    return IMPL.storage_node_update(context, storage_id, values)
def service_delete(context, service_id):
    """Delete both a Service and its Storage Node from the database."""
    return IMPL.service_delete(context, service_id)
#######################################################
######### Restricted Metadata DB API Implementation ###
#######################################################
# Thin pass-throughs to the configured database backend (IMPL).
def volume_restricted_metadata_get(context, volume_id):
    """Get all restricted metadata for a volume."""
    return IMPL.volume_restricted_metadata_get(context, volume_id)
def volume_restricted_metadata_delete(context, volume_id, key):
    """Delete the given restricted metadata item.  Returns None."""
    IMPL.volume_restricted_metadata_delete(context, volume_id, key)
def volume_restricted_metadata_update_or_create(context,
                                                volume_id,
                                                metadata):
    """Create or update restricted metadata.  This adds or modifies the
    key/value pairs specified in the metadata dict argument.  Returns None.
    """
    IMPL.volume_restricted_metadata_update_or_create(context,
                                                     volume_id,
                                                     metadata)
##################################################
######## On-board Task DB API Definition #########
##################################################
# Thin pass-throughs to the configured database backend (IMPL).
def onboard_task_get_all(context, host):
    """Retrieve all of the On-board Tasks for a host from the DB."""
    return IMPL.onboard_task_get_all(context, host)
def onboard_task_get(context, task_id):
    """Retrieve one On-board Task (by id) from the DB."""
    return IMPL.onboard_task_get(context, task_id)
def onboard_task_create(context, host):
    """Create a new On-board Task for the given host in the DB."""
    return IMPL.onboard_task_create(context, host)
def onboard_task_update(context, task_id, values):
    """Update the given On-board Task in the DB."""
    return IMPL.onboard_task_update(context, task_id, values)
def onboard_task_delete(context, task_id):
    """Delete the given On-board Task from the DB."""
    return IMPL.onboard_task_delete(context, task_id)
def onboard_task_volume_create(context, task_id, vol_uuid, values):
    """Create the Volume record for the given On-board Task."""
    return IMPL.onboard_task_volume_create(context, task_id, vol_uuid, values)
def onboard_task_volume_update(context, task_id, vol_uuid, values):
    """Update the Volume record for the given On-board Task."""
    return IMPL.onboard_task_volume_update(context, task_id, vol_uuid, values)
##############################################################
######## P DB API Extension for Admin metadata       #########
##############################################################
def ibm_volume_get_all_except_bootable(context):
    """Return a list of volumes that don't have the 'is_boot_volume' key."""
    return IMPL.ibm_volume_get_all_except_bootable(context)
| 36.865031 | 78 | 0.637544 |
167f113d7ebdc7a3cbb9c7779028aafff7d042a8 | 1,074 | py | Python | crawlers/italy/italy_crawler.py | hasan-haider/conrad | 3e040b73a0cbec0ecee36c397c3bdedf9e439b54 | [
"Apache-2.0"
] | 244 | 2019-10-27T22:40:17.000Z | 2022-01-11T13:04:50.000Z | crawlers/italy/italy_crawler.py | hasan-haider/conrad | 3e040b73a0cbec0ecee36c397c3bdedf9e439b54 | [
"Apache-2.0"
] | 92 | 2019-10-27T23:01:34.000Z | 2021-08-21T15:19:37.000Z | crawlers/italy/italy_crawler.py | hasan-haider/conrad | 3e040b73a0cbec0ecee36c397c3bdedf9e439b54 | [
"Apache-2.0"
] | 71 | 2019-10-28T03:05:42.000Z | 2022-02-02T13:34:32.000Z | # -*- coding: utf-8 -*-
import json
import requests
from ..base import BaseCrawler
class ItalyCrawler(BaseCrawler):
    """Crawler that imports Italian tech events from the awesome-italy-events repo."""

    def get_events(self):
        """Fetch the 2020 event list and append non-PyCon events to self.events."""
        raw = requests.get(
            "https://raw.githubusercontent.com/ildoc/awesome-italy-events/master/data/2020.json"
        )
        for entry in json.loads(raw.content):
            # PyCon entries are deliberately filtered out.
            if "pycon" in entry["title"].lower():
                continue
            self.events.append(
                {
                    "name": entry["title"],
                    "url": entry["url"],
                    "city": entry["location"],
                    "state": None,
                    "country": "Italy",
                    "location": entry["location"],
                    "cfp_open": False,
                    "cfp_end_date": "1970-01-01",
                    "start_date": entry["startDate"],
                    "end_date": entry["endDate"],
                    "source": "https://github.com/ildoc/awesome-italy-events",
                    "tags": ["technology"],
                    "kind": "conference",
                    "by": "bot",
                }
            )
| 29.027027 | 96 | 0.477654 |
c6bffa832b064858f27fcb96d1971c538514d935 | 2,188 | py | Python | tests/all/common/test_button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | null | null | null | tests/all/common/test_button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | 1 | 2021-01-26T16:55:48.000Z | 2021-09-03T15:29:14.000Z | tests/all/common/test_button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
from visigoth import Diagram
from visigoth.internal.utils.test_utils import TestUtils
from visigoth.common.button import Button
from visigoth.common.space import Space
class TestButton(unittest.TestCase):
    """Rendering test covering the main Button configuration options."""

    def test_basic(self):
        """Draw one diagram containing buttons with assorted styling options."""
        diagram = Diagram(fill="white")
        buttons = [
            Button("Test"),
            Button("Test with Font Height", font_height=32),
            Button("Test with Font Family", text_attributes={"font-family": "monospace"}),
            Button("Test with Green Push Fill", push_fill="green"),
            Button("Test with URL", url="http://www.github.com"),
            Button("Square Button", r=0),
            Button("Padded Button", padding=20, fill="orange"),
        ]
        # Separate consecutive buttons with a 10-unit spacer, as the original did.
        for index, button in enumerate(buttons):
            if index:
                diagram.add(Space(10))
            diagram.add(button)
        TestUtils.draw_output(diagram, "test_button")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 43.76 | 98 | 0.711152 |
f6138d9e98fcf4e4350a5464f69ce2b729863ac4 | 567 | py | Python | MultiQubit_PulseGenerator/Custom_sequence/sequence_3rd_level_rev.py | ittnas/Drivers_Antti | 2a8d7252ae24b6a6440426202f696f78f6da651b | [
"MIT"
] | null | null | null | MultiQubit_PulseGenerator/Custom_sequence/sequence_3rd_level_rev.py | ittnas/Drivers_Antti | 2a8d7252ae24b6a6440426202f696f78f6da651b | [
"MIT"
] | null | null | null | MultiQubit_PulseGenerator/Custom_sequence/sequence_3rd_level_rev.py | ittnas/Drivers_Antti | 2a8d7252ae24b6a6440426202f696f78f6da651b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import random as rnd
import numpy as np
import cliffords
import copy
import gates
from sequence import Sequence
log = logging.getLogger('LabberDriver')
import os
path_currentdir = os.path.dirname(os.path.realpath(__file__)) # directory containing this module
class CustomSequence(Sequence):
    """Custom pulse sequence that excites qubits 1 and 3."""

    def generate_sequence(self, config):
        """Append a pi-pulse (Xp gate) on qubit indices 0 and 2."""
        # config.get('Parameter #1', False)
        for target_qubit in (0, 2):
            self.add_gate(qubit=target_qubit, gate=gates.Xp)
# Module is intended to be imported by the pulse generator; nothing to run standalone.
if __name__ == '__main__':
    pass
| 19.551724 | 81 | 0.747795 |
016ce13eea381ba8cd7fea57408fd2a6e7280fa1 | 2,576 | py | Python | number_converter.py | 43061b4a/phone_number_to_words | 893b33d4f6f791bbd81ec8c1ab27e6c3c22a73dd | [
"MIT"
] | null | null | null | number_converter.py | 43061b4a/phone_number_to_words | 893b33d4f6f791bbd81ec8c1ab27e6c3c22a73dd | [
"MIT"
] | null | null | null | number_converter.py | 43061b4a/phone_number_to_words | 893b33d4f6f791bbd81ec8c1ab27e6c3c22a73dd | [
"MIT"
] | null | null | null | import time
from functools import lru_cache
from trie import PrefixTree
class NumberConverter(object):
    """Converts digit strings into dictionary words they can spell on a phone keypad."""

    # letter -> keypad digit translation table ('a'->'2' ... 'z'->'9'),
    # built once at class creation.
    _LETTER_TO_DIGIT = str.maketrans('abcdefghijklmnopqrstuvwxyz',
                                     '22233344455566677778889999')

    def __init__(self):
        """Load the English word list into a prefix tree for prefix lookups."""
        self.trie = PrefixTree()
        with open('words_en.txt') as file:
            for line in file:
                self.trie.insert(line.rstrip('\n'))

    def number_to_valid_phone_words(self, num):
        """Return every dictionary word whose keypad spelling equals `num`.

        Raises ValueError (a subclass of Exception, so existing callers still
        work) for numbers containing 0 or 1, which carry no letters.
        """
        if '1' in num or '0' in num:
            raise ValueError('Numbers with 1 and 0 are currently not supported.')
        # 1: Find all words of the right length starting with a letter on the first key
        words = []
        for prefix in self.num_to_chars(num[0]):
            words.extend(self.trie.starts_with(prefix, len(num)))
        # 2/3: Keep only the candidates whose digit translation matches `num`
        return [word for word in words if self.words_to_nums(word) == num]

    @staticmethod
    def num_to_chars(num):
        """Return the letters on keypad digit `num` ('2'..'9'), or None otherwise."""
        keymap = {'2': ['a', 'b', 'c'],
                  '3': ['d', 'e', 'f'],
                  '4': ['g', 'h', 'i'],
                  '5': ['j', 'k', 'l'],
                  '6': ['m', 'n', 'o'],
                  '7': ['p', 'q', 'r', 's'],
                  '8': ['t', 'u', 'v'],
                  '9': ['w', 'x', 'y', 'z']}
        return keymap[num] if num in keymap else None

    def words_to_nums(self, word):
        """Translate a lowercase word to its keypad digits, e.g. 'cat' -> '228'.

        Uses a single str.translate pass instead of 26 chained replace calls;
        this also removes the former @lru_cache on an instance method, which
        keyed the cache on `self` and kept converter instances alive.
        """
        return word.translate(self._LETTER_TO_DIGIT)
# Demo / timing harness: run the same queries twice and print per-query latency
# in milliseconds (the second pass shows the effect of any caching).
converter = NumberConverter()
print('****First Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
print('****Second Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
| 33.454545 | 83 | 0.483696 |
d1269943300420d6e31aa49110a8d30bbb1b2b0c | 1,746 | py | Python | nba_api/stats/endpoints/draftcombinenonstationaryshooting.py | adureja/nba_api | 2e1f8ee55e513689aea410bf663a09a3d7c34283 | [
"MIT"
] | 1 | 2019-02-20T19:19:19.000Z | 2019-02-20T19:19:19.000Z | nba_api/stats/endpoints/draftcombinenonstationaryshooting.py | adureja/nba_api | 2e1f8ee55e513689aea410bf663a09a3d7c34283 | [
"MIT"
] | null | null | null | nba_api/stats/endpoints/draftcombinenonstationaryshooting.py | adureja/nba_api | 2e1f8ee55e513689aea410bf663a09a3d7c34283 | [
"MIT"
] | 2 | 2020-08-09T06:00:19.000Z | 2022-03-04T16:39:30.000Z | from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
from nba_api.stats.library.parameters import LeagueID, Season
class DraftCombineNonStationaryShooting(Endpoint):
    """Wrapper for the stats 'draftcombinenonstationaryshooting' endpoint."""

    endpoint = 'draftcombinenonstationaryshooting'
    expected_data = {'Results': ['TEMP_PLAYER_ID', 'PLAYER_ID', 'FIRST_NAME', 'LAST_NAME', 'PLAYER_NAME', 'POSITION', 'OFF_DRIB_FIFTEEN_BREAK_LEFT_MADE', 'OFF_DRIB_FIFTEEN_BREAK_LEFT_ATTEMPT', 'OFF_DRIB_FIFTEEN_BREAK_LEFT_PCT', 'OFF_DRIB_FIFTEEN_TOP_KEY_MADE', 'OFF_DRIB_FIFTEEN_TOP_KEY_ATTEMPT', 'OFF_DRIB_FIFTEEN_TOP_KEY_PCT', 'OFF_DRIB_FIFTEEN_BREAK_RIGHT_MADE', 'OFF_DRIB_FIFTEEN_BREAK_RIGHT_ATTEMPT', 'OFF_DRIB_FIFTEEN_BREAK_RIGHT_PCT', 'OFF_DRIB_COLLEGE_BREAK_LEFT_MADE', 'OFF_DRIB_COLLEGE_BREAK_LEFT_ATTEMPT', 'OFF_DRIB_COLLEGE_BREAK_LEFT_PCT', 'OFF_DRIB_COLLEGE_TOP_KEY_MADE', 'OFF_DRIB_COLLEGE_TOP_KEY_ATTEMPT', 'OFF_DRIB_COLLEGE_TOP_KEY_PCT', 'OFF_DRIB_COLLEGE_BREAK_RIGHT_MADE', 'OFF_DRIB_COLLEGE_BREAK_RIGHT_ATTEMPT', 'OFF_DRIB_COLLEGE_BREAK_RIGHT_PCT', 'ON_MOVE_FIFTEEN_MADE', 'ON_MOVE_FIFTEEN_ATTEMPT', 'ON_MOVE_FIFTEEN_PCT', 'ON_MOVE_COLLEGE_MADE', 'ON_MOVE_COLLEGE_ATTEMPT', 'ON_MOVE_COLLEGE_PCT']}

    def __init__(self,
                 league_id=LeagueID.default,
                 season=Season.default):
        """Issue the API request for the given league/season and expose its data sets."""
        request_parameters = {
            'LeagueID': league_id,
            'SeasonYear': season
        }
        self.nba_response = NBAStatsHTTP().send_api_request(
            endpoint=self.endpoint,
            parameters=request_parameters,
        )
        raw_sets = self.nba_response.get_data_sets()
        self.data_sets = [Endpoint.DataSet(data=raw) for _name, raw in raw_sets.items()]
        self.results = Endpoint.DataSet(data=raw_sets['Results'])
| 75.913043 | 914 | 0.766896 |
99f040b5dda17a1e6b2fc251de30b131639ea277 | 26,260 | py | Python | HandyML_Trainer/python/ml_train.py | masiref/HandyML | b86483f46e0d190b18b20ecf01a3868980c85ef2 | [
"MIT"
] | 3 | 2019-10-08T13:45:49.000Z | 2020-05-18T16:03:47.000Z | HandyML_Trainer/python/ml_train.py | masiref/HandyML | b86483f46e0d190b18b20ecf01a3868980c85ef2 | [
"MIT"
] | null | null | null | HandyML_Trainer/python/ml_train.py | masiref/HandyML | b86483f46e0d190b18b20ecf01a3868980c85ef2 | [
"MIT"
] | 3 | 2019-07-09T07:57:12.000Z | 2019-10-24T17:41:13.000Z | # Data preprocessing
# Importing the libraries
import pandas as pd
import numpy as np
import time
import json
from joblib import dump
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import scikitplot as skplt
from win10toast import ToastNotifier
############## GLOBAL VARIABLES ##############
# Name shown in Windows toast notifications.
app_name = 'HandyML Trainer'
# Matplotlib colour names reused across all generated plots.
color_red = 'tomato'
color_blue = 'skyblue'
color_green = 'palegreen'
############## GENERIC FUNCTIONS ##############
def encode_categorical_data(data):
    """Label-encode a 1-D categorical array; return (encoded_values, fitted_encoder)."""
    encoder = LabelEncoder()
    encoded = encoder.fit_transform(data)
    return encoded, encoder
def one_hot_encode(data, indices):
    """One-hot encode the columns at `indices`, passing other columns through.

    Returns (transformed_data, fitted_transformer).
    """
    encoder_spec = [('one_hot_encoder', OneHotEncoder(), indices)]
    transformer = ColumnTransformer(encoder_spec, remainder = 'passthrough')
    return transformer.fit_transform(data), transformer
def feature_scaling(data, scaler = None):
    """Standard-scale `data`; return (scaled_data, scaler).

    When `scaler` is None a new StandardScaler is fitted on `data`
    (with_mean=False so sparse matrices stay sparse); otherwise the given
    already-fitted scaler is reused, e.g. to transform the test set with
    the training-set scaler.
    """
    if scaler is None:  # idiom fix: identity check instead of `== None`
        scaler = StandardScaler(with_mean = False)
        data = scaler.fit_transform(data)
    else:
        data = scaler.transform(data)
    return data, scaler
def split_data_set(X, y, test_size = 0.2):
    """Split features/target into train and test sets.

    Returns (X_train, X_test, y_train, y_test), exactly as train_test_split does.
    """
    return train_test_split(X, y, test_size = test_size)
def get_parameter_value(name, parameters, default_value):
    """Look up `name` in a 'key=value, key2=value2' formatted string.

    Returns the matching value as a string, or `default_value` when the
    name is absent or `parameters` is empty/None.  Values containing '='
    are preserved intact (the pair is split on the first '=' only).
    """
    if parameters:
        for parameter in parameters.split(', '):
            # partition splits on the first '=' only, so values may contain '='
            key, separator, value = parameter.partition('=')
            if separator and key == name:
                return value
    return default_value
def get_model_score(model, X_test, y_test, is_polynomial_regression = False, degree = None):
    """Score `model` on the test set.

    For a polynomial regression model the raw features are expanded with
    PolynomialFeatures(degree) first.  Falls back to a dense array when the
    estimator rejects sparse input with a TypeError.
    """
    features = X_test
    if is_polynomial_regression:
        features = PolynomialFeatures(degree).fit_transform(X_test)
    try:
        return model.score(features, y_test)
    except TypeError:
        # Some estimators require dense input: "A sparse matrix was passed,
        # but dense data is required" — retry with a dense copy.
        return model.score(features.toarray(), y_test)
def save_regression_plot(scatter_X, scatter_y, plot_X, plot_y, title, xlabel, ylabel, path):
    """Scatter the observations, overlay the model curve, and save the figure to `path`."""
    plt.scatter(scatter_X, scatter_y, color = color_red)
    plt.plot(plot_X, plot_y, color = color_blue)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_regression_plot_using_grid(X, y, regressor, labels, title, path):
    """Plot regressor predictions over a fine 0.1-step grid (smooth curve for tree/forest models)."""
    X_grid = np.arange(min(X), max(X), 0.1)
    X_grid = X_grid.reshape((len(X_grid), 1))
    save_regression_plot(X, y, X_grid, regressor.predict(X_grid), title, labels[0], labels[1], path)
def save_classification_plot(X, y, classifier, labels, title, path):
    """Draw the classifier's decision regions and the labelled points.

    Assumes exactly 2 features and 2 classes (red/green colormap) — the
    caller is expected to enforce that.
    """
    X1, X2 = np.meshgrid(np.arange(start = X[:, 0].min() - 1, stop = X[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X[:, 1].min() - 1, stop = X[:, 1].max() + 1, step = 0.01))
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap((color_red, color_green)))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y)):
        plt.scatter(X[y == j, 0], X[y == j, 1], c = ListedColormap((color_red, color_green))(i), label = j)
    plt.title(title)
    plt.xlabel(labels[0])
    plt.ylabel(labels[1])
    plt.legend()
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_confusion_matrix_plot(y_test, y_pred, title, path):
    """Save a normalised confusion-matrix plot to `path`."""
    skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize = True)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_roc_plot(y_pred, y_probas, title, path):
    """Save a ROC-curve plot to `path`.

    NOTE(review): scikit-plot's plot_roc expects (y_true, y_probas); the first
    argument here is named y_pred — confirm callers pass the true labels.
    """
    skplt.metrics.plot_roc(y_pred, y_probas, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_ks_statistic_plot(y_pred, y_probas, title, path):
    """Save a KS-statistic plot to `path` (same y_true/y_pred naming caveat as save_roc_plot)."""
    skplt.metrics.plot_ks_statistic(y_pred, y_probas, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_precision_recall_plot(y_pred, y_probas, title, path):
    """Save a precision/recall curve plot to `path`."""
    skplt.metrics.plot_precision_recall(y_pred, y_probas, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_cumulative_gain_plot(y_pred, y_probas, title, path):
    """Save a cumulative-gain plot to `path`."""
    skplt.metrics.plot_cumulative_gain(y_pred, y_probas, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_lift_curve_plot(y_pred, y_probas, title, path):
    """Save a lift-curve plot to `path`."""
    skplt.metrics.plot_lift_curve(y_pred, y_probas, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_learning_curve_plot(model, X, y, title, path):
    """Save a learning-curve plot (re-fits `model` on growing subsets) to `path`."""
    skplt.estimators.plot_learning_curve(model, X, y, title)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
def save_feature_importances_plot(model, feature_names, title, path):
    """Save a feature-importance bar plot (tree/forest models) to `path`."""
    skplt.estimators.plot_feature_importances(model, title, feature_names, x_tick_rotation = 45)
    plt.title(title)
    plt.savefig(path, bbox_inches = 'tight')
    plt.close()
############## REGRESSION FUNCTIONS ##############
def fit_linear_regression(X_train, y_train):
    """Fit and return an ordinary least-squares linear regression model."""
    return LinearRegression().fit(X_train, y_train)
def save_linear_regression_plot(X, y, regressor, labels, title, path):
    """Save a scatter-plus-fitted-line plot for a linear regression model."""
    predictions = regressor.predict(X)
    save_regression_plot(X, y, X, predictions, title, labels[0], labels[1], path)
def fit_polynomial_regression(X_train, y_train, degree):
    """Fit a polynomial regression of the given degree.

    Returns (linear_model_on_expanded_features, fitted_PolynomialFeatures).
    Bug fix: the original re-ran poly_reg.fit on X_poly (its own output),
    which refitted the transformer on the already-expanded matrix and
    corrupted its fitted input width; that redundant call is removed.
    """
    poly_reg = PolynomialFeatures(degree)
    X_poly = poly_reg.fit_transform(X_train)
    regressor = LinearRegression()
    regressor.fit(X_poly, y_train)
    return regressor, poly_reg
def save_polynomial_regression_plot(X, y, regressor, polynomial_regressor, labels, title, path):
    """Save a scatter plot with the polynomial fit evaluated on a fine 0.1-step grid."""
    X_grid = np.arange(min(X), max(X), 0.1).reshape(-1, 1)
    predictions = regressor.predict(polynomial_regressor.fit_transform(X_grid))
    save_regression_plot(X, y, X_grid, predictions, title, labels[0], labels[1], path)
def fit_sv_regression(X_train, y_train, X_test, y_test):
    """Fit an RBF-kernel SVR on standard-scaled data.

    SVR is scale-sensitive, so both X and y are scaled here; the test split
    is transformed with the scalers fitted on the training split.
    Returns (regressor, scaled X_train, scaled y_train, scaled X_test,
    scaled y_test, scaler_X, scaler_y) so callers can invert predictions.
    """
    X_train, scaler_X = feature_scaling(X_train)
    X_test, _ = feature_scaling(X_test, scaler_X)
    # y must be 2-D for the scaler, then flattened back to 1-D for SVR.fit.
    y_train, scaler_y = feature_scaling(y_train.reshape(-1, 1))
    y_train = y_train.reshape(len(y_train))
    y_test, _ = feature_scaling(y_test.reshape(-1, 1), scaler_y)
    y_test = y_test.reshape(len(y_test))
    regressor = SVR(kernel = 'rbf', gamma = 'scale')
    regressor.fit(X_train, y_train)
    return regressor, X_train, y_train, X_test, y_test, scaler_X, scaler_y
def fit_decision_tree_regression(X_train, y_train, criterion):
    """Fit and return a decision-tree regressor using the given split criterion."""
    return DecisionTreeRegressor(criterion).fit(X_train, y_train)
def fit_random_forest_regression(X_train, y_train, n_estimators, criterion):
    """Fit and return a random-forest regressor (n_estimators trees, given criterion)."""
    return RandomForestRegressor(n_estimators, criterion).fit(X_train, y_train)
############## CLASSIFICATION FUNCTIONS ##############
def fit_logistic_regression(X_train, y_train, solver):
    """Fit and return an L2-penalised logistic regression with the given solver."""
    model = LogisticRegression(fit_intercept = True, dual = False, penalty = 'l2', solver = solver)
    return model.fit(X_train, y_train)
def fit_knn(X_train, y_train, n_neighbors):
    """Fit and return a k-nearest-neighbours classifier with n_neighbors neighbours."""
    return KNeighborsClassifier(n_neighbors).fit(X_train, y_train)
def fit_svm(X_train, y_train, kernel, gamma):
    """Fit and return an SVC (C=1.0, probability estimates enabled)."""
    model = SVC(C = 1.0, kernel = kernel, degree = 3, gamma = gamma, probability = True)
    return model.fit(X_train, y_train)
def fit_kernel_svm(X_train, y_train, kernel, degree, gamma):
    """Fit and return a kernel SVC with configurable kernel/degree/gamma."""
    model = SVC(C = 1.0, kernel = kernel, degree = degree, gamma = gamma, probability = True)
    return model.fit(X_train, y_train)
def fit_naive_bayes(X_train, y_train):
    """Fit and return a Gaussian Naive Bayes classifier.

    GaussianNB rejects sparse matrices with a TypeError, so the fit is
    retried with a dense copy in that case.
    """
    model = GaussianNB()
    try:
        model.fit(X_train, y_train)
    except TypeError:
        # "A sparse matrix was passed, but dense data is required" — densify.
        model.fit(X_train.toarray(), y_train)
    return model
def fit_decision_tree_classification(X_train, y_train, criterion, splitter):
    """Fit and return a decision-tree classifier with the given criterion/splitter."""
    return DecisionTreeClassifier(criterion, splitter).fit(X_train, y_train)
def fit_random_forest_classification(X_train, y_train, n_estimators, criterion):
    """Fit and return a random-forest classifier (n_estimators trees, given criterion)."""
    return RandomForestClassifier(n_estimators, criterion).fit(X_train, y_train)
def process(file_path, features, target, categorical_features, problem_type, algorithm, algorithm_parameters, path, column_names):
errors = []
toaster = ToastNotifier()
now = time.localtime()
timestamp = time.strftime("%Y%m%d%H%M%S", now)
if path == None:
path = 'C:/Temp/'
model_path = path + 'model' + '_' + timestamp + '.model'
dataset_mean_path = ''
dataset_mode_path = ''
one_hot_encoder_path = ''
scaler_X_path = ''
scaler_y_path = ''
labelencoder_path = ''
model = None
scaler_X = None
scaler_y = None
is_polynomial_regression = False
degree = None
model_score = 0
plot_training_results = ''
plot_test_results = ''
plot_confusion_matrix = ''
plot_roc = ''
plot_ks_statistic = ''
plot_precision_recall = ''
plot_cumulative_gain = ''
plot_lift_curve = ''
plot_learning_curve = ''
plot_feature_importances = ''
dataset_imported = False
try:
# Converting column_names to a list of string
if len(column_names) > 0:
column_names = column_names.split('::')
# Converting list of features columns from string to integer
features = list(map(int, features.split()))
feature_names = list(map(lambda index: column_names[index], features))
# Categorical features are determined in HandyML_Trainer UiPath script, but we prefer to redo the job in Python (more reliable)
# Converting list of categorical features columns from string to integer if necessary
# if len(categorical_features) > 0:
# categorical_features = list(map(int, categorical_features.split()))
# Converting target column from string to integer
target = int(target)
# Importing the dataset
dataset = pd.read_csv(file_path)
mean = dataset.mean()
mode = dataset.mode().iloc[0]
dataset = dataset.fillna(mean)
dataset = dataset.dropna()
X = dataset.iloc[:, features].values
y = dataset.iloc[:, target].values
# Determining categorical features
categorical_features = []
types = dataset.dtypes
for i, type_ in enumerate(types):
if i in features and type_ == np.object or type_ == np.bool:
categorical_features.append(i)
# Encoding categorical features
one_hot_encoder = None
if len(categorical_features) > 0:
# Find the right index of the column in features list
categorical_features = np.array(list(map(lambda index: features.index(index), categorical_features)), dtype=int)
# One hot encoding on X[:, indices]
X, one_hot_encoder = one_hot_encode(X, categorical_features)
# Encoding categorical target in case of a classification problem
labelencoder = None
if problem_type == 'classification':
y, labelencoder = encode_categorical_data(y)
# Checking target in case of a regression problem
elif problem_type == 'regression' and dataset.iloc[:, target].dtype != np.float64 and dataset.iloc[:, target].dtype != np.int64:
raise TypeError('Problem type is regression but found categorical data in target')
# Splitting the dataset into training set and test set
X_train, X_test, y_train, y_test = split_data_set(X, y)
toaster.show_toast(app_name, 'Dataset successfully imported', duration=5)
dataset_imported = True
except Exception as e:
toaster.show_toast(app_name, 'Error raised while importing dataset', duration=5)
errors.append('Error raised while importing dataset: ' + str(e))
if dataset_imported:
training_done = False
try:
############## PROBLEM TYPE IS REGRESSION ##############
if problem_type == 'regression':
# Plots variables
generate_plots = False
# Generate plots only if there is 1 dimension
if X_train.shape[1] == 1:
generate_plots = True
plot_training_results = path + 'training_results_' + timestamp + '.png'
plot_test_results = path + 'test_results_' + timestamp + '.png'
plot_labels = [ column_names[features[0]], column_names[target] ]
if algorithm == None:
algorithm = 'linear_regression'
# Selecting the right regression algorithm
if algorithm == 'linear_regression':
model = fit_linear_regression(X_train, y_train)
if generate_plots:
save_linear_regression_plot(X_train, y_train, model, plot_labels, 'Training results', plot_training_results)
save_linear_regression_plot(X_test, y_test, model, plot_labels, 'Test results', plot_test_results)
elif algorithm == 'polynomial_regression':
is_polynomial_regression = True
degree = int(get_parameter_value('degree', algorithm_parameters, 2))
model, polynomial_regressor = fit_polynomial_regression(X_train, y_train, degree)
if generate_plots:
save_polynomial_regression_plot(X_train, y_train, model, polynomial_regressor, plot_labels, 'Training results', plot_training_results)
save_polynomial_regression_plot(X_test, y_test, model, polynomial_regressor, plot_labels, 'Test results', plot_test_results)
elif algorithm == 'support_vector_regression':
model, X_train, y_train, X_test, y_test, scaler_X, scaler_y = fit_sv_regression(X_train, y_train, X_test, y_test)
if generate_plots:
save_regression_plot_using_grid(X_train, y_train, model, plot_labels, 'Training results', plot_training_results)
save_regression_plot_using_grid(X_test, y_test, model, plot_labels, 'Test results', plot_test_results)
elif algorithm == 'decision_tree_regression':
criterion = get_parameter_value('criterion', algorithm_parameters, 'mse')
model = fit_decision_tree_regression(X_train, y_train, criterion)
if generate_plots:
save_regression_plot_using_grid(X_train, y_train, model, plot_labels, 'Training results', plot_training_results)
save_regression_plot_using_grid(X_test, y_test, model, plot_labels, 'Test results', plot_test_results)
elif algorithm == 'random_forest_regression':
criterion = get_parameter_value('criterion', algorithm_parameters, 'mse')
n_estimators = int(get_parameter_value('n_estimators', algorithm_parameters, 10))
model = fit_random_forest_regression(X_train, y_train, n_estimators, criterion)
if generate_plots:
save_regression_plot_using_grid(X_train, y_train, model, plot_labels, 'Training results', plot_training_results)
save_regression_plot_using_grid(X_test, y_test, model, plot_labels, 'Test results', plot_test_results)
############## PROBLEM TYPE IS CLASSIFICATION ##############
elif problem_type == 'classification':
# Feature scaling
X_train, scaler_X = feature_scaling(X_train)
X_test, _ = feature_scaling(X_test, scaler_X)
# Plots variables
generate_plots = False
# Generate plots only if there is 2 dimensions and 2 classes in target
if X_train.shape[1] == 2 and len(np.unique(y)) == 2:
generate_plots = True
plot_training_results = path + 'training_results_' + timestamp + '.png'
plot_test_results = path + 'test_results_' + timestamp + '.png'
plot_labels = [ column_names[features[0]], column_names[features[1]] ]
if algorithm == None:
algorithm = 'logistic_regression'
# Selecting the right classification algorithm
if algorithm == 'logistic_regression':
solver = get_parameter_value('solver', algorithm_parameters, 'liblinear')
model = fit_logistic_regression(X_train, y_train, solver)
elif algorithm == 'knn':
n_neighbors = int(get_parameter_value('n_neighbors', algorithm_parameters, 5))
model = fit_knn(X_train, y_train, n_neighbors)
elif algorithm == 'svm':
kernel = get_parameter_value('kernel', algorithm_parameters, 'rbf')
gamma = float(get_parameter_value('gamma', algorithm_parameters, 0.1))
model = fit_svm(X_train, y_train, kernel, gamma)
elif algorithm == 'kernel_svm':
kernel = get_parameter_value('kernel', algorithm_parameters, 'rbf')
gamma = float(get_parameter_value('gamma', algorithm_parameters, 0.1))
degree = int(get_parameter_value('degree', algorithm_parameters, 2))
model = fit_kernel_svm(X_train, y_train, kernel, degree, gamma)
elif algorithm == 'naive_bayes':
model = fit_naive_bayes(X_train, y_train)
elif algorithm == 'decision_tree_classification':
criterion = get_parameter_value('criterion', algorithm_parameters, 'entropy')
splitter = get_parameter_value('splitter', algorithm_parameters, 'best')
model = fit_decision_tree_classification(X_train, y_train, criterion, splitter)
elif algorithm == 'random_forest_classification':
n_estimators = int(get_parameter_value('n_estimators', algorithm_parameters, 10))
criterion = get_parameter_value('criterion', algorithm_parameters, 'entropy')
model = fit_random_forest_classification(X_train, y_train, n_estimators, criterion)
# Generate plots
if generate_plots:
save_classification_plot(X_train, y_train, model, plot_labels, 'Training results', plot_training_results)
save_classification_plot(X_test, y_test, model, plot_labels, 'Test results', plot_test_results)
toaster.show_toast(app_name, 'Training ended successfully', duration=5)
training_done = True
except Exception as e:
toaster.show_toast(app_name, 'Error during training', duration=5)
errors.append('Error raised during training: ' + str(e))
if training_done:
try:
# Generate metrics and estimators plots
if problem_type == 'classification':
plot_confusion_matrix = path + 'confusion_matrix_' + timestamp + '.png'
save_confusion_matrix_plot(y_test, model.predict(X_test), 'Confusion matrix', plot_confusion_matrix)
plot_learning_curve = path + 'learning_curve_' + timestamp + '.png'
save_learning_curve_plot(model, X_train, y_train, 'Learning curve', plot_learning_curve)
if hasattr(model, 'feature_importances_') and len(features) > 1:
plot_feature_importances = path + 'feature_importances_' + timestamp + '.png'
try:
save_feature_importances_plot(model, feature_names, 'Feature importances', plot_feature_importances)
except:
plot_feature_importances = ''
toaster.show_toast(app_name, 'Plots generated successfully', duration=5)
except:
toaster.show_toast(app_name, 'Error raised while generating plots', duration=5)
errors.append('Error raised while generating plots')
try:
# Saving the trained model
dump(model, model_path)
# Saving dataset mean
dataset_mean_path = path + 'dataset_mean_' + timestamp + '.pickle'
mean.to_pickle(dataset_mean_path)
# Saving dataset mode
dataset_mode_path = path + 'dataset_mode_' + timestamp + '.pickle'
mode.to_pickle(dataset_mode_path)
# Saving the one hot encoder
if one_hot_encoder:
one_hot_encoder_path = path + 'one_hot_encoder_' + timestamp + '.pickle'
dump(one_hot_encoder, one_hot_encoder_path)
# Saving the labelencoder
if labelencoder:
labelencoder_path = path + 'labelencoder_' + timestamp + '.pickle'
dump(labelencoder, labelencoder_path)
# Saving the scalers
if scaler_X:
scaler_X_path = path + 'scaler_X_' + timestamp + '.pickle'
dump(scaler_X, scaler_X_path)
if scaler_y:
scaler_y_path = path + 'scaler_y_' + timestamp + '.pickle'
dump(scaler_y, scaler_y_path)
toaster.show_toast(app_name, 'Model saved successfully', duration=5)
except Exception as e:
toaster.show_toast(app_name, 'Error raised while saving model', duration=5)
errors.append('Error raised while saving model: ' + str(e))
try:
# Calculation of model score
model_score = get_model_score(model, X_test, y_test, is_polynomial_regression, degree)
#model_path = ''.join(model_path)
except Exception as e:
errors.append('Error raised while calculating model score: ' + str(e))
json_object = {
"errors": errors,
"model": model_path,
"dataset_mean": dataset_mean_path,
"dataset_mode": dataset_mode_path,
"one_hot_encoder": one_hot_encoder_path,
"scaler_X": scaler_X_path,
"scaler_y": scaler_y_path,
"labelencoder": labelencoder_path,
"score": model_score,
"plot_training_results": plot_training_results,
"plot_test_results": plot_test_results,
"plot_confusion_matrix": plot_confusion_matrix,
"plot_roc": plot_roc,
"plot_ks_statistic": plot_ks_statistic,
"plot_precision_recall": plot_precision_recall,
"plot_cumulative_gain": plot_cumulative_gain,
"plot_lift_curve": plot_lift_curve,
"plot_learning_curve": plot_learning_curve,
"plot_feature_importances": plot_feature_importances
}
json_string = json.dumps(json_object)
return json_string
# Main program
if __name__ == '__main__':
    # For testing purposes: a hard-coded local smoke run of the training
    # pipeline (`process`, defined earlier in this file).
    file = 'c:/temp/data.csv'
    # Column names are '::'-separated; features/targets below are indices
    # into this list, encoded as space-separated strings.
    column_names = 'a::b::c::d::e::f::g::h'
    features = '1 2 4 5 6 7'
    categorical_features = '1 2'
    target = '3'
    # classification, regression
    problem_type = 'regression'
    # linear_regression, polynomial_regression, support_vector_regression, decision_tree_regression, random_forest_regression
    # logistic_regression, knn, svm, kernel_svm, naive_bayes, decision_tree_classification, random_forest_classification
    algorithm = 'polynomial_regression'
    algorithm_parameters = ''
    # Output directory for the model, scalers and plot files.
    path = 'c:/temp/'
    # `process` returns a JSON string summarizing artifacts, score and errors.
    result = process(file, features, target, categorical_features, problem_type, algorithm, algorithm_parameters, path, column_names)
    print(result)
4ea04b0b3f0a816d2b3f5d21064a6d7e33f86ce8 | 1,575 | py | Python | Tools/python/lvlMaya/MayaUtil.py | vitei/Usa | c44f893d5b3d8080529ecf0e227f983fddb829d4 | [
"MIT"
] | 47 | 2018-04-27T02:16:26.000Z | 2022-02-28T05:21:24.000Z | Tools/python/lvlMaya/MayaUtil.py | vitei/Usa | c44f893d5b3d8080529ecf0e227f983fddb829d4 | [
"MIT"
] | 2 | 2018-11-13T18:46:41.000Z | 2022-03-12T00:04:44.000Z | Tools/python/lvlMaya/MayaUtil.py | vitei/Usa | c44f893d5b3d8080529ecf0e227f983fddb829d4 | [
"MIT"
] | 6 | 2019-08-10T21:56:23.000Z | 2020-10-21T11:18:29.000Z | import maya.cmds as cmds
#
# Maya utility functions
#
def getCurrentSelection():
    """Return the current Maya selection as a list of node names."""
    return cmds.ls(sl=True)
def createChildGroup( newChildGroupName, parentGroupName ):
    """Create an empty group, parent it under *parentGroupName*, then select it."""
    cmds.group( empty=True, name=newChildGroupName )
    cmds.parent( newChildGroupName, parentGroupName )
    cmds.select( newChildGroupName, r=True )
def addAndSetAttribute( groupName, attrName, attr ):
    """Add a string attribute *attrName* to node *groupName* and set its value."""
    cmds.addAttr( groupName, ln=attrName, dataType='string' )
    cmds.setAttr( groupName+'.'+attrName, attr, type='string' )
def addNameAttribute( groupName, name ):
    """Add a string attribute called 'name' to node *groupName* and set it."""
    cmds.addAttr( groupName, ln='name', dataType='string' )
    cmds.setAttr( groupName+'.name', name, type='string' )
def createNewGroup( groupName ):
    """Create an empty group named *groupName* at scene top level."""
    cmds.group( empty=True, name=groupName )
def setParent( child, newParent ):
    """Re-parent node *child* under node *newParent*."""
    cmds.parent( child, newParent )
def setCurrentGroup( groupName ):
    """Replace the current selection with node *groupName*."""
    cmds.select( groupName, r=True )
def setTranslation( groupName, x, y, z ):
    """Move node *groupName* to the given (x, y, z) position."""
    cmds.move( x, y, z, groupName )
def setRotation( groupName, x, y, z ):
    """Rotate node *groupName* by the given (x, y, z) Euler angles."""
    cmds.rotate( x, y, z, groupName )
def addColoredCube( r, g, b ):
    """Create a poly cube and set the color of the 'lambert1' shader.

    NOTE(review): this sets the color on ``lambert1``, which is presumably
    the shared default shader, so other objects using it would be tinted
    too -- confirm whether a per-cube material was intended.
    """
    cmds.polyCube()
    cmds.setAttr( "lambert1.color", r, g, b, type='double3' )
def importMayaBinary( path, namespace, groupName ):
    """Import a Maya binary file into its own namespace and group.

    Args:
        path: path of the .mb file to import.
        namespace: namespace to import the file's nodes into.
        groupName: name of the group the imported nodes are grouped under.

    Returns:
        True when the import succeeded, False when it failed (e.g. the
        file is missing or unreadable).
    """
    try:
        cmds.file(path, i=True, type='mayaBinary',
                  ignoreVersion=True, ra=True, mergeNamespacesOnClash=False, namespace=namespace,
                  pr=True, loadReferenceDepth='none', gr=True, groupName=groupName )
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer swallowed; a failed import still just reports False.
        return False
a0c95587f54fc1a449a85b3e41e7c5cf287e56fb | 3,079 | py | Python | install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/hierarchical_search_result_delegate.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/hierarchical_search_result_delegate.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/hierarchical_search_result_delegate.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore
from .search_result_delegate import SearchResultDelegate
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
views = sgtk.platform.current_bundle().import_module("views")
class HierarchicalSearchResultDelegate(SearchResultDelegate):
    """
    Delegate which renders search match entries in the hierarchical
    search completer.
    """

    def _render_result(self, widget, model_index):
        """
        Renders a result from the model into the provided widget.

        :param widget: Widget used to render the result.
        :type widget: ``SearchResultWidget``

        :param model_index: Index of the item to render.
        :type model_index: :class:`~PySide.QtCore.QModelIndex`
        """
        # Imported here (not at module scope) -- presumably to avoid a
        # circular import with the completer module; verify before moving.
        from .hierarchical_search_completer import HierarchicalSearchCompleter

        icon = shotgun_model.get_sanitized_data(model_index, QtCore.Qt.DecorationRole)
        if icon:
            thumb = icon.pixmap(512)
            widget.set_thumbnail(thumb)
        else:
            # probably won't hit here, but just in case, use default/empty
            # thumbnail
            widget.set_thumbnail(self._pixmaps.no_thumbnail)

        data = shotgun_model.get_sanitized_data(model_index, HierarchicalSearchCompleter.SG_DATA_ROLE)
        # Example of data stored in the data role:
        # {
        #     "path_label": "Assets > Character",
        #     "incremental_path": [
        #         "/Project/65",
        #         "/Project/65/Asset",
        #         "/Project/65/Asset/sg_asset_type/Character",
        #         "/Project/65/Asset/sg_asset_type/Character/id/734"
        #     ],
        #     "project_id": 65,
        #     "ref": {
        #         "type": "Asset",
        #         "id": 734
        #     },
        #     "label": "Bunny"
        # },
        if data["ref"]["type"]:
            et_url = shotgun_globals.get_entity_type_icon_url(data["ref"]["type"])
        else:
            et_url = None

        # Label with the matched search term underlined.
        underlined_label = self._underline_search_term(data["label"])

        # Present the entity-type icon (when available) and the name.
        if et_url:
            content = "<img src='%s'/> <b style='color: rgb(48, 167, 227)';>%s</b>" % (
                et_url, underlined_label
            )
        else:
            content = underlined_label
        # Second line: the "A > B > C" path of the match.
        content += "<br>%s" % data["path_label"]
        widget.set_text(content)
| 35.390805 | 102 | 0.635596 |
98a19f7317f4841544334f85ebf9310213e15fea | 5,342 | py | Python | egta/script/innerloop.py | cipherboy/quiesce | f1e25eb00f16a4c8c3547d1b8278f7027845fbef | [
"Apache-2.0"
] | 4 | 2019-01-12T09:02:18.000Z | 2021-10-30T12:08:27.000Z | egta/script/innerloop.py | cipherboy/quiesce | f1e25eb00f16a4c8c3547d1b8278f7027845fbef | [
"Apache-2.0"
] | 1 | 2020-09-28T01:15:11.000Z | 2020-10-07T06:58:47.000Z | egta/script/innerloop.py | cipherboy/quiesce | f1e25eb00f16a4c8c3547d1b8278f7027845fbef | [
"Apache-2.0"
] | 7 | 2019-03-09T11:45:44.000Z | 2022-03-25T12:18:43.000Z | """Script utility for running inner loop"""
import asyncio
import json
import logging
from concurrent import futures
from gameanalysis import regret
from egta import innerloop
from egta import schedgame
from egta.script import schedspec
from egta.script import utils
def add_parser(subparsers):
    """Create and register the `quiesce` sub-command parser.

    Configures all command-line options of the inner-loop (quiesce)
    procedure and attaches the async entry point as ``parser.run`` so the
    CLI dispatcher can invoke it.
    """
    parser = subparsers.add_parser(
        "quiesce",
        help="""Compute equilibria using the quiesce procedure""",
        description="""Samples profiles from small restricted strategy sets,
        expanding set support by best responses to candidate restricted game
        equilibria. For games with a large number of players, a reduction
        should be specified. The result is a list where each element specifies
        an "equilibrium".""",
    )
    # Positional: where/how profiles get simulated.
    parser.add_argument(
        "scheduler",
        metavar="<sched-spec>",
        help="""A scheduler specification,
        see `egta spec` for more info.""",
    )
    # Numeric thresholds controlling equilibrium acceptance.
    parser.add_argument(
        "--regret-thresh",
        metavar="<reg>",
        type=float,
        default=1e-3,
        help="""Regret threshold for a mixture to be considered an equilibrium.
        (default: %(default)g)""",
    )
    parser.add_argument(
        "--dist-thresh",
        metavar="<norm>",
        type=float,
        default=0.1,
        help="""Norm threshold for two mixtures to be considered distinct.
        (default: %(default)g)""",
    )
    parser.add_argument(
        "--max-restrict-size",
        metavar="<support>",
        type=int,
        default=3,
        help="""Support size threshold, beyond which restricted games are not
        required to be explored. (default: %(default)d)""",
    )
    parser.add_argument(
        "--num-equilibria",
        metavar="<num>",
        type=int,
        default=1,
        help="""Number of equilibria requested to be found. This is mainly
        useful when game contains known degenerate equilibria, but those
        strategies are still useful as deviating strategies. (default:
        %(default)d)""",
    )
    parser.add_argument(
        "--num-backups",
        metavar="<num>",
        type=int,
        default=1,
        help="""Number
        of backup restricted strategy set to pop at a time, when no equilibria
        are confirmed in initial required set. When games get to this point
        they can quiesce slowly because this by default pops one at a time.
        Increasing this number can get games like tis to quiesce more quickly,
        but naturally, also schedules more, potentially unnecessary,
        simulations. (default: %(default)d)""",
    )
    parser.add_argument(
        "--dev-by-role",
        action="store_true",
        help="""Explore deviations in
        role order instead of all at once. By default, when checking for
        beneficial deviations, all role deviations are scheduled at the same
        time. Setting this will check one role at a time. If a beneficial
        deviation is found, then that restricted strategy set is scheduled
        without exploring deviations from the other roles.""",
    )
    parser.add_argument(
        "--style",
        default="best",
        choices=["fast", "more", "best", "one"],
        help="""Style of equilibrium finding to use. `fast` is the fastests but
        least thorough, `one` will guarantee an equilibrium is found in
        potentially exponential time.""",
    )
    parser.add_argument(
        "--procs",
        type=int,
        default=2,
        metavar="<num-procs>",
        help="""Number
        of process to use. This will speed up computation if doing
        computationally intensive things simultaneously, i.e. nash finding.
        (default: %(default)d)""",
    )
    # Shared reduction options (--dpr / --hr etc.) are added by the utils helper.
    utils.add_reductions(parser)
    parser.run = run
async def run(args):
    """Entry point for the `quiesce` cli sub-command.

    Parses the scheduler and reduction from *args*, runs the inner loop to
    find equilibria, logs a human-readable summary and writes the
    equilibria (with their regrets) as JSON to ``args.output``.
    """
    sched = await schedspec.parse_scheduler(args.scheduler)
    red, red_players = utils.parse_reduction(sched, args)
    agame = schedgame.schedgame(sched, red, red_players)

    async def get_regret(eqm):
        """Gets the regret of an equilibrium"""
        # eqm > 0 is the support mask; deviation payoffs are only needed there.
        game = await agame.get_deviation_game(eqm > 0)
        return float(regret.mixture_regret(game, eqm))

    # Scheduler must stay open while profiles are requested (inner loop and
    # regret computation both schedule simulations).
    async with sched:
        with futures.ProcessPoolExecutor(args.procs) as executor:
            eqa = await innerloop.inner_loop(
                agame,
                regret_thresh=args.regret_thresh,
                dist_thresh=args.dist_thresh,
                restricted_game_size=args.max_restrict_size,
                num_equilibria=args.num_equilibria,
                num_backups=args.num_backups,
                devs_by_role=args.dev_by_role,
                style=args.style,
                executor=executor,
            )
        regrets = await asyncio.gather(*[get_regret(eqm) for eqm in eqa])
    # logging.error is used so the summary is visible at any log level.
    logging.error(
        "quiesce finished finding %d equilibria:\n%s",
        eqa.shape[0],
        "\n".join(
            "{:d}) {} with regret {:g}".format(i, sched.mixture_to_repr(eqm), reg)
            for i, (eqm, reg) in enumerate(zip(eqa, regrets), 1)
        ),
    )
    json.dump(
        [
            {"equilibrium": sched.mixture_to_json(eqm), "regret": reg}
            for eqm, reg in zip(eqa, regrets)
        ],
        args.output,
    )
    args.output.write("\n")
| 34.688312 | 82 | 0.6155 |
94d60a0da1f206107e0314df0189c4cd50c16141 | 2,097 | py | Python | kmod/mnist/util.py | wittawatj/kernel-mod | 147a05888855a15d72b28a734752a91d93018604 | [
"MIT"
] | 20 | 2018-10-26T16:18:56.000Z | 2020-11-10T01:08:56.000Z | kmod/mnist/util.py | wittawatj/kernel-mod | 147a05888855a15d72b28a734752a91d93018604 | [
"MIT"
] | null | null | null | kmod/mnist/util.py | wittawatj/kernel-mod | 147a05888855a15d72b28a734752a91d93018604 | [
"MIT"
] | 2 | 2019-12-08T21:08:53.000Z | 2020-11-10T01:08:57.000Z | """
Utility functions for Mnist dataset.
"""
import numpy as np
import torch
import kmod.log as log
import kmod.plot as plot
def pt_sample_by_labels(data, label_counts):
    """
    Select points from *data* class by class and stack them into one tensor.

    data: a dataset such that data[i][0] is a point, and data[i][1] is an
        integer label.
    label_counts: a list of tuples of two values (A, B), where A is a label,
        and B is the count of points to take for that label.
    """
    all_labels = np.array([example[1] for example in data])
    picked = []
    for wanted_label, n_wanted in label_counts:
        # indices of all examples carrying this label, in dataset order
        matching = np.flatnonzero(all_labels == wanted_label)
        picked.extend(data[idx][0] for idx in matching[:n_wanted])
    # stack everything into a single tensor
    return torch.stack(picked)
def show_sorted_digits(imgs, digit_mapper, n_per_row=10, figsize=(8, 8),
                       digits=None, normalize=True):
    """
    Show sorted generated Mnist digits, one row per digit class.

    imgs: a Pytorch tensor of images containing a mix of digits.
    digit_mapper: a callable object which takes a stack of images and returns a
        list of digit identities as integers. This is likely a classifier.
    n_per_row: number of generated digits to show per row.
    figsize: figure size forwarded to the plotting helper.
    digits: digits (a list) to show. If None or empty, show all digits present.
        (Default changed from a mutable `[]` to `None`; behavior is identical
        because only the truthiness of `digits` is tested below.)
    normalize: forwarded to plot.show_torch_imgs.

    Raises ValueError when fewer than n_per_row images exist for a digit.
    """
    Y = digit_mapper(imgs)
    UY = torch.unique(Y)
    list_row_imgs = []
    for y in UY:
        if not digits or y in digits:
            # flat indices of all images classified as digit y
            Iy = torch.nonzero(Y == y).view(-1)
            len_Iy = len(Iy)
            if len_Iy < n_per_row:
                # not enough generated images for the digit y
                raise ValueError('Only {} images available for digit {}. But you want to show n_per_row = {} images.'.format(len_Iy, y, n_per_row))
            imgs_y = imgs[Iy[:n_per_row]]
            list_row_imgs.append(imgs_y)
    stack_imgs = torch.cat(list_row_imgs, dim=0)
    # show the images
    plot.show_torch_imgs(stack_imgs, nrow=n_per_row, figsize=figsize,
                         normalize=normalize)
| 31.772727 | 147 | 0.639008 |
9ca20b26188520d6a847043fb3bc4506c0554fa4 | 1,887 | py | Python | code/proc_eeg_3_compute_activation_patterns.py | nschawor/eeg-leadfield-mixing | ffc5ef8138f6620944b84feced174ea6880cbcfa | [
"MIT"
] | 3 | 2022-03-14T12:42:25.000Z | 2022-03-16T08:36:53.000Z | code/proc_eeg_3_compute_activation_patterns.py | nschawor/eeg-leadfield-mixing | ffc5ef8138f6620944b84feced174ea6880cbcfa | [
"MIT"
] | null | null | null | code/proc_eeg_3_compute_activation_patterns.py | nschawor/eeg-leadfield-mixing | ffc5ef8138f6620944b84feced174ea6880cbcfa | [
"MIT"
] | null | null | null | """ Data: compute weighted activation pattern for each subject.
"""
import os
import pandas as pd
import numpy as np
import mne
import ssd
from helper import print_progress, load_ssd
from params import EEG_DATA_FOLDER, SSD_BANDWIDTH, SSD_EEG_DIR, \
SPEC_PARAM_CSV_EEG, PATTERN_EEG_DIR
# %% specify participants and folders
# Subject IDs are the prefix (before '_') of each SSD filter file name.
subjects = np.unique([s.split("_")[0] for s in os.listdir(SSD_EEG_DIR)])
df = pd.read_csv(SPEC_PARAM_CSV_EEG)
df = df.set_index("subject")
os.makedirs(PATTERN_EEG_DIR, exist_ok=True)
# eyes-open / eyes-closed recording conditions
conditions = ["eo", "ec"]

# %% compute for all participants
for i_sub, subject in enumerate(subjects):
    print_progress(i_sub, subject, subjects)
    for condition in conditions:
        # compute weighted spatial pattern coefficients;
        # skip subjects/conditions that were already processed
        df_file_name = f"{PATTERN_EEG_DIR}/{subject}_{condition}_patterns.csv"
        if os.path.exists(df_file_name):
            continue
        # load raw file for computing band-power
        file_name = f"{EEG_DATA_FOLDER}/{subject}_{condition}-raw.fif"
        raw = mne.io.read_raw_fif(file_name, verbose=False)
        raw.load_data()
        raw.pick_types(eeg=True)
        raw.set_eeg_reference("average")
        filters, patterns = load_ssd(subject, "eeg", condition)
        raw_ssd = ssd.apply_filters(raw, filters)
        # compute band power in narrow band around the subject's peak frequency
        peak = df.loc[subject]["peak_frequency"]
        raw_ssd.filter(peak - SSD_BANDWIDTH, peak + SSD_BANDWIDTH, verbose=False)
        # weight patterns by filtered band-power (std of each SSD component)
        std_comp = np.std(raw_ssd._data, axis=1)
        # sanity check: broadcasting scales each pattern column by its std
        assert np.all(std_comp[1] * patterns[:, 1] == (std_comp * patterns)[:, 1])
        weighted_patterns = std_comp * patterns
        # save weighted patterns (one column per EEG channel)
        df_patterns = pd.DataFrame(weighted_patterns.T, columns=raw.ch_names)
        df_patterns.to_csv(df_file_name, index=False)
ab4a7a48a473b5ca3b78e63c912b4da8f6619c99 | 194 | py | Python | goutdotcom/widgets/widgets.py | Spiewart/goutdotcom | 0916155732a72fcb8c8a2fb0f4dd81efef618af8 | [
"MIT"
] | null | null | null | goutdotcom/widgets/widgets.py | Spiewart/goutdotcom | 0916155732a72fcb8c8a2fb0f4dd81efef618af8 | [
"MIT"
] | null | null | null | goutdotcom/widgets/widgets.py | Spiewart/goutdotcom | 0916155732a72fcb8c8a2fb0f4dd81efef618af8 | [
"MIT"
] | null | null | null | from django.forms import RadioSelect
class HorizontalRadioSelect(RadioSelect):
    """RadioSelect widget rendered with horizontal layout templates."""

    # Custom templates that lay the radio inputs out in a row rather than
    # Django's default vertical list.
    template_name = "widgets/horizontal_radios.html"
    option_template_name = "widgets/horizontal_inputs.html"
| 27.714286 | 59 | 0.814433 |
d786176687c7e31efa96cc728904362445fd68ac | 804 | py | Python | scipy/spatial/__init__.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | 1 | 2018-10-04T15:34:14.000Z | 2018-10-04T15:34:14.000Z | scipy/spatial/__init__.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | scipy/spatial/__init__.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | """
=============================================================
Spatial algorithms and data structures (:mod:`scipy.spatial`)
=============================================================
Nearest-neighbor queries:
.. autosummary::
:toctree: generated/
KDTree -- class for efficient nearest-neighbor queries
cKDTree -- class for efficient nearest-neighbor queries (faster impl.)
distance -- module containing many different distance measures
Delaunay triangulation:
.. autosummary::
:toctree: generated/
Delaunay
tsearch
"""
from kdtree import *
from ckdtree import *
from qhull import *
__all__ = filter(lambda s:not s.startswith('_'),dir())
__all__ += ['distance']
import distance
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| 22.333333 | 77 | 0.606965 |
a9002bc9331488f94b79a5d731a8ec031990b5b3 | 13,172 | py | Python | src/ibfanalytics/__init__.py | lgguzman/ibfanalytics | a65efc08bf451e7b8dae34df5e5ce4bf2e1cea71 | [
"MIT"
] | null | null | null | src/ibfanalytics/__init__.py | lgguzman/ibfanalytics | a65efc08bf451e7b8dae34df5e5ce4bf2e1cea71 | [
"MIT"
] | null | null | null | src/ibfanalytics/__init__.py | lgguzman/ibfanalytics | a65efc08bf451e7b8dae34df5e5ce4bf2e1cea71 | [
"MIT"
] | null | null | null | from itertools import combinations
from pyspark.ml.fpm import FPGrowth
from pyspark.sql.functions import col, size
from sbxpy import SbxCore
from functools import reduce
import asyncio
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import mean as _mean, stddev as _stddev, col, to_json, from_json, struct, lit
from pyspark.sql import functions as func
from pyspark.sql.types import StructType, StructField
from pyspark.ml.linalg import VectorUDT
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans, BisectingKMeans, GaussianMixture
import matplotlib.pyplot as plt
from pyspark.ml.feature import PCA as PCAml
class AssociationRules:
    """Market-basket analysis on SBX cart data using Spark FP-Growth.

    Pulls purchased cart items from SBX, builds Spark DataFrames of
    per-purchase transactions, mines frequent itemsets / association rules
    and produces product suggestions.
    """

    async def df_sbx_cart_item(self, sbx, spark, min_transaction=6, attribute='variety_name', customer=None, to_suggested=False):
        """Build a transactions DataFrame ``(id, items)`` from purchased carts.

        sbx: logged-in SbxCore client.  spark: active SparkSession.
        min_transaction: purchases with this many or fewer distinct items
            are dropped (only when `to_suggested` is False).
        attribute: unused; kept for backward compatibility -- item names are
            always built as "<product_group> <color>".
        customer: optional customer key(s) to restrict the query.
        to_suggested: when True, fetch only a small sample (one page of 3)
            and return the all-combinations DataFrame for suggestion scoring
            instead of the per-purchase transactions.
        """
        query = sbx.with_model('cart_box_item').fetch_models(['variety', 'cart_box']) \
            .and_where_is_not_null('cart_box.purchase')
        if customer is not None:
            query = query.and_where_in('cart_box.customer', customer)
        if not to_suggested:
            total_data = await query.find_all_query()
        else:
            # a small sample is enough to seed the suggestion combinations
            query.set_page(0)
            query.set_page_size(3)
            temp = await query.find()
            total_data = [temp]
        # d: purchase key -> {item name: 1}  (distinct items per purchase)
        d = {}
        errors = []
        for data in total_data:
            for item in data['results']:
                try:
                    # item name = "<product_group> <color>"
                    color = data['fetched_results']['variety'][item['variety']]['color']
                    item['name'] = item['product_group'] + ' ' + color
                    purchase = data['fetched_results']['cart_box'][item['cart_box']]['purchase']
                    if purchase not in d:
                        d[purchase] = {}
                    d[purchase][item['name']] = 1
                except Exception as inst:
                    # malformed rows are collected and reported, not fatal
                    errors.append(inst)
        if len(errors) > 0:
            print("Errors")
            print(errors)
        if not to_suggested:
            # one row per purchase: (purchase_key, [item names])
            transactions = [(key, [k for k, val in value.items()]) for key, value in d.items()]
            transactions = list(filter(lambda t: len(t[1]) > min_transaction, transactions))
            return spark.createDataFrame(transactions, ["id", "items"]).repartition(100)

        def merging_data(acc, elements):
            # union of all item names, preserving first-seen order
            for element in elements:
                if element not in acc:
                    acc.append(element)
            return acc

        item_lists = [[k for k, val in value.items()] for key, value in d.items()]
        merged = reduce(merging_data, item_lists, [])
        return await self.transform_possible_list(spark, merged)

    async def df_from_varieties(self, sbx, spark, varieties):
        """Fetch varieties by key and return the combinations DataFrame of
        their "<product_group> <color>" names.

        Bug fix: names are now appended to a list. Previously the loop
        re-bound the list to a single string, so transform_possible_list
        received one string (iterated character by character) instead of
        the collected names.
        """
        query = sbx.with_model('variety') \
            .where_with_keys(varieties)
        total_data = await query.find_all_query()
        errors = []
        names = []
        for data in total_data:
            for item in data['results']:
                try:
                    names.append(item["product_group"] + " " + item["color"])
                except Exception as inst:
                    errors.append(inst)
        if len(errors) > 0:
            print("Errors")
            print(errors)
        return await self.transform_possible_list(spark, names)

    async def get_model(self, df, min_support=0.1, min_confidence=0.6):
        """Fit FP-Growth on the transactions in *df* (column "items").

        Returns (frequent itemsets sorted by freq desc,
                 association rules sorted by confidence asc,
                 fitted model).
        """
        fp_growth = FPGrowth(itemsCol="items", minSupport=min_support, minConfidence=min_confidence)
        model = fp_growth.fit(df)
        return model.freqItemsets.sort("freq", ascending=False), model.associationRules.sort("confidence", ascending=True), model

    async def transform_possible_list(self, spark, items):
        """Return a DataFrame ``(id, items)`` holding every non-empty subset
        of *items*, largest subsets first (candidate inputs for suggestions)."""
        combins = list(map(list, reduce(lambda acc, x: acc + list(combinations(items, x)), range(1, len(items) + 1), [])))
        combins = sorted(combins, key=lambda comb: len(comb), reverse=True)
        possibles = [(str(i), comb) for i, comb in enumerate(combins)]
        return spark.createDataFrame(possibles, ["id", "items"]).repartition(100)

    async def run_suggested(self, model, df):
        """Score candidate item sets with a fitted FP-Growth model.

        Adds the `prediction` column plus item/prediction sizes, ordered by
        item-set size ascending and prediction descending.
        """
        return model.transform(df).withColumn("item_size", size(col("items"))) \
            .withColumn("prediction_size", size(col("prediction"))) \
            .orderBy(["item_size", "prediction"], ascending=[1, 0])

    async def get_replacement(self, sbx, varieties):
        """Return ``[varieties, replacements]`` where replacements are the
        variety keys linked via the 'replacement' model that are not already
        in *varieties* (deduplicated)."""
        query = sbx.with_model('replacement') \
            .and_where_in("variety", varieties).or_where_in("replace", varieties)
        total_data = await query.find_all_query()
        errors = []
        replacement = []
        for data in total_data:
            for item in data['results']:
                try:
                    if item["variety"] not in varieties:
                        replacement.append(item["variety"])
                    if item["replace"] not in varieties:
                        replacement.append(item["replace"])
                except Exception as inst:
                    errors.append(inst)
        return [varieties, list(set(replacement))]

    async def get_varieties_from_product_color(self, sbx, product_color):
        """Resolve "<product_group> <color>" labels to variety keys and
        return them together with their replacements.

        product_color: list of "<product_group> <color>" strings.
        """
        product_group = list(map(lambda x: x.split(" ")[0], product_color))
        query = sbx.with_model('variety') \
            .and_where_in("product_group", product_group)
        total_data = await query.find_all_query()
        errors = []
        varieties = []
        for data in total_data:
            for item in data['results']:
                try:
                    if item["product_group"] + ' ' + item["color"] in product_color:
                        varieties.append(item["_KEY"])
                except Exception as inst:
                    errors.append(inst)
        return await self.get_replacement(sbx, list(set(varieties)))

    async def test(self):
        """End-to-end smoke test: login to SBX (credentials from environment
        variables), mine rules from real cart data and print suggestions."""
        sbx = SbxCore()
        sbx.initialize(os.environ['DOMAIN'], os.environ['APP-KEY'], os.environ['SERVER_URL'])
        await sbx.login(os.environ['LOGIN'], os.environ['PASSWORD'], os.environ['DOMAIN'])
        spark = SparkSession.builder.appName("SparkExample").getOrCreate()
        df = await self.df_sbx_cart_item(sbx, spark, 6, '_KEY')
        freq_items, association_rules, model = await self.get_model(df, 0.1, 0.6)
        suggested = await self.run_suggested(model, df)
        suggested.drop('items').show(10, False)
        spark.stop()

    def run_test(self):
        """Synchronous wrapper around test().

        The event loop is now closed in a `finally` block so repeated calls
        do not leak loops.
        """
        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(self.test())
        finally:
            loop.close()
class UserCluster:
    async def df_sbx_customer_purchase_behaivor(self, sbx, spark, date_int, limit =3, variety_sw = False):
        """Build one row per customer with a binary product-purchase vector.

        sbx: logged-in SbxCore client.  spark: active SparkSession.
        date_int: only cart items charged after this date are considered.
        limit: customers with `limit` or fewer distinct purchases are dropped.
        variety_sw: when True, products are keyed as "product_group variety"
            instead of product_group alone.
        Returns a DataFrame with a Spark ML dense `features` vector column.
        """
        data_complete = await sbx.with_model('cart_box_item').fetch_models(['cart_box']) \
            .and_where_greater_than('cart_box.charge_date', date_int) \
            .and_where_is_not_null('cart_box.purchase').find_all_query()
        sc = spark.sparkContext
        # d: customer -> {product name: 1}; groups: all product names seen
        d = {}
        groups = {}
        errors = []
        for data in data_complete:
            for item in data['results']:
                try:
                    customer = data['fetched_results']['cart_box'][item['cart_box']]['customer']
                    if customer not in d:
                        d[customer] = {}
                    purchase = item['product_group'] + ((' ' + item['variety']) if variety_sw else '')
                    # binary indicator (1 = purchased at least once)
                    if purchase not in d[customer]:
                        d[customer][purchase] = 1
                    # else:
                    #     d[customer][purchase] = d[customer][purchase] + 1
                    if purchase not in groups:
                        groups[purchase] = 1
                except Exception as inst:
                    # malformed rows collected silently; not fatal
                    errors.append(inst)
        # global column order shared by every customer's vector
        columns = [key for key, value in groups.items()]
        def newRow(data, customer, columns):
            # align this customer's 0/1 purchases with the global column order
            row = {}
            row['customer'] = customer
            row['products'] = []
            for key in columns:
                if key not in data:
                    row['products'].append(0)
                else:
                    row['products'].append(data[key])
            return row
        customers = [ newRow(value, key, columns) for key, value in d.items()]
        customers = list(filter(lambda x: sum(x['products']) > limit, customers) )
        tmp = sc.parallelize(customers, numSlices=100)
        df = spark.read.option("multiLine", "true").json(tmp)
        # Re-encode the plain `products` array as a Spark ML vector column
        # by round-tripping through VectorUDT's JSON representation.
        json_vec = to_json(struct(struct(
            lit(1).alias("type"),  # type 1 is dense, type 0 is sparse
            col("products").alias("values")
        ).alias("v")))
        schema = StructType([StructField("v", VectorUDT())])
        return df.withColumn(
            "features", from_json(json_vec, schema).getItem("v")
        )
    async def df_sbx_customer_special_box_purchased(self, sbx, spark):
        """Per-customer statistics for purchases of the special-box variety.

        Aggregates average total_items / current_percentage and the purchase
        count per customer, z-scores the three columns and assembles them
        into a `features` vector for clustering.
        """
        data = await sbx.with_model('cart_box') \
            .set_page_size(1000) \
            .and_where_is_not_null('purchase') \
            .and_where_is_equal('variety', os.environ['SPECIAL_BOX']).find()
        sc = spark.sparkContext
        def deleteMeta(d):
            # keep only the fields needed for clustering; count=1 per row so
            # a later sum() yields purchases per customer
            dt = {}
            dt['customer'] = d['customer']
            dt['total_items'] = d['total_items']
            dt['current_percentage'] = d['current_percentage']
            dt['count'] = 1
            return dt
        dit = list(map(deleteMeta, data['results']))
        tmp = sc.parallelize(dit, numSlices=100)
        df = spark.read.option("multiLine", "true").json(tmp)
        df2 = df.groupBy("customer").agg(func.avg("total_items").alias('total_items'),
                                         func.avg("current_percentage").alias('current_percentage'),
                                         func.sum("count").alias('count'))
        # column-wise mean/stddev for manual standardization (z-scores)
        (cumean, custd, comean, costd, tmean, tstd) = df2.select(
            _mean(col('current_percentage')).alias('cumean'),
            _stddev(col('current_percentage')).alias('custd'),
            _mean(col('count')).alias('comean'),
            _stddev(col('count')).alias('costd'),
            _mean(col('total_items')).alias('total_items'),
            _stddev(col('total_items')).alias('total_items'),
        ).first()
        df3 = df2.withColumn("acurrent_percentage", (col("current_percentage") - cumean) / custd).withColumn("acount", (
            col("count") - comean) / costd).withColumn("atotal_items", (col("total_items") - tmean) / tstd)
        vecAssembler = VectorAssembler(inputCols=["acurrent_percentage", "acount", "atotal_items"],
                                       outputCol="features")
        return vecAssembler.transform(df3)
async def k_means(self,df, k, seed):
if k is not None and seed is not None:
kmeans = KMeans(k=k, seed=seed)
else:
kmeans = KMeans()
return kmeans.fit(df.select('features'))
async def bisecting_means(self,df,k):
return BisectingKMeans(k=k).fit(df.select('features'))
async def gaussian_mixture(self,df):
return GaussianMixture().fit(df.select('features'))
async def run_cluster(self,model, df):
return model.transform(df)
async def plot_cluster(self, df, x='_3', y= '_4'):
pca = PCAml(k=2, inputCol="features", outputCol="pca")
model3 = pca.fit(df)
transformed2 = model3.transform(df)
def extract(row):
return (row.customer,) + (row.prediction,) + tuple(row.pca.toArray().tolist())
pcadf = transformed2.rdd.map(extract).toDF(["customer", "prediction"])
pcadf.show(10, False)
pandad = pcadf.toPandas()
pandad.plot.scatter(x=x, y=y, c='prediction', colormap='viridis')
plt.show()
async def test(self):
sbx = SbxCore()
sbx.initialize(os.environ['DOMAIN'], os.environ['APP-KEY'], os.environ['SERVER_URL'])
login = await sbx.login(os.environ['LOGIN'], os.environ['PASSWORD'], os.environ['DOMAIN2'] )
spark = SparkSession.builder.appName("SparkExample").getOrCreate()
df = await self.df_sbx_customer_special_box_purchased(sbx,spark)
df.show()
model = await self.k_means(df,3,1)
transformed = await self.run_cluster(model, df)
# transformed.select(['customer','current_percentage', 'count' , 'total_items', 'prediction']).show(20, False)
await self.plot_cluster(transformed)
spark.stop()
def run_test(self):
loop = asyncio.new_event_loop()
loop.run_until_complete(self.test())
| 42.766234 | 129 | 0.585484 |
6c48fb5858fed46322f5359955b023639aa574b8 | 13,679 | py | Python | cosine/core/order_worker.py | oladotunr/cosine | 25c37f6a31ef014eff6fd0211fa31193b6055515 | [
"MIT"
] | 3 | 2018-10-10T16:44:02.000Z | 2022-03-28T15:27:08.000Z | cosine/core/order_worker.py | oladotunr/cosine | 25c37f6a31ef014eff6fd0211fa31193b6055515 | [
"MIT"
] | 1 | 2021-03-25T22:01:01.000Z | 2021-03-25T22:01:01.000Z | cosine/core/order_worker.py | oladotunr/cosine | 25c37f6a31ef014eff6fd0211fa31193b6055515 | [
"MIT"
] | 1 | 2020-06-08T18:49:46.000Z | 2020-06-08T18:49:46.000Z | """
#
# 15/08/2018
# Oladotun Rominiyi - Copyright © 2018. all rights reserved.
"""
__author__ = 'dotun rominiyi'
# IMPORTS
from datetime import datetime, timedelta
from enum import Enum
from decimal import Decimal
from cosine.core.config import FieldSet as Pos
from cosine.core.logger import null_logger
from cosine.core.utils import epsilon_equals
from cosine.core.order_worker_types import PendingAction, LostControlError
from cosine.venues.base_venue import AsyncEvents, OrderType, OfferType, OrderStatus
# MODULE FUNCTIONS
def empty_pos(price=Decimal(0.0)):
return Pos(
price=price,
openpos=Decimal(0.0),
pending=PendingAction.NONE,
order=None,
new_pos=None
)
# MODULE CLASSES
class CosineOrderWorker(object):
def __init__(self, active_depth, instrument, venue, logger=None):
self.logger = logger if logger else null_logger
self._depth = active_depth
self._venue = venue
self._instr = instrument
self._balances = {}
self._halted = False
self._bids = {}
self._asks = {}
self._pending_orders = {}
self._pending_amends = {}
self._pending_cancels = {}
self._setup_async()
def update(self, bids, asks):
try:
# reconcile any pending requests...
self.reconcile()
# if we're halted then we shouldn't bother doing anything new...
if self._halted:
return
for bid in bids:
currbid = self._bids.get(bid.price)
if currbid is None:
# new order...
self.update_level(OfferType.Bid, bid, empty_pos(price=bid.price))
elif not epsilon_equals(bid.openpos, currbid.openpos):
# update order...
self.update_level(OfferType.Bid, bid, currbid)
# clean up orders...
new_pxs = map(lambda x: x.price, bids)
removals = [self._bids[px] for px in self._bids if not px in new_pxs]
[self.update_level(OfferType.Bid, empty_pos(price=currbid.price), currbid) for currbid in removals]
for ask in asks:
currask = self._asks.get(ask.price)
if currask is None:
# new order...
self.update_level(OfferType.Ask, ask, empty_pos(price=ask.price))
elif not epsilon_equals(ask.openpos, currask.openpos):
# update order...
self.update_level(OfferType.Ask, ask, currask)
# clean up orders...
new_pxs = map(lambda x: x.price, asks)
removals = [self._asks[px] for px in self._asks if not px in new_pxs]
[self.update_level(OfferType.Ask, empty_pos(price=currask.price), currask) for currask in removals]
except LostControlError as err:
self.logger.error(err)
self.pull_all()
def pull_all(self):
self._halted = True
res = self._venue.cancel_all_orders(instrument=self._instr)
if not self._venue.is_async:
self.on_cancel_all_orders(res)
def update_level(self, side, new_lvl, curr_lvl):
if curr_lvl.pending != PendingAction.NONE:
return
if curr_lvl.openpos == 0.0 and new_lvl.openpos > 0.0 and curr_lvl.order is None and \
self.check_against_balance(side, price=new_lvl.price, qty=new_lvl.openpos):
# place a new order...
self.commit_balance(side, price=new_lvl.price, qty=new_lvl.openpos)
curr_lvl.pending = PendingAction.NEW_ORDER
order = self._venue.new_order(
offer_type=side,
order_type=OrderType.Limit,
instrument=self._instr,
price=new_lvl.price,
quantity=new_lvl.openpos
)
curr_lvl.openpos = order.remaining_qty
curr_lvl.filled = order.initial_qty - order.remaining_qty
curr_lvl.pending = PendingAction.from_status(order.status, curr_lvl.pending)
if curr_lvl.pending != PendingAction.NONE:
self._pending_orders[order.id] = order
curr_lvl.order = order
elif curr_lvl.order and curr_lvl.openpos > 0.0 and new_lvl.openpos == 0.0:
# clear out the pos...
curr_lvl.pending = PendingAction.CANCEL_ORDER
self._pending_cancels[curr_lvl.order.id] = curr_lvl.order
res = self._venue.cancel_order(curr_lvl.order)
if not self._venue.is_async:
self.on_cancel_order(res)
elif curr_lvl.order:
# amend the pos...
curr_lvl.pending = PendingAction.AMEND_ORDER
curr_lvl.new_pos = new_lvl
self._pending_amends[curr_lvl.order.id] = curr_lvl.order
res = self._venue.cancel_order(curr_lvl.order)
if not self._venue.is_async:
self.on_cancel_order(res)
def check_against_balance(self, side, price, qty):
balance = self._balances[side]
required = qty if side == OfferType.Ask else price * qty
available = balance.available_balance
can_commit = available >= required
if not can_commit:
asset = self._instr.asset if side == OfferType.Bid else self._instr.ccy
self.logger.warning("CosineOrderWorker - [{0}|{1}] Insufficient inventory - (has: {2}, requires: {3})".format(
self._venue.name,
asset.symbol,
available,
required
))
return can_commit
def cancel_balance(self, side, price, qty):
balance = self._balances[side]
update_value = qty if side == OfferType.Ask else price * qty
balance.available_balance += update_value
def clear_balance(self, side, price, qty):
balance = self._balances[side]
update_value = qty if side == OfferType.Ask else price * qty
balance.available_balance -= update_value
balance.real_balance -= update_value
def commit_balance(self, side, price, qty):
balance = self._balances[side]
update_value = qty if side == OfferType.Ask else price * qty
balance.available_balance -= update_value
def balance_sync(self):
balance_info = self._venue.get_inventory()
self._balances = {
OfferType.Ask: balance_info.balances[self._instr.ccy.symbol],
OfferType.Bid: balance_info.balances[self._instr.asset.symbol]
}
self.logger.info(f"CosineOrderWorker - Balances: {self._balances}")
def synchronise(self):
# get all the open orders...
try:
open_orders = self._venue.get_open_orders(
instrument=self._instr,
order_type=OrderType.Limit,
max_count=self._depth * 2
)
except Exception as e:
self.logger.exception(e)
raise LostControlError(str("Could not get all open orders for instrument: "+self._instr.name))
# populate the bids and asks based on the known state at the venue...
bids = dict(**self._bids)
asks = dict(**self._asks)
self._clear_orders()
for order in open_orders:
lvls = asks if order.side == OfferType.Ask else bids
currpos = lvls.get(order.price)
pos = Pos(
price=order.price,
openpos=order.remaining_qty,
filled=order.initial_qty - order.remaining_qty,
pending=PendingAction.from_status(order.status, currpos.pending if currpos else PendingAction.NONE),
order=order
)
lvls[order.price] = pos
self.balance_sync()
def reconcile(self):
pendings = len(self._pending_amends) + len(self._pending_orders) + len(self._pending_cancels)
if pendings == 0:
return
# get all the open orders...
try:
open_orders = self._venue.get_open_orders(
instrument=self._instr,
order_type=OrderType.Limit,
max_count=self._depth * 2
)
except Exception as e:
self.logger.exception(e)
raise LostControlError(str("Could not get all open orders for instrument: "+self._instr.name))
# reconcile any pending states against open positions on the book...
for order in open_orders:
lvls = self._asks if order.side == OfferType.Ask else self._bids
curr_lvl = lvls.get(order.price)
if not curr_lvl:
pos = Pos(
price=order.price,
openpos=order.remaining_qty,
filled=order.initial_qty - order.remaining_qty,
pending=PendingAction.from_status(order.status, PendingAction.NONE),
order=order
)
lvls[order.price] = pos
continue
if order.status == OrderStatus.Pending:
continue
elif curr_lvl.pending == PendingAction.NEW_ORDER:
curr_lvl.pending = PendingAction.NONE
curr_lvl.openpos = order.remaining_qty
curr_lvl.filled = order.initial_qty - order.remaining_qty
curr_lvl.order = order
del self._pending_orders[order.id]
elif curr_lvl.pending == PendingAction.CANCEL_ORDER:
curr_lvl.pending = PendingAction.NONE
curr_lvl.openpos = Decimal(0.0)
curr_lvl.filled = Decimal(0.0)
curr_lvl.order = None
del self._pending_cancels[order.id]
elif curr_lvl.pending == PendingAction.AMEND_ORDER:
if curr_lvl.new_pos:
new_pos = curr_lvl.new_pos
self.commit_balance(order.side, price=new_pos.price, qty=new_pos.openpos)
curr_lvl.new_pos = None
order = self._venue.new_order(
offer_type=order.side,
order_type=OrderType.Limit,
instrument=self._instr,
price=new_pos.price,
quantity=new_pos.openpos
)
curr_lvl.openpos = order.remaining_qty
curr_lvl.filled = order.initial_qty - order.remaining_qty
curr_lvl.pending = PendingAction.from_status(order.status, curr_lvl.pending)
curr_lvl.order = order
else:
curr_lvl.pending = PendingAction.NONE
curr_lvl.openpos = order.remaining_qty
curr_lvl.filled = order.initial_qty - order.remaining_qty
curr_lvl.order = order
del self._pending_orders[order.id]
def _setup_async(self):
if not self._venue.is_async: return
self._venue.on(AsyncEvents.OnPlaceOrder, self.on_place_order)
self._venue.on(AsyncEvents.OnExecution, self.on_execution)
self._venue.on(AsyncEvents.OnCancelOrder, self.on_cancel_order)
self._venue.on(AsyncEvents.OnCancelAllOrders, self.on_cancel_all_orders)
try:
self.synchronise()
except LostControlError as err:
self.logger.error(err)
self.pull_all()
def _clear_orders(self):
self._bids.clear()
self._asks.clear()
self._pending_orders.clear()
self._pending_amends.clear()
self._pending_cancels.clear()
def on_place_order(self, order):
if order.placed and order.id in self._pending_orders:
del self._pending_orders[order.id]
else:
raise LostControlError(str(order))
def on_execution(self, execution):
if execution.instrument_venue_id != self._instr.venue_id: return
if execution.price in self._bids:
bid = self._bids[execution.price]
if bid.order.id == execution.bid_order_id:
self.clear_balance(OfferType.Bid, price=execution.price, qty=execution.qty)
bid.openpos -= execution.qty
bid.filled += execution.qty
if bid.openpos == 0.0:
del self._bids[execution.price]
elif execution.price in self._asks:
ask = self._asks[execution.price]
if ask.order.id == execution.ask_order_id:
self.clear_balance(OfferType.Ask, price=execution.price, qty=execution.qty)
ask.openpos -= execution.qty
ask.filled += execution.qty
if ask.openpos == 0.0:
del self._asks[execution.price]
def on_cancel_order(self, response):
if response.cancelled:
order = self._pending_cancels.get(response.order_id)
if order:
del self._pending_cancels[order.id]
self.cancel_balance(order.side, price=order.price, qty=order.qty)
else:
raise LostControlError(str(response))
def on_cancel_all_orders(self, response):
if not response.cancelled:
raise LostControlError(str(response))
else:
self._clear_orders()
self.balance_sync()
@property
def bids(self):
return self._bids
@property
def asks(self):
return self._asks
@property
def instrument(self):
return self._instr
@property
def depth(self):
return self._depth
| 36.97027 | 122 | 0.591052 |
df0ebb7a0cd4cd007b97758a191529528484f184 | 691 | py | Python | app/core/management/commands/wait_for_db.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is availabe"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn=None
while not db_conn:
try:
db_conn=connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!!'))
| 27.64 | 78 | 0.632417 |
6ea77f88371225d16eb05ccf7c1dc75f5ba900af | 2,372 | py | Python | text.py | SimoneIppoliti/Apocapixel | d1ab05b1704258fec714e66ed3ba9b5f4a7c94f8 | [
"MIT"
] | null | null | null | text.py | SimoneIppoliti/Apocapixel | d1ab05b1704258fec714e66ed3ba9b5f4a7c94f8 | [
"MIT"
] | null | null | null | text.py | SimoneIppoliti/Apocapixel | d1ab05b1704258fec714e66ed3ba9b5f4a7c94f8 | [
"MIT"
] | null | null | null | import game
from game_objects import *
import managers
class Text(GameObject):
def __init__(self, font, text, position, pivot):
self._position = position
self.pivot = pivot
self.string = text
self.font = font
self.color = (255, 255, 255)
self.text = self.font.render(text, 0, self.color)
image_size = self.text.get_size()
p_x = image_size[0] * pivot[0]
p_y = image_size[1] * pivot[1]
self._pivot = (p_x, p_y)
text_x = position[0] - self._pivot[0]
text_y = position[1] - self._pivot[1]
self.position = [text_x, text_y]
self.is_active = True
self.is_visible = True
self.layer = managers.Layers.TEXT
self.depth = 0
managers.UpdateManager.add_item(self)
managers.DrawManager.add_item(self)
def update(self):
text_x = self._position[0] - self._pivot[0]
text_y = self._position[1] - self._pivot[1]
self.position = [text_x, text_y]
def draw(self, screen):
screen.blit(self.text, self.position)
def set_color(self, color):
self.color = color
self.text = self.font.render(self.string, 0, color)
def set_text(self, text):
self.text = text
self.text = self.font.render(text, 0, self.color)
def set_text_ext(self, text, color):
self.text = text
self.color = color
self.text = self.font.render(text, 0, color)
class UIManager:
    """Class-level HUD manager holding the score/health text widgets.

    All state lives on the class itself; even ``__init__`` is a classmethod,
    so ``self`` below is actually the class object.
    """
    # Running totals shown in the HUD.
    score = 0
    killed_enemies = 0
    player_health_text = None
    active_bullet = None
    @classmethod
    def __init__(self):
        """Create the HUD Text widgets anchored to the window edges.

        NOTE(review): ``@classmethod __init__`` is unusual -- calling
        ``UIManager()`` initialises class attributes rather than an
        instance; confirm this is the intended usage pattern.
        """
        font = pygame.font.Font("Assets\Legendaria_1.ttf", 32)
        # Window dimensions; presumably set by the game module at startup.
        _x, _y = (game.w_x, game.w_y)
        self.score_text = Text(font, "Score: 0", (10, _y - 32), (0, 1))
        self.killed_enemies_text = Text(font, "Killed Enemies: 0", (10, _y - 8), (0, 1))
        self.player_health_text = Text(font, "Health: 0", (10, 8), (0, 0))
        self.active_bullet = Text(font, "Active Magic: Yo mum", (10, 60), (0, 1))
    @classmethod
    def score_add(self, val):
        """Add ``val`` to the score and refresh its HUD label."""
        self.score += val
        self.score_text.set_text("Score: " + str(self.score))
    @classmethod
    def killed_enemies_add(self, val):
        """Add ``val`` to the kill count and refresh its HUD label."""
        self.killed_enemies += val
        self.killed_enemies_text.set_text("Killed Enemies: " + str(self.killed_enemies))
81e98f81d6ae8fef88cd1d6774f8bbf73774a152 | 10,721 | py | Python | synapse/rest/admin/__init__.py | lxndrbnsv/synapse | b2f4594275931a770712ec285a55467772cbfec8 | [
"Apache-2.0"
] | null | null | null | synapse/rest/admin/__init__.py | lxndrbnsv/synapse | b2f4594275931a770712ec285a55467772cbfec8 | [
"Apache-2.0"
] | null | null | null | synapse/rest/admin/__init__.py | lxndrbnsv/synapse | b2f4594275931a770712ec285a55467772cbfec8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2020, 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import platform
from typing import TYPE_CHECKING, Optional, Tuple
import synapse
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.rest.admin.background_updates import (
BackgroundUpdateEnabledRestServlet,
BackgroundUpdateRestServlet,
)
from synapse.rest.admin.devices import (
DeleteDevicesRestServlet,
DeviceRestServlet,
DevicesRestServlet,
)
from synapse.rest.admin.event_reports import (
EventReportDetailRestServlet,
EventReportsRestServlet,
)
from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
from synapse.rest.admin.registration_tokens import (
ListRegistrationTokensRestServlet,
NewRegistrationTokenRestServlet,
RegistrationTokenRestServlet,
)
from synapse.rest.admin.rooms import (
ForwardExtremitiesRestServlet,
JoinRoomAliasServlet,
ListRoomRestServlet,
MakeRoomAdminRestServlet,
RoomEventContextServlet,
RoomMembersRestServlet,
RoomRestServlet,
RoomStateRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
from synapse.rest.admin.username_available import UsernameAvailableRestServlet
from synapse.rest.admin.users import (
AccountValidityRenewServlet,
DeactivateAccountRestServlet,
PushersRestServlet,
RateLimitRestServlet,
ResetPasswordRestServlet,
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
UsersRestServletV2,
UserTokenRestServlet,
WhoisRestServlet,
)
from synapse.types import JsonDict, RoomStreamToken
from synapse.util.versionstring import get_version_string
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class VersionServlet(RestServlet):
    """Admin endpoint reporting the running Synapse and Python versions."""

    PATTERNS = admin_patterns("/server_version$")

    def __init__(self, hs: "HomeServer"):
        # Both values are fixed for the process lifetime, so compute once.
        server_version = get_version_string(synapse)
        python_version = platform.python_version()
        self.res = {
            "server_version": server_version,
            "python_version": python_version,
        }

    def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        return 200, self.res
class PurgeHistoryRestServlet(RestServlet):
    """Admin endpoint that starts an asynchronous purge of room history.

    The cut-off point may come from the URL ``event_id``, the body's
    ``purge_up_to_event_id``, or the body's ``purge_up_to_ts`` timestamp.
    Returns a ``purge_id`` that can be polled via the status servlet.
    """
    PATTERNS = admin_patterns(
        "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
    )
    def __init__(self, hs: "HomeServer"):
        self.pagination_handler = hs.get_pagination_handler()
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
    async def on_POST(
        self, request: SynapseRequest, room_id: str, event_id: Optional[str]
    ) -> Tuple[int, JsonDict]:
        """Resolve the purge cut-off to a stream token and kick off the purge."""
        await assert_requester_is_admin(self.auth, request)
        body = parse_json_object_from_request(request, allow_empty_body=True)
        delete_local_events = bool(body.get("delete_local_events", False))
        # establish the topological ordering we should keep events from. The
        # user can provide an event_id in the URL or the request body, or can
        # provide a timestamp in the request body.
        if event_id is None:
            event_id = body.get("purge_up_to_event_id")
        if event_id is not None:
            event = await self.store.get_event(event_id)
            if event.room_id != room_id:
                raise SynapseError(400, "Event is for wrong room.")
            # RoomStreamToken expects [int] not Optional[int]
            assert event.internal_metadata.stream_ordering is not None
            room_token = RoomStreamToken(
                event.depth, event.internal_metadata.stream_ordering
            )
            token = await room_token.to_string(self.store)
            logger.info("[purge] purging up to token %s (event_id %s)", token, event_id)
        elif "purge_up_to_ts" in body:
            ts = body["purge_up_to_ts"]
            if not isinstance(ts, int):
                raise SynapseError(
                    400, "purge_up_to_ts must be an int", errcode=Codes.BAD_JSON
                )
            # Map the timestamp to the last event in the room before it.
            stream_ordering = await self.store.find_first_stream_ordering_after_ts(ts)
            r = await self.store.get_room_event_before_stream_ordering(
                room_id, stream_ordering
            )
            if not r:
                logger.warning(
                    "[purge] purging events not possible: No event found "
                    "(received_ts %i => stream_ordering %i)",
                    ts,
                    stream_ordering,
                )
                raise SynapseError(
                    404, "there is no event to be purged", errcode=Codes.NOT_FOUND
                )
            (stream, topo, _event_id) = r
            token = "t%d-%d" % (topo, stream)
            logger.info(
                "[purge] purging up to token %s (received_ts %i => "
                "stream_ordering %i)",
                token,
                ts,
                stream_ordering,
            )
        else:
            raise SynapseError(
                400,
                "must specify purge_up_to_event_id or purge_up_to_ts",
                errcode=Codes.BAD_JSON,
            )
        # The purge itself runs in the background; hand back its id.
        purge_id = self.pagination_handler.start_purge_history(
            room_id, token, delete_local_events=delete_local_events
        )
        return 200, {"purge_id": purge_id}
class PurgeHistoryStatusRestServlet(RestServlet):
    """Admin endpoint for polling the status of a purge-history job."""

    PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")

    def __init__(self, hs: "HomeServer"):
        self.auth = hs.get_auth()
        self.pagination_handler = hs.get_pagination_handler()

    async def on_GET(
        self, request: SynapseRequest, purge_id: str
    ) -> Tuple[int, JsonDict]:
        # Only server admins may inspect purge jobs.
        await assert_requester_is_admin(self.auth, request)
        status = self.pagination_handler.get_purge_status(purge_id)
        if status is None:
            raise NotFoundError("purge id '%s' not found" % purge_id)
        return 200, status.asdict()
########################################################################################
#
# please don't add more servlets here: this file is already long and unwieldy. Put
# them in separate files within the 'admin' package.
#
########################################################################################
class AdminRestResource(JsonResource):
    """The REST resource which gets mounted at /_synapse/admin"""

    def __init__(self, hs: "HomeServer"):
        super(AdminRestResource, self).__init__(hs, canonical_json=False)
        register_servlets(hs, self)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    """Register every admin servlet on ``http_server``.

    Main-process-only servlets are registered last, guarded by the
    worker-app check.
    """
    register_servlets_for_client_rest_resource(hs, http_server)

    # Servlets available on all processes, in the original registration order.
    for servlet_class in (
        ListRoomRestServlet,
        RoomStateRestServlet,
        RoomRestServlet,
        RoomMembersRestServlet,
        JoinRoomAliasServlet,
        VersionServlet,
        UserAdminServlet,
        UserMembershipRestServlet,
        UserTokenRestServlet,
        UserRestServletV2,
        UsersRestServletV2,
        DeviceRestServlet,
        DevicesRestServlet,
        DeleteDevicesRestServlet,
        UserMediaStatisticsRestServlet,
        EventReportDetailRestServlet,
        EventReportsRestServlet,
        PushersRestServlet,
        MakeRoomAdminRestServlet,
        ShadowBanRestServlet,
        ForwardExtremitiesRestServlet,
        RoomEventContextServlet,
        RateLimitRestServlet,
        UsernameAvailableRestServlet,
        ListRegistrationTokensRestServlet,
        NewRegistrationTokenRestServlet,
        RegistrationTokenRestServlet,
    ):
        servlet_class(hs).register(http_server)

    # Some servlets only get registered for the main process.
    if hs.config.worker.worker_app is None:
        SendServerNoticeServlet(hs).register(http_server)
        BackgroundUpdateEnabledRestServlet(hs).register(http_server)
        BackgroundUpdateRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(
    hs: "HomeServer", http_server: HttpServer
) -> None:
    """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
    for servlet_class in (
        WhoisRestServlet,
        PurgeHistoryStatusRestServlet,
        DeactivateAccountRestServlet,
        PurgeHistoryRestServlet,
        ResetPasswordRestServlet,
        SearchUsersRestServlet,
        UserRegisterServlet,
        DeleteGroupAdminRestServlet,
        AccountValidityRenewServlet,
    ):
        servlet_class(hs).register(http_server)

    # Load the media repo ones if we're using them. Otherwise load the servlets
    # which don't need a media repo (typically readonly admin APIs).
    if hs.config.media.can_load_media_repo:
        register_servlets_for_media_repo(hs, http_server)
    else:
        ListMediaInRoom(hs).register(http_server)

    # don't add more things here: new servlets should only be exposed on
    # /_synapse/admin so should not go here. Instead register them in AdminRestResource.
7c731c4002939095d465672d3e36c4bffcec618d | 76,736 | py | Python | python/paddle/hapi/model.py | LWhite027/PaddleBox | b14bcdf285dd8829e11ab12cc815ac1b1ab62694 | [
"Apache-2.0"
] | 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/hapi/model.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 1 | 2021-01-07T11:00:58.000Z | 2021-01-07T11:00:58.000Z | python/paddle/hapi/model.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 18 | 2021-05-19T08:01:49.000Z | 2022-02-11T03:11:32.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import pickle
import numpy as np
import six
import warnings
import time
import socket
import contextlib
from collections import Iterable
import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode, Variable, ParamBase, _current_expected_place
from paddle.fluid.framework import in_dygraph_mode, Variable
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers import collective
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.io import DataLoader, Dataset, DistributedBatchSampler
from paddle.fluid.executor import scope_guard, Executor
from paddle.fluid.dygraph.layers import Layer
from paddle.metric import Metric
from paddle.static import InputSpec as Input
import paddle.distributed as dist
from .callbacks import config_callbacks, EarlyStopping
from .model_summary import summary
__all__ = ['Model', ]
_parallel_context_initialized = False
def to_list(value):
    """Normalise ``value`` to a list.

    ``None`` passes through unchanged, lists/tuples are shallow-copied into
    a new list, and any other object is wrapped in a one-element list.
    """
    if value is None:
        return None
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]
def to_numpy(var):
    """Convert a fluid ``Variable`` (static graph) or ``VarBase`` (dygraph)
    to a numpy array; static variables are read from the global scope."""
    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
    if isinstance(var, fluid.core.VarBase):
        return var.numpy()
    t = global_scope().find_var(var.name).get_tensor()
    return np.array(t)
def flatten_list(l):
    """Flatten one level of a list-of-lists.

    Returns ``(flat, lengths)`` where ``lengths`` records each sub-list's
    size so the structure can be rebuilt by ``restore_flatten_list``.
    """
    assert isinstance(l, list), "not a list"
    flat, lengths = [], []
    for sub in l:
        assert isinstance(sub, list), "sub content not a list"
        lengths.append(len(sub))
        flat.extend(sub)
    return flat, lengths
def restore_flatten_list(l, splits):
    """Rebuild a list-of-lists from a flat list and per-sublist lengths
    (the inverse of ``flatten_list``)."""
    out = []
    rest = l
    for size in splits:
        assert len(rest) >= size, "list length invalid"
        out.append(rest[:size])
        rest = rest[size:]
    return out
def extract_args(func):
    """Return the positional argument names of ``func``.

    Uses ``inspect.getfullargspec`` when available (Python 3) and falls
    back to the legacy ``getargspec`` otherwise.
    """
    getter = getattr(inspect, 'getfullargspec', None)
    if getter is None:
        getter = inspect.getargspec
    return getter(func)[0]
def _all_gather(x, nranks, ring_id=0, use_calc_stream=True):
    # Thin wrapper over fluid's collective all-gather across ``nranks``
    # trainers on communication ring ``ring_id``.
    return collective._c_allgather(
        x, nranks, ring_id=ring_id, use_calc_stream=use_calc_stream)
def wait_server_ready(endpoints):
    """Block until a TCP connection can be opened to every ``host:port``
    endpoint; each probe times out after 2s and failed rounds retry after 3s."""
    assert not isinstance(endpoints, six.string_types)
    while True:
        pending = []
        for endpoint in endpoints:
            parts = endpoint.split(":")
            with contextlib.closing(
                    socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as probe:
                probe.settimeout(2)
                if probe.connect_ex((parts[0], int(parts[1]))) != 0:
                    pending.append(endpoint)
        if pending:
            time.sleep(3)
        else:
            break
def init_communicator(program, rank, nranks, wait_port, current_endpoint,
                      endpoints):
    """Append NCCL-communicator setup ops to ``program``.

    No-op for single-process runs.  Rank 0 optionally waits until all other
    trainer endpoints accept connections, then the program generates a
    shared NCCL id and initialises the communicator on ring 0.
    """
    if nranks < 2:
        return
    other_endpoints = endpoints[:]
    other_endpoints.remove(current_endpoint)
    if rank == 0 and wait_port:
        wait_server_ready(other_endpoints)
    block = program.global_block()
    # Raw variable that will carry the generated NCCL unique id.
    nccl_id_var = block.create_var(
        name=fluid.unique_name.generate('nccl_id'),
        persistable=True,
        type=fluid.core.VarDesc.VarType.RAW)
    block.append_op(
        type='c_gen_nccl_id',
        inputs={},
        outputs={'Out': nccl_id_var},
        attrs={
            'rank': rank,
            'endpoint': current_endpoint,
            'other_endpoints': other_endpoints
        })
    block.append_op(
        type='c_comm_init',
        inputs={'X': nccl_id_var},
        outputs={},
        attrs={
            'nranks': nranks,
            'rank': rank,
            'ring_id': 0,
        })
def prepare_distributed_context(place=None):
    """Initialize the multi-process (NCCL) context for collective training.

    Args:
        place: Device to initialize on. Defaults to the current trainer's
            CUDA device (or ``CUDAPlace(0)`` when running single-rank).

    Returns:
        ParallelStrategy describing the distributed run, or None when
        fewer than 2 ranks are present (nothing to initialize).

    Raises:
        AssertionError: If ``place`` is not a ``fluid.CUDAPlace``.
    """
    if place is None:
        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
            else fluid.CUDAPlace(0)

    strategy = fluid.dygraph.parallel.ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint

    if strategy.nranks < 2:
        return

    # BUG FIX: the original wrote `assert ("Only support CUDAPlace for now.")`,
    # which asserts a non-empty string literal and can never fail. Check the
    # place explicitly so unsupported devices fail loudly as intended.
    if not isinstance(place, fluid.CUDAPlace):
        raise AssertionError("Only support CUDAPlace for now.")

    global _parallel_context_initialized

    if not _parallel_context_initialized:

        def _init_context():
            # Build and run a throwaway program containing only the NCCL
            # communicator initialization ops.
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)

        if fluid.in_dygraph_mode():
            # NCCL init ops must run under static graph; temporarily switch
            # out of dygraph, run the init program, then switch back.
            fluid.disable_dygraph()
            _init_context()
            fluid.enable_dygraph(place)
        else:
            _init_context()

    _parallel_context_initialized = True
    return strategy
def _update_input_info(inputs):
    """Extract shape and dtype lists from ``inputs``.

    Args:
        inputs (Input|list|dict): A single ``Input`` spec, a list of
            objects exposing ``.shape``/``.dtype``, or a name->spec dict.

    Returns:
        tuple|None: ``(shapes, dtypes)`` where ``shapes`` is a list of
        shape lists and ``dtypes`` the matching dtypes, or None when
        ``inputs`` is none of the supported container types.
    """
    if isinstance(inputs, Input):
        return [list(inputs.shape)], [inputs.dtype]
    # NOTE: loop variables renamed from `input`, which shadowed the builtin.
    if isinstance(inputs, list):
        return ([list(spec.shape) for spec in inputs],
                [spec.dtype for spec in inputs])
    if isinstance(inputs, dict):
        specs = [inputs[name] for name in inputs]
        return ([list(spec.shape) for spec in specs],
                [spec.dtype for spec in specs])
    return None
class StaticGraphAdapter(object):
    """
    Model training/inference with a static graph.

    Wraps a ``Model`` and executes it through ``fluid.Executor``, keeping
    one program per mode ('train'/'eval'/'test') cloned from the default
    main program.
    """

    def __init__(self, model):
        super(StaticGraphAdapter, self).__init__()
        self.model = model
        # with `_build_once` gone, parameters are now created in `__init__`
        # so we need to keep track of the parameters already created
        self._startup_prog = fluid.default_startup_program()
        self._orig_prog = fluid.default_main_program()
        self._label_vars = {}  # label variables, keyed by mode
        self._input_vars = {}  # input variables, keyed by mode
        self._endpoints = {}  # per-mode fetch targets: output/loss/metric
        self._loss_endpoint = None
        self._executor = None
        self._progs = {}  # per-mode programs
        self._compiled_progs = {}
        # bookkeeping used to strip DataLoader padding in distributed eval
        self._merge_count = {
            'eval_total': 0,
            'test_total': 0,
            'eval_batch': 0,
            'test_batch': 0
        }
        self._nranks = ParallelEnv().nranks
        self._local_rank = ParallelEnv().local_rank

    @property
    def mode(self):
        # current run mode ('train'/'eval'/'test'), delegated to the model
        return self.model.mode

    @mode.setter
    def mode(self, value):
        self.model.mode = value

    def train_batch(self, inputs, labels=None):
        """Run one optimization step; returns losses (and metrics if set)."""
        assert self.model._optimizer, \
            "model not ready, please call `model.prepare()` first"
        self.mode = 'train'
        return self._run(inputs, labels)

    def eval_batch(self, inputs, labels=None):
        """Run one forward pass in eval mode; returns losses/metrics."""
        self.mode = 'eval'
        return self._run(inputs, labels)

    def predict_batch(self, inputs):
        """Run one forward pass in test mode; returns raw network outputs."""
        self.mode = 'test'
        return self._run(inputs, None)

    def parameters(self, *args, **kwargs):
        return self.model.network.parameters(*args, **kwargs)

    def save(self, path):
        """Pickle parameters to ``path + '.pdparams'`` and optimizer state
        to ``path + '.pdopt'`` (the latter only after 'train' was built)."""

        def _save(state, path):
            # pickle a state dict, converting fluid Variables to numpy first
            if not state:
                return
            state = {
                k: to_numpy(v) if isinstance(v, Variable) else v
                for k, v in state.items()
            }
            with open(path, 'wb') as f:
                pickle.dump(state, f)

        base = os.path.basename(path)
        assert base != "", "path should be of 'dirname/filename' format"
        dir_name = os.path.dirname(path)
        if dir_name and not os.path.exists(dir_name):
            os.makedirs(dir_name)
        param_path = path + ".pdparams"
        _save(self.model.network.state_dict(), param_path)
        prog = self._progs.get('train', None)
        if prog is None or self.model._optimizer is None:
            # nothing has been trained, so there is no optimizer state to save
            return
        # XXX `optimizer.state_dict()` only work in dygraph mode
        optim_path = path + ".pdopt"
        optim = {
            p.name: p
            for p in filter(is_belong_to_optimizer, prog.list_vars())
        }
        if not optim:
            return
        _save(optim, optim_path)

    def load(self, param_state_pairs, optim_state):
        """Write loaded parameter states (and optionally optimizer states)
        into the global scope."""
        if self._executor is None:
            executor = fluid.Executor(fluid.CPUPlace())._default_executor
        else:
            executor = self._executor._default_executor

        # restore parameter states
        fluid.core._create_loaded_parameter(
            [param for param, state in param_state_pairs],
            global_scope(), executor)
        for param, state in param_state_pairs:
            self._set_var(param, state)

        # restore optimizer states
        # FIXME what if a different optimizer is used?
        if not self.model._optimizer or not optim_state:
            return
        self._load_optimizer(optim_state, executor)

    def _load_optimizer(self, state, executor):
        """Restore optimizer accumulator variables from a saved state dict,
        translating dygraph-style state names to static-graph names where
        needed."""
        prog = self._progs.get('train', None)
        optim = list(filter(is_belong_to_optimizer, prog.list_vars()))
        if not optim:
            return

        fluid.core._create_loaded_parameter(optim, global_scope(), executor)

        converted_state = dict(state)
        for var in optim:
            if var.name in ["@LR_DECAY_COUNTER@", "global_step"]:
                # When using learning rate scheduler, dygraph would name the
                # global step var as "global_step" to save, while static-graph
                # would has a state var named as "@LR_DECAY_COUNTER@".
                # NOTE: dygraph saved global_step is 1 larger than that in
                # static-graph, since the time of global_step to increase is
                # different.
                state_val = (
                    np.array(converted_state.pop("global_step")) - 1
                ) if "global_step" in converted_state else converted_state.pop(
                    "@LR_DECAY_COUNTER@", None)
                if state_val is not None:
                    converted_state[var.name] = state_val
            elif var.name.startswith("learning_rate_"):
                # When using static learning rate, static-graph would make it
                # a persistable var named 'unique_name.generate("learning_rate")',
                # However, dygraph wouldn't save it.
                if var.name not in state:
                    continue
            else:
                # moment and other accumulators
                if var.name not in converted_state:
                    # try to convert from dygraph name
                    opt_name = self.model._optimizer._name
                    opt_cls_name = self.model._optimizer.__class__.__name__
                    opt_unq_name = None
                    for name in self.model._optimizer._accumulators.keys():
                        accum_name = name if opt_name is None else name[len(
                            opt_name) + 1:]
                        for param_name, state_var in self.model._optimizer._accumulators[
                                name].items():
                            if opt_unq_name is None:
                                # can not infer out the exact unique(opt_name),
                                # thus try to extract rather than generate
                                for state_key in sorted(
                                        state.keys(),
                                        key=lambda x: len(x),
                                        reverse=True):
                                    prefix = param_name + "_" + (
                                        opt_cls_name
                                        if opt_name is None else opt_name) + "_"
                                    if state_key.startswith(prefix):
                                        prefix_offset = state_key[len(
                                            prefix):].find("_") + len(prefix)
                                        opt_unq_name = state_key[len(
                                            param_name + "_"):prefix_offset]
                                        # TODO: assert
                                        # assert opt_unq_name is None
                            # gen(param.name + "_" + gen(opt_name) + "_" + accum_name)
                            # always end with "_0" since the unique optimizer._name
                            dy_state_name = (param_name + "_" + opt_unq_name +
                                             "_" + accum_name + "_0")
                            converted_state[
                                state_var.name] = converted_state.pop(
                                    dy_state_name)

            assert var.name in converted_state, \
                "variable [{}] is not in optimizer state file".format(var.name)
            self._set_var(var, converted_state[var.name])

    def _set_var(self, var, ndarray):
        """Copy ``ndarray`` into scope variable ``var``'s tensor, on the
        place the tensor currently lives on."""
        t = global_scope().find_var(var.name).get_tensor()
        p = t._place()
        if p.is_cpu_place():
            place = fluid.CPUPlace()
        elif p.is_cuda_pinned_place():
            place = fluid.CUDAPinnedPlace()
        else:
            p = fluid.core.Place()
            p.set_place(t._place())
            place = fluid.CUDAPlace(p.gpu_device_id())

        t.set(ndarray, place)

    def _run(self, inputs, labels=None):
        """Feed one batch through the compiled program for the current mode
        and post-process the fetched results into losses/metrics/outputs."""
        compiled_prog = self._compiled_progs.get(self.mode, None)
        assert compiled_prog, \
            "Model is not ready, please call `model.prepare()` first"

        inputs = to_list(inputs)
        if labels is not None:
            labels = to_list(labels)
        assert len(inputs) == len(self._input_vars[self.mode]), \
            "number of inputs" \
            + " does not match number of arguments of `forward` method"

        feed = {}
        input_names = [v.name for v in self._input_vars[self.mode]]
        for idx, n in enumerate(input_names):
            # train and test may take different arguments
            if inputs[idx] is not None:
                feed[n] = inputs[idx]
        if labels is not None:
            for idx, v in enumerate(self._label_vars[self.mode]):
                feed[v.name] = labels[idx]

        endpoints = self._endpoints[self.mode]
        if self.mode == 'test':
            fetch_list = endpoints['output']
        else:
            # metrics are nested lists; flatten for fetching, remember sizes
            metric_list, metric_splits = flatten_list(endpoints['metric'])
            fetch_list = endpoints['loss'] + metric_list
            num_loss = len(endpoints['loss'])

        # if fetch Variable is same as input Variable, do not fetch
        # from program, get it from input directly
        pruned_fetch_list = []
        pruned_fetch_idx_name_map = [""] * len(fetch_list)
        for i, fetch_var in enumerate(fetch_list):
            if fetch_var.name in feed.keys():
                pruned_fetch_idx_name_map[i] = fetch_var.name
            else:
                pruned_fetch_list.append(fetch_var)

        rets = self._executor.run(compiled_prog,
                                  feed=feed,
                                  fetch_list=pruned_fetch_list,
                                  return_numpy=False)

        # restore pruned fetch_list Variable from feeds
        for i, name in enumerate(pruned_fetch_idx_name_map):
            if len(name) > 0:
                rets.insert(i, feed[name])

        # LoDTensor cannot be fetch as numpy directly
        rets = [np.array(v) for v in rets]
        if self.mode == 'test':
            return rets[:]

        metric_states = restore_flatten_list(rets[num_loss:], metric_splits)
        metrics = []
        for metric, state in zip(self.model._metrics, metric_states):
            # cut off padding size
            if self.mode != 'train' and self.model._test_dataloader is not None \
                    and isinstance(self.model._test_dataloader, DataLoader) \
                    and self._nranks > 1:
                total_size = len(self.model._test_dataloader.dataset)
                # TODO: fixme if have better way to get batch size
                samples = state[0].shape[0]
                current_count = self._merge_count.get(self.mode + '_total', 0)
                if current_count + samples >= total_size:
                    # last (possibly padded) batch: trim to the dataset size
                    state = [
                        s[:int(total_size - current_count), ...] for s in state
                    ]
                    self._merge_count[self.mode + '_total'] = 0
                    self._merge_count[self.mode + '_batch'] = int(total_size -
                                                                  current_count)
                else:
                    self._merge_count[self.mode + '_total'] += samples
                    self._merge_count[self.mode + '_batch'] = samples

            metrics.append(metric.update(*state))

        if num_loss and len(metrics):
            return rets[:num_loss], metrics
        else:
            return rets[:num_loss] if num_loss else metrics

    def prepare(self):
        """Build, compile and initialize programs for all three modes."""
        modes = ['train', 'eval', 'test']
        for mode in modes:
            self._make_program(mode)
            self._compile_and_initialize(self._progs[mode], mode)

    def _make_program(self, mode):
        """Clone the original program for ``mode``, wire up feeds, forward,
        loss, metrics and (for 'train') the optimizer."""
        prog = self._progs.get(mode, None)
        if prog is not None:
            return

        prog = self._orig_prog.clone()
        # NOTE: When defining learning rate scheduling in static-graph, ops to
        # increase the global step var and calculate learning rate would be
        # prepended into _orig_prog. test program maked by `_orig_prog.clone`
        # also would include these ops. Thus must prune these ops in test
        # program, otherwise the global step would be changed in test.
        if mode != 'train':
            for op in list(prog.global_block().ops):
                prog.global_block()._remove_op(0)
        if mode == 'train' and self.model._optimizer \
                and self.model._optimizer._learning_rate_map:
            # HACK workaround learning rate map issue
            lr_var = self.model._optimizer._learning_rate_map[self._orig_prog]
            new_lr_var = prog.global_block().vars[lr_var.name]
            self.model._optimizer._learning_rate_map[prog] = new_lr_var

        losses = []
        metrics = []
        with fluid.program_guard(prog, self._startup_prog):
            inputs = self.model._inputs
            labels = self.model._labels if self.model._labels else []
            inputs = [k._create_feed_layer() for k in to_list(inputs)]
            labels = [k._create_feed_layer() for k in to_list(labels)]
            self._label_vars[mode] = labels
            outputs = to_list(self.model.network.forward(*inputs))

            if mode != 'test' and self.model._loss:
                losses = self.model._loss(*(outputs + labels))

            # in distributed eval, gather outputs/labels from all ranks so
            # metrics see the full batch
            if self._nranks > 1 and mode != 'train':
                outputs = [_all_gather(o, self._nranks) for o in outputs]
                if mode != 'test':
                    labels = [_all_gather(l, self._nranks) for l in labels]

            if mode != 'test':
                for metric in self.model._metrics:
                    metrics.append(to_list(metric.compute(*(outputs + labels))))

            if mode == 'train' and self.model._optimizer:
                self._loss_endpoint = fluid.layers.sum(losses)
                if self._nranks > 1:
                    # wrap the optimizer for collective (all-reduce) training
                    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
                    fleet.init(role)
                    dist_strategy = DistributedStrategy()
                    dist_strategy.mode = "collective"
                    dist_strategy.collective_mode = "grad_allreduce"
                    self.model._optimizer = fleet.distributed_optimizer(
                        self.model._optimizer, strategy=dist_strategy)

                self.model._optimizer.minimize(self._loss_endpoint)

        if mode != 'train':  # clone again to put it in test mode
            prog = prog.clone(for_test=True)

        self._input_vars[mode] = inputs

        self._progs[mode] = prog
        self._endpoints[mode] = {
            "output": outputs,
            "loss": to_list(losses),
            "metric": metrics
        }

    def _compile_and_initialize(self, prog, mode):
        """Create the executor on first call, run the (pruned) startup
        program for still-uninitialized vars, and cache the compiled
        program for ``mode``."""
        compiled_prog = self._compiled_progs.get(mode, None)
        if compiled_prog is not None:
            return compiled_prog

        assert self.model._place is not None, \
            "device is not set, please call `model.prepare()` first"

        place = self.model._place

        # XXX *ALL WEIGHTS* should be initialized upon model construction
        # even if `forward()` may run different code path for different mode
        # therefore startup program only needs to run once
        if self._executor is None:
            self._executor = fluid.Executor(place)
            # XXX incremental initialization
            uninitialized = []
            for var_py in self._startup_prog.list_vars():
                var = fluid.global_scope().find_var(var_py.name)
                if not var_py.name.startswith('nccl_id') and var and \
                        var.get_tensor()._is_initialized():
                    continue

                uninitialized.append(var_py)
            if uninitialized:
                startup_prog = self._startup_prog._prune(uninitialized)
                self._executor.run(startup_prog)

        if self._nranks < 2:
            compiled_prog = fluid.CompiledProgram(prog)
        else:
            # distributed programs are run uncompiled
            compiled_prog = prog

        self._compiled_progs[mode] = compiled_prog
class DynamicGraphAdapter(object):
    """
    Model training/inference with a dynamic graph (imperative mode).

    Wraps a ``Model`` and runs its network eagerly; in multi-rank runs the
    network is wrapped in ``DataParallel`` for gradient all-reduce.
    """

    def __init__(self, model):
        super(DynamicGraphAdapter, self).__init__()
        self.model = model
        self._nranks = ParallelEnv().nranks
        self._local_rank = ParallelEnv().local_rank
        # bookkeeping used to strip DataLoader padding in distributed eval
        self._merge_count = {
            'eval_total': 0,
            'test_total': 0,
            'eval_batch': 0,
            'test_batch': 0
        }
        self._input_info = None
        if self._nranks > 1:
            stradegy = fluid.dygraph.parallel.ParallelStrategy()
            stradegy.nranks = ParallelEnv().nranks
            stradegy.local_rank = ParallelEnv().local_rank
            stradegy.trainer_endpoints = ParallelEnv().trainer_endpoints
            stradegy.current_endpoint = ParallelEnv().current_endpoint
            self.ddp_model = fluid.dygraph.parallel.DataParallel(
                self.model.network, stradegy)

    @property
    def mode(self):
        # current run mode ('train'/'eval'/'test'), delegated to the model
        return self.model.mode

    @mode.setter
    def mode(self, value):
        self.model.mode = value

    # TODO multi device in dygraph mode not implemented at present time
    def train_batch(self, inputs, labels=None):
        """Run one forward/backward/optimize step; returns losses as numpy
        (and metrics, if any are configured)."""
        assert self.model._optimizer, \
            "model not ready, please call `model.prepare()` first"
        self.model.network.train()
        self.mode = 'train'
        inputs = to_list(inputs)
        self._input_info = _update_input_info(inputs)
        labels = labels or []
        labels = [to_variable(l) for l in to_list(labels)]

        if self._nranks > 1:
            outputs = self.ddp_model.forward(* [to_variable(x) for x in inputs])
        else:
            outputs = self.model.network.forward(
                * [to_variable(x) for x in inputs])

        losses = self.model._loss(*(to_list(outputs) + labels))
        losses = to_list(losses)
        final_loss = fluid.layers.sum(losses)
        final_loss.backward()

        self.model._optimizer.minimize(final_loss)
        self.model.network.clear_gradients()
        metrics = []
        for metric in self.model._metrics:
            metric_outs = metric.compute(*(to_list(outputs) + labels))
            m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
            metrics.append(m)

        return ([to_numpy(l) for l in losses], metrics) \
            if len(metrics) > 0 else [to_numpy(l) for l in losses]

    def eval_batch(self, inputs, labels=None):
        """Run one evaluation step; returns losses and/or metric results
        depending on what the model was prepared with."""
        self.model.network.eval()
        self.mode = 'eval'
        inputs = to_list(inputs)
        self._input_info = _update_input_info(inputs)
        labels = labels or []
        labels = [to_variable(l) for l in to_list(labels)]

        outputs = self.model.network.forward(* [to_variable(x) for x in inputs])
        if self.model._loss:
            losses = self.model._loss(*(to_list(outputs) + labels))
            losses = to_list(losses)

        if self._nranks > 1:
            # gather outputs/labels from all ranks so metrics see all samples
            outputs = [_all_gather(o, self._nranks) for o in to_list(outputs)]
            labels = [_all_gather(l, self._nranks) for l in labels]
        metrics = []
        for metric in self.model._metrics:
            # cut off padding value.
            if self.model._test_dataloader is not None and self._nranks > 1 \
                    and isinstance(self.model._test_dataloader, DataLoader):
                total_size = len(self.model._test_dataloader.dataset)
                samples = outputs[0].shape[0]
                current_count = self._merge_count.get(self.mode + '_total', 0)
                if current_count + samples >= total_size:
                    # last (possibly padded) batch: trim to the dataset size
                    outputs = [
                        o[:int(total_size - current_count)] for o in outputs
                    ]
                    labels = [
                        l[:int(total_size - current_count)] for l in labels
                    ]
                    self._merge_count[self.mode + '_total'] = 0
                    self._merge_count[self.mode + '_batch'] = int(total_size -
                                                                  current_count)
                else:
                    self._merge_count[self.mode + '_total'] += samples
                    self._merge_count[self.mode + '_batch'] = samples

            metric_outs = metric.compute(*(to_list(outputs) + labels))
            m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
            metrics.append(m)

        if self.model._loss and len(metrics):
            return [to_numpy(l) for l in losses], metrics
        elif self.model._loss:
            return [to_numpy(l) for l in losses]
        else:
            return metrics

    def predict_batch(self, inputs):
        """Run one inference step; returns network outputs as numpy."""
        self.model.network.eval()
        self.mode = 'test'
        inputs = [to_variable(x) for x in to_list(inputs)]
        self._input_info = _update_input_info(inputs)
        outputs = self.model.network.forward(*inputs)
        if self._nranks > 1 and isinstance(self.model._place, fluid.CUDAPlace):
            outputs = [_all_gather(o, self._nranks) for o in to_list(outputs)]

        return [to_numpy(o) for o in to_list(outputs)]

    def parameters(self, *args, **kwargs):
        return self.model.network.parameters(*args, **kwargs)

    def save(self, path):
        """Save network parameters (and optimizer state, when present) via
        ``fluid.save_dygraph``."""
        params = self.model.network.state_dict()
        fluid.save_dygraph(params, path)
        if self.model._optimizer is None:
            return
        if self.model._optimizer.state_dict():
            optim = self.model._optimizer.state_dict()
            fluid.save_dygraph(optim, path)

    def load(self, param_state_pairs, optim_state):
        """Restore parameter values and (optionally) optimizer state,
        translating static-graph state names to dygraph names."""
        # restore parameter states
        for param, state in param_state_pairs:
            param.set_value(state)

        # resotre optimizer states
        if not self.model._optimizer or not optim_state:
            return

        # If optimizer performs set_state_dict when state vars haven't been created,
        # which would happen when set_state_dict before minimize, the state would be
        # stored in optimizer._accumulators_holder and loaded lazily.
        # To contrive this when loading from static-graph saved states, extend
        # state dict to include keys named accoring to dygraph naming rules.
        # TODO: if len(self.model._optimizer._accumulators) > 0
        converted_state = dict(optim_state)
        opt_unq_name = self.model._optimizer._name
        if opt_unq_name is None:
            opt_unq_name = ''

        opt_cls_name = self.model._optimizer.__class__.__name__
        opt_name = opt_unq_name[:opt_unq_name.rfind("_")]  # remove suffix idx
        param_names = [param.name for param in self.model.network.parameters()]
        for var_name, state_var in sorted(
                optim_state.items(), key=lambda x: len(x[0]), reverse=True):
            if var_name in ["@LR_DECAY_COUNTER@", "global_step"]:
                # NOTE: dygraph saved global_step is 1 larger than that in
                # static-graph, since the time of global_step to increase is
                # different.
                if var_name == "@LR_DECAY_COUNTER@":
                    converted_state["global_step"] = np.array(
                        converted_state.pop("@LR_DECAY_COUNTER@")) + 1
            else:
                # moment and other accumulators
                # extend state dict to include promising dygraph names
                for param_name in param_names:
                    if var_name.startswith(param_name + "_" + opt_name):
                        # when init optimizer with name
                        accum_name = var_name[len(param_name + "_" + opt_name +
                                                  "_"):]
                    elif var_name.startswith(param_name +
                                             "_") and opt_name == opt_cls_name:
                        # when init optimizer without name
                        accum_name = var_name[len(param_name + "_"):]
                    else:
                        continue
                    # remove suffix idx
                    accum_name = accum_name[:accum_name.rfind("_")]
                    # state names always end with "_0" in dygraph because of the
                    # unique optimizer._name
                    dy_state_name = (param_name + "_" + opt_unq_name + "_" +
                                     accum_name + "_0")
                    converted_state[dy_state_name] = state_var

        if not hasattr(self.model._optimizer, 'set_state_dict'):
            warnings.warn(
                "paddle.fluid.optimizer is deprecated in API 2.0, please use paddle.optimizer instead."
            )
            self.model._optimizer.set_dict(converted_state)
        else:
            self.model._optimizer.set_state_dict(converted_state)
class Model(object):
"""
An Model object is network with training and inference features.
Dynamic graph and static graph are supported at the same time,
switched by `paddle.enable_static()`. The usage is as follows.
But note, the switching between dynamic and static should be before
instantiating a Model. The input description, i.e, paddle.static.InputSpec,
must be required for static graph.
Args:
network (paddle.nn.Layer): The network is an instance of
paddle.nn.Layer.
inputs (InputSpec|list|dict|None): `inputs`, entry points of network,
could be a InputSpec instance, or lits of InputSpec instances,
or dict ({name: InputSpec}), and it couldn't be None in static
graph.
labels (InputSpec|list|None): `labels`, entry points of network,
could be a InputSpec instnace or lits of InputSpec instances,
or None. For static graph, if labels is required in loss,
labels must be set. Otherwise, it could be None.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Flatten(1),
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
# inputs and labels are not required for dynamic graph.
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim,
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy())
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
model.fit(data, epochs=2, batch_size=32, verbose=1)
"""
def __init__(self, network, inputs=None, labels=None):
self.mode = 'train'
self.network = network
self._inputs = None
self._labels = None
self._loss = None
self._loss_weights = None
self._optimizer = None
self._input_info = None
self._is_shape_inferred = False
self._test_dataloader = None
self.stop_training = False
if not in_dygraph_mode():
if not isinstance(inputs, (list, dict, Input)):
raise TypeError(
"'inputs' must be list or dict, and couldn't be None.")
elif inputs:
self._input_info = _update_input_info(inputs)
self._inputs = self._verify_spec(inputs, is_input=True)
self._labels = self._verify_spec(labels)
# init backend
if fluid.in_dygraph_mode():
dist.init_parallel_env()
self._adapter = DynamicGraphAdapter(self)
else:
self._adapter = StaticGraphAdapter(self)
def train_batch(self, inputs, labels=None):
"""
Run one training step on a batch of data.
Args:
inputs (numpy.ndarray|Tensor|list): Batch of input data. It could
be a numpy array or paddle.Tensor, or a list of arrays or
tensors (in case the model has multiple inputs).
labels (numpy.ndarray|Tensor|list): Batch of labels. It could be
a numpy array or paddle.Tensor, or a list of arrays or tensors
(in case the model has multiple labels). If has no labels,
set None. Default is None.
Returns:
A list of scalar training loss if the model has no metrics,
or a tuple (list of scalar loss, list of metrics) if the model
set metrics.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
data = np.random.random(size=(4,784)).astype(np.float32)
label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
loss = model.train_batch([data], [label])
print(loss)
"""
loss = self._adapter.train_batch(inputs, labels)
if fluid.in_dygraph_mode() and self._input_info is None:
self._update_inputs()
return loss
def eval_batch(self, inputs, labels=None):
"""
Run one evaluating step on a batch of data.
Args:
inputs (numpy.ndarray|Tensor|list): Batch of input data. It could
be a numpy array or paddle.Tensor, or a list of arrays or
tensors (in case the model has multiple inputs).
labels (numpy.ndarray|Tensor|list): Batch of labels. It could be
a numpy array or paddle.Tensor, or a list of arrays or tensors
(in case the model has multiple labels). If has no labels,
set None. Default is None.
Returns:
A list of scalar testing loss if the model has no metrics,
or a tuple (list of scalar loss, list of metrics) if the model
set metrics.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim,
paddle.nn.CrossEntropyLoss())
data = np.random.random(size=(4,784)).astype(np.float32)
label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
loss = model.eval_batch([data], [label])
print(loss)
"""
loss = self._adapter.eval_batch(inputs, labels)
if fluid.in_dygraph_mode() and self._input_info is None:
self._update_inputs()
return loss
def predict_batch(self, inputs):
"""
Run one predicting step on a batch of data.
Args:
inputs (numpy.ndarray|Tensor|list): Batch of input data. It could
be a numpy array or paddle.Tensor, or a list of arrays or
tensors (in case the model has multiple inputs).
Returns:
A list of numpy.ndarray of predictions, that is the outputs
of Model forward.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10),
nn.Softmax())
model = paddle.Model(net, input, label)
model.prepare()
data = np.random.random(size=(4,784)).astype(np.float32)
out = model.predict_batch([data])
print(out)
"""
loss = self._adapter.predict_batch(inputs)
if fluid.in_dygraph_mode() and self._input_info is None:
self._update_inputs()
return loss
def save(self, path, training=True):
"""
This function saves parameters, optimizer information or model and
paramters only for inference to path. It depends on the parameter
`training`.
If `training` is set to True, the parameters saved contain all
the trainable Variable, will save to a file with suffix ".pdparams".
The optimizer information contains all the variable used by optimizer.
For Adam optimizer, contains beta1, beta2, momentum etc. All the
information will save to a file with suffix ".pdopt". (If the optimizer
have no variable need to save (like SGD), the fill will not generated).
This function will silently overwrite existing file at the target location.
If `training` is set to False, only inference model will be saved.
Args:
path (str): The file prefix to save model. The format
is 'dirname/file_prefix' or 'file_prefix'. if empty str.
A exception will be raised.
training (bool, optional): Whether to save for training. If not, save
for inference only. Default: True.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.static import InputSpec
class Mnist(nn.Layer):
def __init__(self):
super(Mnist, self).__init__()
self.net = nn.Sequential(
nn.Flatten(1),
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10),
nn.Softmax())
def forward(self, x):
return self.net(x)
dynamic = True # False
# if use static graph, do not set
if not dynamic:
paddle.enable_static()
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(Mnist(), input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
model.fit(data, epochs=1, batch_size=32, verbose=0)
model.save('checkpoint/test') # save for training
model.save('inference_model', False) # save for inference
"""
if ParallelEnv().local_rank == 0:
if not training:
self._save_inference_model(path)
else:
self._adapter.save(path)
def load(self, path, skip_mismatch=False, reset_optimizer=False):
"""
Load from files storing the model states and optimizer states. The file
for optimizer states is not necessary if no need to restore the optimizer.
NOTE: parameters are retrieved out from the file storing model states
accoring to their structured names.
For fine-tuning or transfer-learning models where some of the layers have
changed, keep parameters needed to restore have same structured names in
the pre-trained model and fine-tuning model.
Args:
path (str): The prefix of files storing the model states and
optimizer states. The files would be `path.pdparams` and
`path.pdopt` separately, and the latter is not necessary
when no need to restore.
skip_mismatch (bool): Whether to skip the loading of mismatch
parameter or raise an error when mismatch happens (not found
the parameter in file storing model states of or receives a
mismatch shape).
reset_optimizer (bool): If True, ignore the providing file storing
optimizer states and initialize optimizer states from scratch.
Otherwise, restore optimizer states from `path.pdopt` if
a optimizer has been set to the model. Default False.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu')
input = InputSpec([None, 784], 'float32', 'x')
model = paddle.Model(nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10),
nn.Softmax()), input)
model.save('checkpoint/test')
model.load('checkpoint/test')
"""
def _load_state_from_path(path):
if not os.path.exists(path):
return
with open(path, 'rb') as f:
return pickle.load(f) if six.PY2 else pickle.load(
f, encoding='latin1')
def _check_match(key, param):
state = param_state.get(key, None)
if state is None:
raise ValueError(
"{} is not found in the providing file.".format(key))
if list(state.shape) != list(param.shape):
raise ValueError(
"{} receives a shape {}, but the expected shape is {}.".
format(key, list(state.shape), list(param.shape)))
return param, state
def _strip_postfix(path):
path, ext = os.path.splitext(path)
assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \
"Unknown postfix {} from weights".format(ext)
return path
path = _strip_postfix(path)
param_state = _load_state_from_path(path + ".pdparams")
assert param_state, "Failed to load parameters, please check path."
matched_param_state = []
for key, param in self.network.state_dict().items():
try:
match_res = _check_match(key, param)
except ValueError as err:
if skip_mismatch:
warnings.warn(
("Skip loading for {}. ".format(key) + str(err)))
# reset optimizer when mismatch happens
reset_optimizer = True
else:
raise err
matched_param_state.append(match_res)
optim_state = None if reset_optimizer else _load_state_from_path(
path + ".pdopt")
return self._adapter.load(matched_param_state, optim_state)
def parameters(self, *args, **kwargs):
"""
Returns a list of parameters of the model.
Returns:
A list of Parameter in static graph.
A list of ParamBase in dynamic graph.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
input = InputSpec([None, 784], 'float32', 'x')
model = paddle.Model(nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10)), input)
params = model.parameters()
"""
return self._adapter.parameters()
    def prepare(self, optimizer=None, loss=None, metrics=None):
        """
        Configures the model before running.
        Args:
            optimizer (Optimizer|None): Optimizer must be set in training
                and should be a Optimizer instance. It can be None in eval
                and test mode.
            loss (Loss|callable function|None): Loss function can
                be a `paddle.nn.Layer` instance or any callable function
                taken the predicted values and ground truth values as input.
                It can be None when there is no loss.
            metrics (Metric|list of Metric|None): If metrics is set, all
                metrics will be calculated and output in train/eval mode.
        Returns:
            None
        """
        self._place = _get_device()
        if isinstance(self._place, fluid.CUDAPlace):
            # Lazily initialize the multi-GPU context exactly once per
            # process (module-level flag guards against re-initialization).
            global _parallel_context_initialized
            if ParallelEnv().nranks > 1 and not _parallel_context_initialized:
                if fluid.in_dygraph_mode():
                    main_prog_seed = fluid.default_main_program().random_seed
                    startup_prog_seed = fluid.default_startup_program(
                    ).random_seed
                    # Re-enter dygraph mode bound to the selected place.
                    fluid.disable_dygraph()
                    paddle.disable_static(self._place)
                    # enable_dygraph would create and switch to a new program,
                    # thus also copy seed to the new program
                    fluid.default_main_program().random_seed = main_prog_seed
                    fluid.default_startup_program(
                    ).random_seed = startup_prog_seed
                else:
                    prepare_distributed_context(self._place)
                _parallel_context_initialized = True
        self._optimizer = optimizer
        if loss is not None:
            if not isinstance(loss, paddle.nn.Layer) and not callable(loss):
                raise TypeError("'loss' must be sub classes of " \
                    "`paddle.nn.Layer` or any callable function.")
        self._loss = loss
        # Normalize `metrics` to a list and validate each entry up front so a
        # bad metric fails here rather than mid-training.
        metrics = metrics or []
        for metric in to_list(metrics):
            assert isinstance(metric, Metric), \
                "{} is not sub class of Metric".format(
                    metric.__class__.__name__)
        self._metrics = to_list(metrics)
        # Static-graph mode needs the adapter to build programs now.
        if not in_dygraph_mode():
            self._adapter.prepare()
    def fit(
            self,
            train_data=None,
            eval_data=None,
            batch_size=1,
            epochs=1,
            eval_freq=1,
            log_freq=10,
            save_dir=None,
            save_freq=1,
            verbose=2,
            drop_last=False,
            shuffle=True,
            num_workers=0,
            callbacks=None, ):
        """
        Trains the model for a fixed number of epochs. If `eval_data` is set,
        evaluation will be done at the end of each epoch.
        Args:
            train_data (Dataset|DataLoader): An iterable data loader is used for
                train. An instance of paddle paddle.io.Dataset or
                paddle.io.Dataloader is recommended. Default: None.
            eval_data (Dataset|DataLoader): An iterable data loader is used for
                evaluation at the end of epoch. If None, will not do evaluation.
                An instance of paddle.io.Dataset or paddle.io.Dataloader
                is recommended. Default: None.
            batch_size (int): Integer number. The batch size of train_data
                and eval_data. When train_data and eval_data are both the
                instance of Dataloader, this parameter will be ignored.
                Default: 1.
            epochs (int): Integer number. The number of epochs to train
                the model. Default: 1.
            eval_freq (int): The frequency, in number of epochs, an evaluation
                is performed. Default: 1.
            log_freq (int): The frequency, in number of steps, the training logs
                are printed. Default: 10.
            save_dir(str|None): The directory to save checkpoint during training.
                If None, will not save checkpoint. Default: None.
            save_freq (int): The frequency, in number of epochs, to save
                checkpoint. Default: 1.
            verbose (int): The verbosity mode, should be 0, 1, or 2. 0 = silent,
                1 = progress bar, 2 = one line per epoch. Default: 2.
            drop_last (bool): Whether drop the last incomplete batch of
                train_data when dataset size is not divisible by the batch size.
                When train_data is an instance of Dataloader, this parameter
                will be ignored. Default: False.
            shuffle (bool): Whether to shuffle train_data. When train_data is
                an instance of Dataloader, this parameter will be ignored.
                Default: True.
            num_workers (int): The number of subprocess to load data, 0 for no
                subprocess used and loading data in main process.
                When train_data and eval_data are both the instance of
                Dataloader, this parameter will be ignored. Default: 0.
            callbacks (Callback|None): A list of `Callback` instances to apply
                during training. If None, `ProgBarLogger` and `ModelCheckpoint`
                are automatically inserted. Default: None.
        Returns:
            None
        Examples:
            1. An example use Dataset and set batch size, shuffle in fit.
               How to make a batch is done internally.
            .. code-block:: python
              import paddle
              import paddle.vision.transforms as T
              from paddle.vision.datasets import MNIST
              from paddle.static import InputSpec

              dynamic = True
              if not dynamic:
                  paddle.enable_static()

              transform = T.Compose([
                  T.Transpose(),
                  T.Normalize([127.5], [127.5])
              ])
              train_dataset = MNIST(mode='train', transform=transform)
              val_dataset = MNIST(mode='test', transform=transform)

              input = InputSpec([None, 1, 28, 28], 'float32', 'image')
              label = InputSpec([None, 1], 'int64', 'label')

              model = paddle.Model(
                  paddle.vision.models.LeNet(),
                  input, label)
              optim = paddle.optimizer.Adam(
                  learning_rate=0.001, parameters=model.parameters())
              model.prepare(
                  optim,
                  paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 2)))
              model.fit(train_dataset,
                        val_dataset,
                        epochs=2,
                        batch_size=64,
                        save_dir='mnist_checkpoint')

            2. An example use DataLoader, batch size and shuffle is set in
               DataLoader.
            .. code-block:: python
              import paddle
              import paddle.vision.transforms as T
              from paddle.vision.datasets import MNIST
              from paddle.static import InputSpec

              dynamic = True
              if not dynamic:
                  paddle.enable_static()

              transform = T.Compose([
                  T.Transpose(),
                  T.Normalize([127.5], [127.5])
              ])
              train_dataset = MNIST(mode='train', transform=transform)
              train_loader = paddle.io.DataLoader(train_dataset,
                  batch_size=64)
              val_dataset = MNIST(mode='test', transform=transform)
              val_loader = paddle.io.DataLoader(val_dataset,
                  batch_size=64)

              input = InputSpec([None, 1, 28, 28], 'float32', 'image')
              label = InputSpec([None, 1], 'int64', 'label')

              model = paddle.Model(
                  paddle.vision.models.LeNet(), input, label)
              optim = paddle.optimizer.Adam(
                  learning_rate=0.001, parameters=model.parameters())
              model.prepare(
                  optim,
                  paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 2)))
              model.fit(train_loader,
                        val_loader,
                        epochs=2,
                        save_dir='mnist_checkpoint')
        """
        assert train_data is not None, \
                "train_data must be given!"
        # Wrap a plain Dataset in a DataLoader with a distributed-aware batch
        # sampler; a user-supplied DataLoader is used as-is (its own
        # batch_size/shuffle settings win over the arguments here).
        if isinstance(train_data, Dataset):
            train_sampler = DistributedBatchSampler(
                train_data,
                batch_size=batch_size,
                shuffle=shuffle,
                drop_last=drop_last)
            train_loader = DataLoader(
                train_data,
                batch_sampler=train_sampler,
                places=self._place,
                num_workers=num_workers,
                return_list=True)
        else:
            train_loader = train_data
        # Same wrapping logic for the optional evaluation data.
        if eval_data is not None and isinstance(eval_data, Dataset):
            eval_sampler = DistributedBatchSampler(
                eval_data, batch_size=batch_size)
            eval_loader = DataLoader(
                eval_data,
                batch_sampler=eval_sampler,
                places=self._place,
                num_workers=num_workers,
                return_list=True)
        elif eval_data is not None:
            eval_loader = eval_data
        else:
            eval_loader = None
        do_eval = eval_loader is not None
        self._test_dataloader = eval_loader
        steps = self._len_data_loader(train_loader)
        cbks = config_callbacks(
            callbacks,
            model=self,
            epochs=epochs,
            steps=steps,
            log_freq=log_freq,
            save_freq=save_freq,
            save_dir=save_dir,
            verbose=verbose,
            metrics=self._metrics_name(), )
        # EarlyStopping monitors eval metrics, so it is pointless without
        # evaluation data; warn rather than fail.
        if any(isinstance(k, EarlyStopping) for k in cbks) and not do_eval:
            warnings.warn("EarlyStopping needs validation data.")
        cbks.on_begin('train')
        for epoch in range(epochs):
            cbks.on_epoch_begin(epoch)
            logs = self._run_one_epoch(train_loader, cbks, 'train')
            cbks.on_epoch_end(epoch, logs)
            if do_eval and epoch % eval_freq == 0:
                eval_steps = self._len_data_loader(eval_loader)
                cbks.on_begin('eval', {
                    'steps': eval_steps,
                    'metrics': self._metrics_name()
                })
                eval_logs = self._run_one_epoch(eval_loader, cbks, 'eval')
                cbks.on_end('eval', eval_logs)
            # Callbacks (e.g. EarlyStopping) may request an early exit.
            if self.stop_training:
                break
        cbks.on_end('train', logs)
        self._test_dataloader = None
    def evaluate(
            self,
            eval_data,
            batch_size=1,
            log_freq=10,
            verbose=2,
            num_workers=0,
            callbacks=None, ):
        """
        Evaluate the loss and metrics of the model on input dataset.
        Args:
            eval_data (Dataset|DataLoader): An iterable data loader is used for
                evaluation. An instance of paddle.io.Dataset or
                paddle.io.Dataloader is recommended.
            batch_size (int): Integer number. The batch size of train_data
                and eval_data. When eval_data is the instance of Dataloader,
                this argument will be ignored. Default: 1.
            log_freq (int): The frequency, in number of steps, the eval logs
                are printed. Default: 10.
            verbose (int): The verbosity mode, should be 0, 1, or 2. 0 = silent,
                1 = progress bar, 2 = one line per epoch. Default: 2.
            num_workers (int): The number of subprocess to load data,
                0 for no subprocess used and loading data in main process. When
                train_data and eval_data are both the instance of Dataloader,
                this parameter will be ignored. Default: 0.
            callbacks (Callback|None): A list of `Callback` instances to apply
                during evaluation. If None, `ProgBarLogger` and `ModelCheckpoint`
                are automatically inserted. Default: None.
        Returns:
            dict: Result of metric. The key is the names of Metric,
                value is a scalar or numpy.array.
        Examples:
          .. code-block:: python
            import paddle
            import paddle.vision.transforms as T
            from paddle.static import InputSpec

            # declarative mode
            transform = T.Compose([
                T.Transpose(),
                T.Normalize([127.5], [127.5])
            ])
            val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)

            input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
            label = InputSpec([None, 1], 'int64', 'label')
            model = paddle.Model(paddle.vision.models.LeNet(), input, label)
            model.prepare(metrics=paddle.metric.Accuracy())
            result = model.evaluate(val_dataset, batch_size=64)
            print(result)
        """
        # Wrap a plain Dataset in a DataLoader; pass through anything else.
        if eval_data is not None and isinstance(eval_data, Dataset):
            eval_sampler = DistributedBatchSampler(
                eval_data, batch_size=batch_size)
            eval_loader = DataLoader(
                eval_data,
                batch_sampler=eval_sampler,
                places=self._place,
                num_workers=num_workers,
                return_list=True)
        else:
            eval_loader = eval_data
        self._test_dataloader = eval_loader
        cbks = config_callbacks(
            callbacks,
            model=self,
            log_freq=log_freq,
            verbose=verbose,
            metrics=self._metrics_name(), )
        eval_steps = self._len_data_loader(eval_loader)
        cbks.on_begin('eval',
                      {'steps': eval_steps,
                       'metrics': self._metrics_name()})
        logs = self._run_one_epoch(eval_loader, cbks, 'eval')
        cbks.on_end('eval', logs)
        self._test_dataloader = None
        # Keep only the metric entries from the epoch logs (drops bookkeeping
        # keys such as 'step' and 'batch_size').
        eval_result = {}
        for k in self._metrics_name():
            eval_result[k] = logs[k]
        return eval_result
    def predict(self,
                test_data,
                batch_size=1,
                num_workers=0,
                stack_outputs=False,
                callbacks=None):
        """
        Compute the output predictions on testing data.
        Args:
            test_data (Dataset|DataLoader): An iterable data loader is used for
                predict. An instance of paddle.io.Dataset or paddle.io.Dataloader
                is recommended.
            batch_size (int): Integer number. The batch size of train_data and eval_data.
                When train_data and eval_data are both the instance of Dataloader, this
                argument will be ignored. Default: 1.
            num_workers (int): The number of subprocess to load data, 0 for no subprocess
                used and loading data in main process. When train_data and eval_data are
                both the instance of Dataloader, this argument will be ignored. Default: 0.
            stack_outputs (bool): Whether stack output field like a batch, as for an output
                field of a sample is in shape [X, Y], test_data contains N samples, predict
                output field will be in shape [N, X, Y] if stack_output is True, and will
                be a length N list in shape [[X, Y], [X, Y], ....[X, Y]] if stack_outputs
                is False. stack_outputs as False is used for LoDTensor output situation,
                it is recommended set as True if outputs contains no LoDTensor. Default: False.
            callbacks(Callback): A Callback instance, default None.
        Returns:
            list: output of models.
        Examples:
          .. code-block:: python
            import numpy as np
            import paddle
            from paddle.static import InputSpec

            class MnistDataset(paddle.vision.datasets.MNIST):
                def __init__(self, mode, return_label=True):
                    super(MnistDataset, self).__init__(mode=mode)
                    self.return_label = return_label

                def __getitem__(self, idx):
                    img = np.reshape(self.images[idx], [1, 28, 28])
                    if self.return_label:
                        return img, np.array(self.labels[idx]).astype('int64')
                    return img,

                def __len__(self):
                    return len(self.images)

            test_dataset = MnistDataset(mode='test', return_label=False)

            # imperative mode
            input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
            model = paddle.Model(paddle.vision.models.LeNet(), input)
            model.prepare()
            result = model.predict(test_dataset, batch_size=64)
            print(len(result[0]), result[0][0].shape)

            # declarative mode
            device = paddle.set_device('cpu')
            paddle.enable_static()
            input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
            model = paddle.Model(paddle.vision.models.LeNet(), input)
            model.prepare()
            result = model.predict(test_dataset, batch_size=64)
            print(len(result[0]), result[0][0].shape)
        """
        # Wrap a plain Dataset in a DataLoader; pass through anything else.
        if test_data is not None and isinstance(test_data, Dataset):
            test_sampler = DistributedBatchSampler(
                test_data, batch_size=batch_size)
            test_loader = DataLoader(
                test_data,
                batch_sampler=test_sampler,
                places=self._place,
                num_workers=num_workers,
                return_list=True)
        else:
            test_loader = test_data
        self._test_dataloader = test_loader
        cbks = config_callbacks(callbacks, model=self, verbose=1)
        test_steps = self._len_data_loader(test_loader)
        logs = {'steps': test_steps}
        cbks.on_begin('predict', logs)
        outputs = []
        logs, outputs = self._run_one_epoch(test_loader, cbks, 'predict')
        # Transpose from per-batch outputs to per-field lists:
        # [[f1_b1, f2_b1], [f1_b2, f2_b2], ...] -> [(f1_b1, f1_b2, ...), ...]
        outputs = list(zip(*outputs))
        # NOTE: for lod tensor output, we should not stack outputs
        # for stacking may lose its detail info
        if stack_outputs:
            outputs = [np.vstack(outs) for outs in outputs]
        self._test_dataloader = None
        cbks.on_end('predict', logs)
        return outputs
    def _save_inference_model(self, path):
        """
        Save inference model can be used in static or dynamic mode.
        Args:
            path (str): The path prefix to save model. The format is
                ``dirname/file_prefix`` or ``file_prefix``.
        Returns:
            None
        """
        if fluid.in_dygraph_mode():
            # Temporarily leave dygraph tracing so jit.save can trace the
            # layer from scratch with the recorded input specs.
            with fluid.framework._dygraph_guard(None):
                layer = self.network
                if self._input_info is None:  # No provided or inferred
                    raise RuntimeError(
                        "Saving inference model needs 'inputs' or running before saving. Please specify 'inputs' in Model initialization or input training data and perform a training for shape derivation."
                    )
                if self._is_shape_inferred:
                    warnings.warn(
                        "'inputs' was not specified when Model initialization, so the input shape to be saved will be the shape derived from the user's actual inputs. The input shape to be saved is %s. For saving correct input shapes, please provide 'inputs' for Model initialization."
                        % self._input_info[0])
                paddle.jit.save(layer, path, input_spec=self._inputs)
        else:
            # Static-graph branch: split `path` into directory + file prefix
            # and export the test program via fluid.io.save_inference_model.
            # path check
            file_prefix = os.path.basename(path)
            if file_prefix == "":
                raise ValueError(
                    "The input path MUST be format of dirname/file_prefix "
                    "[dirname\\file_prefix in Windows system], but received "
                    "file_prefix is empty string.")
            dirname = os.path.dirname(path)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            model_path = dirname
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX
            prog = self._adapter._progs.get('test', None)
            assert prog, \
                "Model is not ready, please call `model.prepare()` first"
            # Clone with for_test=True to strip training-only operators.
            infer_prog = prog.clone(for_test=True)
            input_names = [v.name for v in self._adapter._input_vars['test']]
            endpoints = self._adapter._endpoints['test']['output']
            fluid.io.save_inference_model(
                model_path,
                input_names,
                endpoints,
                self._adapter._executor,
                main_program=infer_prog,
                model_filename=model_filename,
                params_filename=params_filename)
def _run_one_epoch(self, data_loader, callbacks, mode, logs={}):
outputs = []
for step, data in enumerate(data_loader):
# data might come from different types of data_loader and have
# different format, as following:
# 1. DataLoader in static graph:
# [[input1, input2, ..., label1, lable2, ...]]
# 2. DataLoader in dygraph
# [input1, input2, ..., label1, lable2, ...]
# 3. custumed iterator yield concated inputs and labels:
# [input1, input2, ..., label1, lable2, ...]
# 4. custumed iterator yield seperated inputs and labels:
# ([input1, input2, ...], [label1, lable2, ...])
# To handle all of these, flatten (nested) list to list.
data = flatten(data)
# LoDTensor.shape is callable, where LoDTensor comes from
# DataLoader in static graph
batch_size = data[0].shape()[0] if callable(data[
0].shape) else data[0].shape[0]
callbacks.on_batch_begin(mode, step, logs)
if mode != 'predict':
outs = getattr(self, mode + '_batch')(data[:len(self._inputs)],
data[len(self._inputs):])
if self._metrics and self._loss:
metrics = [[l[0] for l in outs[0]]]
elif self._loss:
metrics = [[l[0] for l in outs]]
else:
metrics = []
# metrics
for metric in self._metrics:
res = metric.accumulate()
metrics.extend(to_list(res))
assert len(self._metrics_name()) == len(metrics)
for k, v in zip(self._metrics_name(), metrics):
logs[k] = v
else:
if self._inputs is not None:
outs = self.predict_batch(data[:len(self._inputs)])
else:
outs = self.predict_batch(data)
outputs.append(outs)
logs['step'] = step
if mode == 'train' or self._adapter._merge_count.get(
mode + '_batch', 0) <= 0:
logs['batch_size'] = batch_size * ParallelEnv().nranks
else:
logs['batch_size'] = self._adapter._merge_count[mode + '_batch']
callbacks.on_batch_end(mode, step, logs)
self._reset_metrics()
if mode == 'predict':
return logs, outputs
return logs
def summary(self, input_size=None, dtype=None):
"""Prints a string summary of the network.
Args:
input_size (tuple|InputSpec|list[tuple|InputSpec], optional): size of input tensor.
if not set, input_size will get from ``self._inputs`` if network only have
one input, input_size can be tuple or InputSpec. if model have multiple
input, input_size must be a list which contain every input's shape.
Default: None.
dtypes (str, optional): if dtypes is None, 'float32' will be used, Default: None.
Returns:
Dict: a summary of the network including total params and total trainable params.
Examples:
.. code-block:: python
import paddle
from paddle.static import InputSpec
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(paddle.vision.LeNet(),
input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
paddle.nn.CrossEntropyLoss())
params_info = model.summary()
print(params_info)
"""
assert (input_size is not None or self._inputs is not None
), "'input_size' or 'self._input' must be set"
if input_size is not None:
_input_size = input_size
else:
_input_size = self._inputs
return summary(self.network, _input_size, dtype)
    def _verify_spec(self, specs, shapes=None, dtypes=None, is_input=False):
        """Normalize user-provided input/label specs to a list of `Input`.

        When `specs` is None and `is_input` is True, specs are synthesized
        from the argument names of `network.forward` (optionally with shapes
        and dtypes captured from actual data). Raises ValueError if any
        resulting spec has no name.
        """
        out_specs = []
        if specs is None:
            # Note(Aurelius84): If not specific specs of `Input`, using argument names of `forward` function
            # to generate `Input`. But how can we know the actual shape of each input tensor?
            if is_input:
                # [1:] skips the implicit `self` argument of forward().
                arg_names = extract_args(self.network.forward)[1:]
                # While Saving inference model in dygraph, and providing inputs only in running.
                if shapes is not None and dtypes is not None and fluid.in_dygraph_mode(
                ):
                    out_specs = [
                        Input(
                            name=n, dtype=dtypes[i], shape=shapes[i])
                        for i, n in enumerate(arg_names)
                    ]
                else:
                    # Shapes unknown: use a placeholder [None] shape.
                    out_specs = [Input(name=n, shape=[None]) for n in arg_names]
            else:
                out_specs = to_list(specs)
        elif isinstance(specs, dict):
            # Dict specs are only supported for labels; order them to match
            # the forward() argument order.
            assert is_input == False
            out_specs = [specs[n] \
                for n in extract_args(self.network.forward) if n != 'self']
        else:
            out_specs = to_list(specs)
        # Note: checks each element has a specified `name`.
        if out_specs is not None:
            for i, spec in enumerate(out_specs):
                assert isinstance(spec, Input)
                if spec.name is None:
                    raise ValueError(
                        "Requires Input[{}].name != None, but receive `None` with {}."
                        .format(i, spec))
        return out_specs
def _reset_metrics(self):
for metric in self._metrics:
metric.reset()
def _metrics_name(self):
metrics_name = ['loss'] if self._loss else []
for m in self._metrics:
metrics_name.extend(to_list(m.name()))
return metrics_name
def _len_data_loader(self, data_loader):
try:
steps = len(data_loader)
except Exception:
steps = None
return steps
    def _update_inputs(self):
        """Update self._inputs from input info recorded by the adapter.

        After a forward pass the adapter stores (shapes, dtypes) of the
        actual inputs; when both are present, synthesize Input specs from
        them and mark the shapes as inferred (used later to warn on save).
        """
        self._input_info = self._adapter._input_info
        if self._input_info is not None and len(self._input_info) == 2:
            self._inputs = self._verify_spec(None, self._input_info[0],
                                             self._input_info[1], True)
            self._is_shape_inferred = True
| 39.513903 | 285 | 0.561588 |
34bd926193a5b7aee6b96744d2b5571d1d11cff8 | 701 | py | Python | src/nti/fakestatsd/__init__.py | NextThought/nti.fakestatsd | 99bce0f7819af62d0cfa8da4617299c5f41ce7c7 | [
"Apache-2.0"
] | 2 | 2018-10-12T21:49:22.000Z | 2019-09-03T19:23:54.000Z | src/nti/fakestatsd/__init__.py | NextThought/nti.fakestatsd | 99bce0f7819af62d0cfa8da4617299c5f41ce7c7 | [
"Apache-2.0"
] | 16 | 2018-10-12T21:56:30.000Z | 2019-09-03T18:35:14.000Z | src/nti/fakestatsd/__init__.py | NextThought/nti.fakestatsd | 99bce0f7819af62d0cfa8da4617299c5f41ce7c7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
__docformat__ = "restructuredtext en"
__all__ = [
'FakeStatsDClient',
'Metric',
'METRIC_COUNTER_KIND',
'METRIC_GAUGE_KIND',
'METRIC_SET_KIND',
'METRIC_TIMER_KIND',
]
from perfmetrics.testing import Observation as Metric
from perfmetrics.testing import OBSERVATION_KIND_COUNTER as METRIC_COUNTER_KIND
from perfmetrics.testing import OBSERVATION_KIND_GAUGE as METRIC_GAUGE_KIND
from perfmetrics.testing import OBSERVATION_KIND_SET as METRIC_SET_KIND
from perfmetrics.testing import OBSERVATION_KIND_TIMER as METRIC_TIMER_KIND
from .client import FakeStatsDClient
| 29.208333 | 79 | 0.805991 |
8e6e2b1f73e2e8bebd72e6a162ad32ab110a242c | 3,091 | py | Python | data/p2DJ/New/program/qiskit/simulator/startQiskit181.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit181.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit181.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=13
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase-flip-style oracle circuit O_f for boolean function f.

    Args:
        n: number of control (input) qubits.
        f: callable mapping an n-character bit string (e.g. "01") to "0"/"1".

    Returns:
        QuantumCircuit named "Of" over n control qubits plus 1 target qubit,
        applying a multi-controlled X on the target for every input where
        f(input) == "1".
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate every n-bit input; for inputs mapped to "1", surround a
    # multi-controlled X with X gates on the zero bits so the control
    # pattern matches that specific bit string.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis-flipping X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
    """Build a Deutsch-Jozsa-style circuit for f, plus generated extra gates.

    Args:
        n: number of input qubits.
        f: boolean function passed through to build_oracle.

    Returns:
        QuantumCircuit over n input qubits and one target qubit. NOTE: the
        trailing y/h/cz/cx/x gates (the "# number=..." lines) look machine-
        generated mutations rather than part of the textbook algorithm —
        presumably intentional for this benchmark; verify before reuse.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1])  # number=1
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.y(input_qubit[1])  # number=2
    prog.y(input_qubit[1])  # number=4
    prog.y(input_qubit[1])  # number=3
    prog.h(input_qubit[0])  # number=10
    prog.cz(input_qubit[1], input_qubit[0])  # number=11
    prog.h(input_qubit[0])  # number=12
    prog.x(input_qubit[0])  # number=8
    prog.cx(input_qubit[1], input_qubit[0])  # number=9
    prog.x(input_qubit[0])  # number=6
    # circuit end
    return prog
if __name__ == '__main__':
    # Build the circuit for f(x) = last bit of x, simulate it, and dump the
    # measurement counts plus circuit stats to a CSV-named text file.
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    backend = BasicAer.get_backend('qasm_simulator')
    # Transpile against a mocked 5-qubit device topology.
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): these two X gates on qubit 3 cancel each other out —
    # presumably a deliberate no-op from the benchmark generator.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    writefile = open("../data/startQiskit181.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
a0940e17cb92ce8e411b43c39ac970b74fa81ba6 | 11,111 | py | Python | core/lnp.py | thurin/python-lnp | 6b2ba77940a5d95311e3ac154ebc8c59f4c43866 | [
"0BSD"
] | 48 | 2017-11-24T00:30:16.000Z | 2022-03-27T20:59:20.000Z | core/lnp.py | thurin/python-lnp | 6b2ba77940a5d95311e3ac154ebc8c59f4c43866 | [
"0BSD"
] | 171 | 2020-04-04T14:33:06.000Z | 2021-05-13T11:06:32.000Z | core/lnp.py | thurin/python-lnp | 6b2ba77940a5d95311e3ac154ebc8c59f4c43866 | [
"0BSD"
] | 8 | 2020-03-03T06:00:10.000Z | 2021-12-16T12:23:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""PyLNP main library."""
from __future__ import print_function, unicode_literals, absolute_import
import sys
import os
from . import log
from .json_config import JSONConfiguration
VERSION = '0.14a'
# pylint:disable=too-many-instance-attributes
class UI(object):
    """Reference description of the interface the PyLNP core expects from a
    user-interface implementation.

    Concrete UIs do not have to inherit from this class; they only need to
    provide methods with these signatures. Every hook here is a no-op.
    """

    def start(self):
        """Begin running the UI. PyLNP terminates when this call returns."""

    def on_update_available(self):
        """Invoked when a newer version is available, so the UI can show a
        notification and ask the user how to proceed."""

    def on_program_running(self, path, is_df):
        """Invoked when a program being launched is already running.

        `path` is the program being launched (so a forced launch can be
        offered); `is_df` is True for Dwarf Fortress itself, False for a
        utility."""

    def on_invalid_config(self, errors):
        """Invoked before running DF when the configuration looks invalid.

        `errors` lists the detected problems for display. A truthy return
        launches DF anyway; a falsy return cancels the launch."""

    def on_request_update_permission(self, interval):
        """Invoked when PyLNP.json requests update checks every `interval`
        days but the user configuration holds no preference yet. A truthy
        return adopts that interval; a falsy return disables automatic
        update checks."""

    def on_query_migration(self):
        """Invoked when no saves are detected, so the UI can offer to import
        a previous DF install or starter pack into the new DF version."""
lnp = None
class PyLNP(object):
"""
PyLNP library class.
Acts as an abstraction layer between the UI and the Dwarf Fortress
instance.
"""
def __init__(self):
"""Constructor for the PyLNP library."""
# pylint:disable=global-statement
global lnp
lnp = self
self.args = self.parse_commandline()
self.BASEDIR = '.'
if sys.platform == 'win32':
self.os = 'win'
elif sys.platform.startswith('linux'):
self.os = 'linux'
elif sys.platform == 'darwin':
self.os = 'osx'
self.bundle = ''
if hasattr(sys, 'frozen'):
self.bundle = self.os
os.chdir(os.path.dirname(sys.executable))
if self.bundle == 'osx':
# OS X bundles start in different directory
os.chdir('../../..')
else:
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
from . import update
self.folders = []
self.df_info = None
self.settings = None
self.running = {}
self.autorun = []
self.updater = None
self.config = None
self.userconfig = None
self.ui = None
self.initialize_program()
self.initialize_df()
self.new_version = None
self.initialize_ui()
update.check_update()
from . import paths
save_dir = paths.get('save')
saves_exist = os.path.isdir(save_dir) and os.listdir(save_dir)
if paths.get('df') and not saves_exist:
self.ui.on_query_migration()
self.ui.start()
def initialize_program(self):
"""Initializes the main program (errorlog, path registration, etc.)."""
from . import paths, utilities, errorlog
self.BASEDIR = '.'
self.detect_basedir()
paths.clear()
paths.register('root', self.BASEDIR)
errorlog.start()
paths.register('lnp', self.BASEDIR, 'LNP')
if not os.path.isdir(paths.get('lnp')):
log.w('LNP folder is missing!')
paths.register('keybinds', paths.get('lnp'), 'Keybinds')
paths.register('graphics', paths.get('lnp'), 'Graphics')
paths.register('utilities', paths.get('lnp'), 'Utilities')
paths.register('colors', paths.get('lnp'), 'Colors')
paths.register('embarks', paths.get('lnp'), 'Embarks')
paths.register('tilesets', paths.get('lnp'), 'Tilesets')
paths.register('baselines', paths.get('lnp'), 'Baselines')
paths.register('mods', paths.get('lnp'), 'Mods')
config_file = 'PyLNP.json'
if os.access(paths.get('lnp', 'PyLNP.json'), os.F_OK):
config_file = paths.get('lnp', 'PyLNP.json')
default_config = {
"folders": [
["Savegame folder", "<df>/data/save"],
["Utilities folder", "LNP/Utilities"],
["Graphics folder", "LNP/Graphics"],
["-", "-"],
["Main folder", ""],
["LNP folder", "LNP"],
["Dwarf Fortress folder", "<df>"],
["Init folder", "<df>/data/init"]
],
"links": [
["DF Homepage", "http://www.bay12games.com/dwarves/"],
["DF Wiki", "http://dwarffortresswiki.org/"],
["DF Forums", "http://www.bay12forums.com/smf/"]
],
"to_import": [
['text_prepend', '<df>/gamelog.txt'],
['text_prepend', '<df>/ss_fix.log'],
['text_prepend', '<df>/dfhack.history'],
['copy_add', '<df>/data/save'],
['copy_add', '<df>/soundsense',
'LNP/Utilities/Soundsense/packs'],
['copy_add', 'LNP/Utilities/Soundsense/packs'],
['copy_add', 'User Generated Content']
],
"hideUtilityPath": False,
"hideUtilityExt": False,
"updates": {
"updateMethod": ""
}
}
self.config = JSONConfiguration(config_file, default_config)
self.userconfig = JSONConfiguration('PyLNP.user')
self.autorun = []
utilities.load_autorun()
if self.args.terminal_test_parent:
from . import terminal
errorlog.stop()
sys.exit(terminal.terminal_test_parent(
self.args.terminal_test_parent[0]))
if self.args.terminal_test_child:
from . import terminal
errorlog.stop()
sys.exit(terminal.terminal_test_child(
self.args.terminal_test_child[0]))
def initialize_df(self):
"""Initializes the DF folder and related variables."""
from . import df
self.df_info = None
self.folders = []
self.settings = None
df.find_df_folder()
def initialize_ui(self):
"""Instantiates the UI object."""
from tkgui.tkgui import TkGui
self.ui = TkGui()
def reload_program(self):
"""Reloads the program to allow the user to change DF folders."""
self.args.df_folder = None
self.initialize_program()
self.initialize_df()
self.initialize_ui()
self.ui.start()
def parse_commandline(self):
"""Parses and acts on command line options."""
args = self.get_commandline_args()
if args.debug == 1:
log.set_level(log.DEBUG)
elif args.debug is not None and args.debug > 1:
log.set_level(log.VERBOSE)
if args.release_prep:
args.raw_lint = True
log.d(args)
return args
@staticmethod
def get_commandline_args():
"""Responsible for the actual parsing of command line options."""
import argparse
parser = argparse.ArgumentParser(
description="PyLNP " +VERSION)
parser.add_argument(
'-d', '--debug', action='count',
help='Turn on debugging output (use twice for extra verbosity)')
parser.add_argument(
'--raw-lint', action='store_true',
help='Verify contents of raw files and exit')
parser.add_argument(
'df_folder', nargs='?',
help='Dwarf Fortress folder to use (if it exists)')
parser.add_argument(
'--version', action='version', version="PyLNP "+VERSION)
parser.add_argument(
'--df-executable', action='store',
help='Override DF/DFHack executable name')
parser.add_argument(
'--release-prep', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument(
'--terminal-test-parent', nargs=1,
help=argparse.SUPPRESS)
parser.add_argument(
'--terminal-test-child', nargs=1,
help=argparse.SUPPRESS)
return parser.parse_known_args()[0]
def save_config(self):
    """Saves LNP configuration."""
    # Persistence is delegated entirely to the userconfig object.
    self.userconfig.save_data()
def macos_check_translocated(self):
    """Verify that macOS isn't isolating our application.

    When Gatekeeper app-translocation is detected (the executable runs
    from a randomized /AppTranslocation/ path), inform the user that the
    app must be copied elsewhere before it can work.
    """
    assert self.os == 'osx'
    if '/AppTranslocation/' not in sys.executable:
        return
    # Python 3 module name first; fall back to the Python 2 spelling.
    try:
        import tkinter.messagebox as messagebox
    except ImportError:
        import tkMessageBox as messagebox
    messagebox.showinfo(
        'Cannot launch PyLNP',
        'PyLNP cannot be run from a disk image or from the Downloads '
        'folder. Please copy the PyLNP app and its other Dwarf '
        'Fortress files elsewhere, such as to the Applications folder.')
def detect_basedir(self):
    """Detects the location of Dwarf Fortress by walking up the directory
    tree.

    Returns as soon as any DF folder is found; otherwise logs an error and
    terminates the process (exit code 1 on path-encoding problems, 2 when
    no installation exists).
    """
    prev_path = '.'
    from . import df
    try:
        # Walk upward until the absolute path stops changing, i.e. the
        # filesystem root has been reached.
        while os.path.abspath(self.BASEDIR) != prev_path:
            df.find_df_folders()
            if len(self.folders) != 0:
                return
            # pylint:disable=redefined-variable-type
            prev_path = os.path.abspath(self.BASEDIR)
            self.BASEDIR = os.path.join(self.BASEDIR, '..')
    except UnicodeDecodeError:
        # This seems to no longer be an issue, but leaving in the check
        # just in case
        log.e(
            "PyLNP is being stored in a path containing non-ASCII "
            "characters, and cannot continue. Folder names may only use "
            "the characters A-Z, 0-9, and basic punctuation.\n"
            "Alternatively, you may run PyLNP from source using Python 3.")
        sys.exit(1)
    log.e("Could not find any Dwarf Fortress installations.")
    if self.os == 'osx':
        # On macOS, app-translocation is a common cause of this failure.
        self.macos_check_translocated()
    sys.exit(2)
# vim:expandtab
| 35.498403 | 80 | 0.570606 |
55b7967044e4cdb55b59ae705f9ebefdeca00d1c | 1,321 | py | Python | reversing/cracking8.py | brerodrigues/exploit_drafts | 643bca8d13b44f99205fe614beda59c040a431c2 | [
"MIT"
] | 1 | 2022-01-01T20:33:44.000Z | 2022-01-01T20:33:44.000Z | reversing/cracking8.py | brerodrigues/exploit_drafts | 643bca8d13b44f99205fe614beda59c040a431c2 | [
"MIT"
] | null | null | null | reversing/cracking8.py | brerodrigues/exploit_drafts | 643bca8d13b44f99205fe614beda59c040a431c2 | [
"MIT"
] | null | null | null | #8teaching
def hex_list_to_str(list_hex):
    """Decode a list of hex strings (e.g. '0x41') into the string they spell.

    Each entry is parsed base-16 and converted to its character. Uses
    ``str.join`` instead of the original repeated ``+=`` concatenation,
    which is quadratic in the worst case.
    """
    return ''.join(chr(int(hex_value, 16)) for hex_value in list_hex)
def crack_key(list_hex):
    """Undo the binary's two-stage XOR obfuscation of the key bytes.

    Stage 1 XORs each byte with 0xc7 (even index) or 0x81 (odd index);
    stage 2 XORs the result with 0x15 / 0x9d / 0xe4 for index % 3 of
    0 / 1 / 2. Returns the recovered bytes as hex strings (e.g. '0x67').

    The original "decisions" expression,
    ``(idx - (idx >> 0x1f) and 1) + (idx >> 0x1f)``, reduces to 0 for
    idx == 0 and 1 otherwise for the non-negative indexes produced by
    ``enumerate``, so combined with the ``idx % 2 == 0`` test it was just
    an obfuscated parity check; the unreachable "should never happen"
    branches are removed. Output is byte-for-byte identical.
    """
    key = []
    for idx, byte in enumerate(list_hex):
        first_mask = 0xc7 if idx % 2 == 0 else 0x81
        second_mask = (0x15, 0x9d, 0xe4)[idx % 3]
        key.append(hex(byte ^ first_mask ^ second_mask))
    return key
# Obfuscated key bytes extracted from the target binary.
encoded_key = [0xb5,0x77,0x4a,0xe6,0x35,0x16,0xb9,0x77,0x56,0xf3,0x3c,0x04,0xb5,0x6a,0x5b,0xff]
cracked_key_hex = crack_key(encoded_key)
decoded_key = hex_list_to_str(cracked_key_hex)
print (decoded_key[::-1]) # revert the string to get the right key
| 30.72093 | 139 | 0.606359 |
184b931c89d12ab44e32d824084ec8049acc4b3d | 13,238 | py | Python | instauto/api/structs.py | Samu1808/instauto | bbb402d6df9880352643ffef7d2122160a5d8b51 | [
"MIT"
] | null | null | null | instauto/api/structs.py | Samu1808/instauto | bbb402d6df9880352643ffef7d2122160a5d8b51 | [
"MIT"
] | null | null | null | instauto/api/structs.py | Samu1808/instauto | bbb402d6df9880352643ffef7d2122160a5d8b51 | [
"MIT"
] | null | null | null | from typing import Tuple, Union, Callable, List
import time
import enum
import pprint
import random
from dataclasses import dataclass
from .constants import (DEFAULT_SIGNATURE_KEY, DEFAULT_HTTP_ENGINE, DEFAULT_IG_CAPABILITIES, DEFAULT_APP_ID,
DEFAULT_IG_VERSION, DEFAULT_BUILD_NUMBER, DEFAULT_ANDROID_RELEASE, DEFAULT_ANDROID_SDK,
DEFAULT_CHIPSET, DEFAULT_DEVICE, DEFAULT_DPI, DEFAULT_MANUFACTURER, DEFAULT_MODEL,
DEFAULT_RESOLUTION, DEFAULT_SIGNATURE_KEY_V, DEFAULT_ACCEPT, DEFAULT_ACCEPT_ENCODING,
DEFAULT_ACCEPT_LANGUAGE, DEFAULT_APP_STARTUP_COUNTRY,
DEFAULT_BANDWIDTH_TOTALBYTES_B, DEFAULT_BANDWIDTH_TOTALTIME_MS, DEFAULT_RUR, DEFAULT_WWW_CLAIM,
DEFAULT_AUTHORIZATION, DEFAULT_CONNECTION_TYPE, DEFAULT_APP_LOCALE, DEFAULT_DEVICE_LOCALE,
DEFAULT_ADS_OPT_OUT, DEFAULT_BLOKS_VERSION_ID, DEFAULT_BLOKS_IS_LAYOUT_RTL)
#: Struct that is used to specify which HTTP method to use
class Method(enum.Enum):
    """HTTP verb used when sending a request."""
    GET = 1
    POST = 2
class ChallengeChoice(enum.Enum):
    """Delivery method for a challenge (checkpoint) verification code."""
    phone = 0
    email = 1
class WhichGender(enum.Enum):
    """Numeric gender codes accepted by the API."""
    male = 1
    female = 2
    prefer_not_to_say = 3
    other = 4
class Surface(enum.Enum):
    """Surface identifiers sent as request context (values are the
    Instagram-side surface names)."""
    profile = 'following_sheet'
    following_list = 'self_unified_follow_lists'
    follow_list = 'follow_list_page'
    follow_requests = 'follow_requests'
#: Struct that is used to specify where a post should be posted
class PostLocation(enum.Enum):
    """Upload destination: story (4) or feed (3)."""
    Story = 4
    Feed = 3
class IGProfile:
    """Holds all data that is generated by Instagram. For pretty much every request, at least one of the attributes
    is used.

    Attributes
    ----------
    signature_key : str, DEPRECATED
        Key generated by Instagram to sign post requests. Can be extracted from the app. Currently, the actual
        signature key is no longer used for signing actual requests.
    signature_key_version : str
        The version of the signature key. This key is still sent along with signed requests. Could probably work
        without. TODO: check if we still need to send this along with signed requests / if we have to use the signed
        request format at all
    http_engine : str,
        Facebook uses a custom HTTP engine, called Liger. This is unlikely to change.
    capabilities: str,
        Not sure what this means on Instagram's side, but it needs to get sent along with all requests. Can change
        overtime. Can be extracted from all requests to the 'logging_client_events' endpoint.
    id : str,
        The app id, presumably a constant.
    version : str,
        The version number of the version of instagram to use.
    build_number : str,
        The build number associated with the version number
    """
    # NOTE: the `id` parameter shadows the builtin, but renaming it would
    # break keyword callers.
    def __init__(self, signature_key: str = None, signature_key_version: str = None, http_engine: str = None, \
                 capabilities: str = None, id: str = None, version: str = None, build_number: str = None):
        # Each attribute falls back to the app-wide default when not given.
        self.signature_key = signature_key or DEFAULT_SIGNATURE_KEY
        self.signature_key_version = signature_key_version or DEFAULT_SIGNATURE_KEY_V
        self.http_engine = http_engine or DEFAULT_HTTP_ENGINE
        self.capabilities = capabilities or DEFAULT_IG_CAPABILITIES
        self.id = id or DEFAULT_APP_ID
        self.version = version or DEFAULT_IG_VERSION
        self.build_number = build_number or DEFAULT_BUILD_NUMBER
class DeviceProfile:
    """Holds all data about the android 'phone' that we simulate using.

    Attributes
    ----------
    manufacturer : str,
        The phone manufacturer
    android_sdk_version : str,
        The Android sdk version that is, presumably, used by the Instagram app.
    android_release : str,
        The version of Android that the phone runs on.
    device : str,
        The version name of the phone
    model : str,
        The codename from Samsung under which the phone was build, i.e. for the Galaxy S10E, beyond1.
    dpi : str,
        The DPI of the phone used.
    resolution : tuple[int, int],
        The resolution of the phone.
    chipset:
        The chipset that the phone runs on.
    """
    def __init__(self, manufacturer: str = None, android_sdk_version: str = None, android_release: str = None,
                 device: str = None, model: str = None, dpi: int = None,
                 resolution: Tuple[int, int] = None, chipset: str = None):
        # Each attribute falls back to the module-level default when not given.
        self.manufacturer = manufacturer or DEFAULT_MANUFACTURER
        self.android_sdk_version = android_sdk_version or DEFAULT_ANDROID_SDK
        self.android_release = android_release or DEFAULT_ANDROID_RELEASE
        self.device = device or DEFAULT_DEVICE
        self.model = model or DEFAULT_MODEL
        self.dpi = dpi or DEFAULT_DPI
        self.resolution = resolution or DEFAULT_RESOLUTION
        self.chipset = chipset or DEFAULT_CHIPSET
class State:
    """Structure that holds a lot of data about the state of a session. It contains mainly header values that need to be
    send along with requests to the API.

    Attributes
    ----------
    www_claim : str,
        Some sort of tracking / identifying header value that is send along with every HTTP request. It is also
        updated in almost all responses received from Instagram's API.
    authorization : str,
        Contains the token used for Bearer authentication.
    mid : str,
        Another tracking / identifying header value. Is also sent along with all requests. Is also updated in every
        response.
    logged_in_account_data : LoggedInAccountData,
        Gets filled as soon as you login. Contains a lot of data about your account.
    """
    def __init__(self, app_startup_country: str = None, device_locale: str = None, app_locale: str = None,
                 bandwidth_totalbytes_b: str = None, bandwidth_totaltime_ms: str =
                 None, connection_type: str = None, accept_language: str = None, accept_encoding: str = None,
                 accept: str = None, ads_opt_out: bool = None, authorization: str = None, www_claim: str = None,
                 rur: str = None, bloks_version_id: str = None, bloks_is_layout_rtl: str = None, **kwargs):
        # NOTE(review): the `x or DEFAULT` pattern replaces *any* falsy
        # argument (False, '', 0) with the default, not just None —
        # presumably intentional here; confirm if falsy overrides matter.
        self.app_startup_country = app_startup_country or DEFAULT_APP_STARTUP_COUNTRY
        self.device_locale = device_locale or DEFAULT_DEVICE_LOCALE
        self.app_locale = app_locale or DEFAULT_APP_LOCALE
        self.bandwidth_totalbytes_b = bandwidth_totalbytes_b or DEFAULT_BANDWIDTH_TOTALBYTES_B
        self.bandwidth_totaltime_ms = bandwidth_totaltime_ms or DEFAULT_BANDWIDTH_TOTALTIME_MS
        self.connection_type = connection_type or DEFAULT_CONNECTION_TYPE
        self.accept_language = accept_language or DEFAULT_ACCEPT_LANGUAGE
        self.accept_encoding = accept_encoding or DEFAULT_ACCEPT_ENCODING
        self.accept = accept or DEFAULT_ACCEPT
        self.ads_opt_out = ads_opt_out or DEFAULT_ADS_OPT_OUT
        self.authorization = authorization or DEFAULT_AUTHORIZATION
        self.www_claim = www_claim or DEFAULT_WWW_CLAIM
        self.rur = rur or DEFAULT_RUR
        self.bloks_version_id = bloks_version_id or DEFAULT_BLOKS_VERSION_ID
        self.bloks_is_layout_rtl = bloks_is_layout_rtl or DEFAULT_BLOKS_IS_LAYOUT_RTL
        # Per-session values; populated by fill() after construction.
        self.uuid = None
        self.device_id = None
        self.ad_id = None
        self.session_id = None
        self.phone_id = None
        self.pigeon_session_id = None
        self.created = None
        self.user_id = None
        self.mid = None
        self.direct_region_hint = None
        self.shbid = None
        self.shbts = None
        self.target = None
        self.public_api_key = None
        self.public_api_key_id = None
        self.logged_in_account_data = None
        # Apply any extra (e.g. persisted) attributes verbatim.
        for k, v in kwargs.items():
            setattr(self, k, v)

    def fill(self, f: Callable) -> None:
        """Initializes all variables that:
        1) do not have a default value to start with;
        2) need a unique generated key on a per-user basis

        Parameters
        ----------
        f : function
            The function that generates the unique keys used throughout.
        """
        self.uuid = f()
        self.device_id = f()
        self.ad_id = f()
        self.session_id = f()
        self.phone_id = f()
        self.pigeon_session_id = f()
        self.created = time.time()
        self.user_id = ""
        self.mid = ""
        self.direct_region_hint = ""
        self.shbid = ""
        self.shbts = ""
        self.target = ""
        self.public_api_key = ""
        self.public_api_key_id = 0
        self.logged_in_account_data = LoggedInAccountData()

    @property
    def connection_speed(self) -> str:
        """Randomizes the connection speed."""
        # A fresh random value on every access.
        return f"{random.randint(1000, 3700)}kbps"

    @property
    def bandwidth_speed_kbps(self):
        """Randomizes the bandwidth speed"""
        return f"{random.randint(1000, 5000)}.{random.randint(100, 999)}"

    @property
    def android_id(self):
        """Creates an Android id from the device id."""
        # Uses a slice of the device id with the dashes stripped.
        return f"android-{self.device_id[9:28].replace('-', '')}"

    @property
    def valid(self) -> bool:
        """Sessions older then 90 days will not work anymore."""
        return self.created + 60 * 60 * 24 * 90 > time.time()

    @property
    def startup_country(self) -> str:
        # Country code is the suffix of the locale, e.g. 'en_US' -> 'US'.
        return self.app_locale.split('_')[-1]

    def __repr__(self):
        return pprint.pformat(vars(self))

    def refresh(self, f: Callable):
        # Regenerate only the identifiers that are safe to rotate.
        self.uuid = f()
        self.device_id = f()
        self.ad_id = f()
        self.session_id = f()
class LoggedInAccountData:
    """Structure that stores information about the Instagram account.

    All fields default to None; unknown extra fields returned by the API
    are silently ignored via *args/**kwargs.
    """
    def __init__(self, account_type: int = None, account_badges: list = None, allow_contacts_sync: bool = None,
                 allowed_commenter_type: str = None, can_boost_post: bool = None,
                 can_see_organic_insights: bool = None, can_see_primary_country_in_settings: bool = None,
                 full_name: str = None, has_anonymous_profile_picture: bool = None,
                 has_placed_orders: bool = None, interop_messaging_user_fbid: int = None,
                 is_business: bool = None, is_call_to_action_enabled: Union[bool, None] = None,
                 nametag: dict = None, phone_number: str = None, pk: int = None,
                 professional_conversion_suggested_account_type: int = None, profile_pic_id: str = None,
                 profile_pic_url: str = None, show_insights_terms: bool = None, total_igtv_videos: int = None,
                 username: str = None, is_private: bool = None, is_verified: bool = None,
                 reel_auto_archive: str = None, is_using_unified_inbox_for_direct: bool = None,
                 can_hide_category: str = None, can_hide_public_contacts: str = None, *args, **kwargs):
        self.account_badges = account_badges
        self.account_type = account_type
        self.allow_contacts_sync = allow_contacts_sync
        self.allowed_commenter_type = allowed_commenter_type
        self.can_boost_post = can_boost_post
        self.can_see_organic_insights = can_see_organic_insights
        self.can_see_primary_country_in_settings = can_see_primary_country_in_settings
        self.full_name = full_name
        self.has_anonymous_profile_picture = has_anonymous_profile_picture
        self.has_placed_orders = has_placed_orders
        self.interop_messaging_user_fbid = interop_messaging_user_fbid
        self.is_business = is_business
        self.is_call_to_action_enabled = is_call_to_action_enabled
        self.nametag = nametag
        self.phone_number = phone_number
        self.pk = pk
        self.professional_conversion_suggested_account_type = professional_conversion_suggested_account_type
        self.profile_pic_id = profile_pic_id
        self.profile_pic_url = profile_pic_url
        self.show_insights_terms = show_insights_terms
        self.total_igtv_videos = total_igtv_videos
        self.username = username
        self.is_private = is_private
        self.is_verified = is_verified
        self.reel_auto_archive = reel_auto_archive
        self.is_using_unified_inbox_for_direct = is_using_unified_inbox_for_direct
        self.can_hide_category = can_hide_category
        self.can_hide_public_contacts = can_hide_public_contacts

    def __repr__(self):
        return pprint.pformat(vars(self))
@dataclass
class Thread:
    """A single direct-message thread (chat)."""
    thread_id: str
    thread_v2_id: str
    #: users participating in the thread
    users: List[dict]
    #: users that have left the thread
    left_users: List[dict]
    #: users that are admins in the thread
    admin_user_ids: List[dict]
    #: a list of all messages sent in the thread
    items: List[dict]
    #: all other properties
    properties: dict
@dataclass
class Inbox:
    """The direct-message inbox: threads plus paging/seen metadata."""
    #: a list of your threads (chats)
    threads: List[Thread]
    #: has more threads available
    has_older: bool
    #: amount of unseen threads
    unseen_count: int
    #: timestamp of last check for unseen threads
    unseen_count_ts: int
    #: paging cursors as returned by the API
    oldest_cursor: str
    prev_cursor: dict
    next_cursor: dict
    blended_inbox_enabled: bool
    seq_id: int
    snapshot_at_ms: int
    pending_requests_total: int
    has_pending_top_requests: bool
| 42.025397 | 145 | 0.684016 |
d438e4928639ed02665f41f1899ab7b74581df81 | 146 | py | Python | ifitwala_ed/setup/doctype/party_type/test_party_type.py | fderyckel/ifitwala_dev | b3f973463cf81f9a0fbd81801b2fbf900fc86b89 | [
"MIT"
] | 13 | 2020-09-02T10:27:57.000Z | 2022-03-11T15:28:46.000Z | ifitwala_ed/setup/doctype/party_type/test_party_type.py | fderyckel/ifitwala_ed | f7043b80957ea972a0773d53f19bbada17c3d7fd | [
"MIT"
] | 43 | 2020-09-02T07:00:42.000Z | 2021-07-05T13:22:58.000Z | ifitwala_ed/setup/doctype/party_type/test_party_type.py | fderyckel/ifitwala_dev | b3f973463cf81f9a0fbd81801b2fbf900fc86b89 | [
"MIT"
] | 6 | 2020-10-19T01:02:18.000Z | 2022-03-11T15:28:47.000Z | # Copyright (c) 2021, ifitwala and Contributors
# See license.txt
# import frappe
import unittest
class TestPartyType(unittest.TestCase):
    """Placeholder test case for the Party Type doctype (no tests yet)."""
    pass
| 16.222222 | 47 | 0.780822 |
70c75d50330891948569eae10c849b8fff522b2e | 2,957 | py | Python | efficientdet/hparams_config_test.py | WannaFIy/automl | 287f54af377a177d8c58eef3ac22350a3d8ced64 | [
"Apache-2.0"
] | 5,277 | 2020-03-12T23:09:47.000Z | 2022-03-30T17:28:35.000Z | efficientdet/hparams_config_test.py | WannaFIy/automl | 287f54af377a177d8c58eef3ac22350a3d8ced64 | [
"Apache-2.0"
] | 988 | 2020-03-17T02:53:40.000Z | 2022-03-17T19:34:10.000Z | efficientdet/hparams_config_test.py | WannaFIy/automl | 287f54af377a177d8c58eef3ac22350a3d8ced64 | [
"Apache-2.0"
] | 1,486 | 2020-03-14T05:15:22.000Z | 2022-03-29T02:28:56.000Z | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Tests for hparams_config."""
import os
import tempfile
from absl import logging
import tensorflow.compat.v1 as tf
import yaml
import hparams_config
class HparamsConfigTest(tf.test.TestCase):
    """Tests for hparams_config.Config overrides and YAML round-tripping."""

    def test_config_override(self):
        # dict update and direct attribute assignment.
        c = hparams_config.Config({'a': 1, 'b': 2})
        self.assertEqual(c.as_dict(), {'a': 1, 'b': 2})

        c.update({'a': 10})
        self.assertEqual(c.as_dict(), {'a': 10, 'b': 2})

        c.b = 20
        self.assertEqual(c.as_dict(), {'a': 10, 'b': 20})

        # string overrides: values are parsed (bool / int / float / str).
        c.override('a=true,b=ss')
        self.assertEqual(c.as_dict(), {'a': True, 'b': 'ss'})

        c.override('a=100,,,b=2.3,')  # extra ',' is fine.
        self.assertEqual(c.as_dict(), {'a': 100, 'b': 2.3})

        c.override('a=2x3,b=50')  # a is a special format for image size.
        self.assertEqual(c.as_dict(), {'a': '2x3', 'b': 50})

        # overrride string must be in the format of xx=yy.
        with self.assertRaises(ValueError):
            c.override('a=true,invalid_string')

    def test_config_yaml(self):
        tmpdir = tempfile.gettempdir()
        yaml_file_path = os.path.join(tmpdir, 'x.yaml')
        with open(yaml_file_path, 'w') as f:
            # 'z' is nested under 'y' (see the assertion below).
            f.write("""
              x: 2
              y:
                z: 'test'
            """)
        c = hparams_config.Config(dict(x=234, y=2342))
        c.override(yaml_file_path)
        self.assertEqual(c.as_dict(), {'x': 2, 'y': {'z': 'test'}})

        # Saving and reloading must round-trip the same dict.
        yaml_file_path2 = os.path.join(tmpdir, 'y.yaml')
        c.save_to_yaml(yaml_file_path2)
        with open(yaml_file_path2, 'r') as f:
            config_dict = yaml.load(f, Loader=yaml.FullLoader)
        self.assertEqual(config_dict, {'x': 2, 'y': {'z': 'test'}})

    def test_config_override_recursive(self):
        c = hparams_config.Config({'x': 1})
        self.assertEqual(c.as_dict(), {'x': 1})
        # Dotted keys create nested configs when allow_new_keys is set.
        c.override('y.y0=2,y.y1=3', allow_new_keys=True)
        self.assertEqual(c.as_dict(), {'x': 1, 'y': {'y0': 2, 'y1': 3}})
        c.update({'y': {'y0': 5, 'y1': {'y11': 100}}})
        self.assertEqual(c.as_dict(), {'x': 1, 'y': {'y0': 5, 'y1': {'y11': 100}}})
        self.assertEqual(c.y.y1.y11, 100)

    def test_config_override_list(self):
        c = hparams_config.Config({'x': [1.0, 2.0]})
        self.assertEqual(c.as_dict(), {'x': [1.0, 2.0]})
        # '*' separates list elements in override strings.
        c.override('x=3.0*4.0*5.0')
        self.assertEqual(c.as_dict(), {'x': [3.0, 4.0, 5.0]})
if __name__ == '__main__':
    # Reduce log noise during the test run.
    logging.set_verbosity(logging.WARNING)
    tf.test.main()
| 33.602273 | 79 | 0.62462 |
317208d6fb6db0bbb1e9c92a10de8c43480aa821 | 2,460 | py | Python | python/camera.py | lindsayshuo/yolov5_TRT_C-_python_api | 29f7a9f1a3eda0c99fb843cfe0689b8e1e1f0bac | [
"Info-ZIP"
] | 5 | 2021-10-09T05:57:57.000Z | 2022-03-22T23:11:32.000Z | python/camera.py | lindsayshuo/yolov5_TRT_C-_python_api | 29f7a9f1a3eda0c99fb843cfe0689b8e1e1f0bac | [
"Info-ZIP"
] | null | null | null | python/camera.py | lindsayshuo/yolov5_TRT_C-_python_api | 29f7a9f1a3eda0c99fb843cfe0689b8e1e1f0bac | [
"Info-ZIP"
] | null | null | null | import cv2
import traceback
import threading
import queue
class JetCamera():
    """Jetson CSI camera wrapper (nvarguscamerasrc via OpenCV/GStreamer).

    A background thread pulls frames from the pipeline into a small
    bounded queue; frames arriving while the queue is full are dropped so
    read() always hands out a recent frame.
    """

    def __init__(self, cap_w, cap_h, cap_fps):
        # Sensor capture mode; alternatives kept for reference.
        #self.cap_orig_w, self.cap_orig_h = 3264, 2464 # 4/3 , 21 fps
        self.cap_orig_w, self.cap_orig_h = 1920, 1080 # 16/9 , 30 fps
        #self.cap_orig_w, self.cap_orig_h = 1280, 720 # 60/120 fps
        self.cap_orig_fps = 30
        # Output geometry/rate requested by the caller.
        self.cap_out_w = cap_w
        self.cap_out_h = cap_h
        self.cap_out_fps = cap_fps
        self.h_thread = None
        self.b_exit = False  # reader-thread stop flag
        self.max_queue = 3   # tiny buffer keeps latency low
        self.queue = queue.Queue(maxsize=self.max_queue)
        self.cap_str = 'nvarguscamerasrc tnr-strength=1 tnr-mode=2 ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 '\
            '! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx '\
            '! videorate ! video/x-raw, framerate=(fraction)%d/1 '\
            '! videoconvert ! video/x-raw, format=BGR ! appsink sync=false ' \
            % (self.cap_orig_w, self.cap_orig_h, self.cap_orig_fps, self.cap_out_w, self.cap_out_h, self.cap_out_fps)
        self.cap = None

    def open(self):
        """Open the capture pipeline and start the reader thread.

        Returns True on success; calling it again on an open camera is a
        no-op returning True.

        Fix vs. original: the reader thread was started even when the
        pipeline could not be created/opened, making read_run() fail (and
        log a traceback) on every iteration forever.
        """
        if self.cap:
            return True
        try:
            self.cap = cv2.VideoCapture(self.cap_str, cv2.CAP_GSTREAMER)
        except Exception:
            traceback.print_exc()
        if self.cap is None or not self.cap.isOpened():
            self.cap = None
            return False
        self.h_thread = threading.Thread(target=self.read_run)
        self.h_thread.start()
        return True

    def read_run(self):
        """Reader-thread loop: move frames from the device into the queue,
        dropping new frames while the queue is full."""
        while not self.b_exit:
            try:
                ret, img = self.cap.read()
                if ret:
                    if self.queue.qsize() < self.max_queue:
                        self.queue.put_nowait(img)
            except Exception:
                traceback.print_exc()

    def read(self):
        """Return (True, frame), or (False, None) if the camera is not
        open, no frame arrives within 5 seconds, or close() was called."""
        if not self.cap:
            return False, None
        try:
            img = self.queue.get(block=True, timeout=5)
            if img is None:  # sentinel pushed by close()
                return False, None
            return True, img
        except queue.Empty:
            pass
        return False, None

    def close(self):
        """Stop the reader thread and release the device.

        Fix vs. original: joining unconditionally raised AttributeError
        when the camera had never been opened (h_thread is None).
        """
        self.b_exit = True
        try:
            if self.cap:
                self.cap.release()
                self.cap = None
        except Exception:
            pass
        try:
            # Wake up any read() blocked on the queue.
            self.queue.put_nowait(None)
        except queue.Full:
            pass
        if self.h_thread is not None:
            self.h_thread.join()
            self.h_thread = None
| 29.638554 | 164 | 0.541463 |
4f6509acde216569bae5ef42caf50385c567ec65 | 1,165 | py | Python | PythonExercicio/ex062.py | VazMF/Curso-de-Python | 9b33f0bc3de5dd9380fdd4eb3d901b04e536d45a | [
"MIT"
] | null | null | null | PythonExercicio/ex062.py | VazMF/Curso-de-Python | 9b33f0bc3de5dd9380fdd4eb3d901b04e536d45a | [
"MIT"
] | null | null | null | PythonExercicio/ex062.py | VazMF/Curso-de-Python | 9b33f0bc3de5dd9380fdd4eb3d901b04e536d45a | [
"MIT"
] | null | null | null | #melhore o ex061, perguntando ao usuário se ele quer mostrar mais alguns termos. O programa encerra quando ele disser que quer mostrar 0 termos.
print('\033[35m-=-=-=-GERADOR-DE-PA=-=--=-\033[m') #titulo
primeiro = int(input('Primeiro termo: ')) #input do primeiro termo
razao = int(input('Razão da PA: ')) #input da razão
termo = primeiro #variavel termo recebe o conteudo da variavel primeiro
cont = 1 #contador recebe 1
total = 0 #total recebe 0
mais = 10 #define o mais como 10 pq esse é a quantidade inicial de termos que será mostrada
while mais != 0: #enquanto mais for diferente de 0
total += mais #o total será a soma do total com o mais
while cont <= total: #enquanto o contador for menor ou igual ao total
print(f'{termo} \033[35m->\033[m ', end='') #mostra o resultado com os 10 primeiros termos
termo += razao #termo é o termo somado com a razão
cont += 1 #contador sobe 1
print('PAUSA')
mais = int(input('Quantos termos você quer mostrar a mais? [0 para parar] ')) #lê quantos termos mais o usuário deseja mostrar
print(f'Progressão finalizada com {total} termos mostrados.') #mostra o total de termos mostrado
| 64.722222 | 144 | 0.71073 |
2e17d42191b40ddd586a3685d06dd1dc28ec5c8e | 262 | py | Python | Statistics/Randomgen_1.py | Sakthi0722/StatsCalculator | f18b8ddb300ccaba796665829ddf20777ff15ed9 | [
"MIT"
] | 1 | 2021-09-20T18:39:53.000Z | 2021-09-20T18:39:53.000Z | Statistics/Randomgen_1.py | Sakthi0722/StatsCalculator | f18b8ddb300ccaba796665829ddf20777ff15ed9 | [
"MIT"
] | null | null | null | Statistics/Randomgen_1.py | Sakthi0722/StatsCalculator | f18b8ddb300ccaba796665829ddf20777ff15ed9 | [
"MIT"
] | 2 | 2021-07-12T18:58:23.000Z | 2021-07-14T17:47:40.000Z | import random
# Unseeded random generation over a range: integer and float variants.
r1 = random.randint(1, 40)  # inclusive integer in [1, 40]
print("Generate a random number without a seed between a range of two numbers - Integer:", r1)
r2 = random.uniform(1, 40)  # float in [1, 40]
print("Generate a random number without a seed between a range of two numbers - Decimal:", r2)
| 26.2 | 94 | 0.732824 |
7840eeb4a6b127a74f8239cb985e1f87d1b3a2d8 | 2,478 | py | Python | profiles_api/models.py | xpa700/profiles-rest-api | bfb124b408ca21f9d8b9effebc04515b302aa8fe | [
"MIT"
] | null | null | null | profiles_api/models.py | xpa700/profiles-rest-api | bfb124b408ca21f9d8b9effebc04515b302aa8fe | [
"MIT"
] | 6 | 2020-06-06T01:40:42.000Z | 2022-02-10T14:49:42.000Z | profiles_api/models.py | xpa700/profiles-rest-api | bfb124b408ca21f9d8b9effebc04515b302aa8fe | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
# There should be two empty lines between classes/big blocs
class UserProfileManager(BaseUserManager):
    """Manager for user profiles"""

    def create_user(self, email, name, password=None):
        """Create a new user profile.

        Raises ValueError when no email is supplied; the password is
        hashed via set_password (never stored in plain text).
        """
        if not email:
            raise ValueError('Users must have an email address')
        # Lower-cases the domain part so lookups are case-insensitive.
        email = self.normalize_email(email)
        user = self.model(email=email, name=name,)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with given details"""
        user = self.create_user(email, name, password)
        # Grant full permissions and admin-site access.
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system"""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserProfileManager()

    # Users authenticate with their email address instead of a username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']

    def get_full_name(self):
        """Retrieve full name for user"""
        return self.name

    def get_short_name(self):
        """Retrieve short name of user"""
        return self.name

    def __str__(self):
        """Return string representation of user"""
        return self.email
class ProfileFeedItem(models.Model):
    """Profile status update"""
    user_profile = models.ForeignKey(
        # The following allows us to reference the User as a foreign ForeignKey
        # Instead of linking to the our custom UserProfile, we use Django user
        # because otherwise, if we decide to change the auth process later to use
        # another class, we won't have to update all the references (foreign keys)
        settings.AUTH_USER_MODEL,
        # Deleting a user also deletes their feed items.
        on_delete=models.CASCADE
    )
    status_text = models.CharField(max_length=255)
    created_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Return the model as a string"""
        return self.status_text
| 31.367089 | 82 | 0.686441 |
7ed7ea1369a6078d095878078a37e0e94b13c25c | 1,065 | py | Python | server/tests/conftest.py | daryllstrauss/prefect | 2dd308ec39dc189a85defa216ef15ebec78855f5 | [
"Apache-2.0"
] | 1 | 2020-08-01T15:44:32.000Z | 2020-08-01T15:44:32.000Z | server/tests/conftest.py | daryllstrauss/prefect | 2dd308ec39dc189a85defa216ef15ebec78855f5 | [
"Apache-2.0"
] | null | null | null | server/tests/conftest.py | daryllstrauss/prefect | 2dd308ec39dc189a85defa216ef15ebec78855f5 | [
"Apache-2.0"
] | 1 | 2020-05-04T13:22:11.000Z | 2020-05-04T13:22:11.000Z | # Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import asyncio
import inspect
import pytest
import prefect
import prefect_server
from prefect.engine.state import Running, Submitted, Success
from prefect_server import api, cli, config
from prefect_server.database import hasura, models
import sqlalchemy as sa
from .fixtures.database_fixtures import *
def pytest_collection_modifyitems(session, config, items):
    """
    Modify tests prior to execution
    """
    for item in items:
        # automatically add @pytest.mark.asyncio to async tests
        if isinstance(item, pytest.Function) and inspect.iscoroutinefunction(
            item.function
        ):
            item.add_marker(pytest.mark.asyncio)
# redefine the event loop to support module-scoped fixtures
# https://github.com/pytest-dev/pytest-asyncio/issues/68
@pytest.fixture(scope="session")
def event_loop(request):
    """Session-scoped event loop shared by all async fixtures/tests.

    Uses plain ``pytest.fixture``: ``pytest.yield_fixture`` has been
    deprecated since pytest 3.0 and removed in modern pytest releases;
    regular fixtures fully support the yield-for-teardown style.
    """
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    loop.close()
| 27.307692 | 77 | 0.749296 |
bcbd909185d1b1b8fb9e9917c725f9aa9462b246 | 2,923 | py | Python | src/zope/tal/tests/test_sourcepos.py | zopefoundation/zope.tal | a166a730f7aac87f7a8f3c2796e8d4b914930ac9 | [
"ZPL-2.1"
] | 2 | 2017-12-15T07:17:24.000Z | 2020-03-09T10:05:40.000Z | src/zope/tal/tests/test_sourcepos.py | zopefoundation/zope.tal | a166a730f7aac87f7a8f3c2796e8d4b914930ac9 | [
"ZPL-2.1"
] | 12 | 2015-06-05T20:40:00.000Z | 2022-01-03T13:51:43.000Z | src/zope/tal/tests/test_sourcepos.py | zopefoundation/zope.tal | a166a730f7aac87f7a8f3c2796e8d4b914930ac9 | [
"ZPL-2.1"
] | 6 | 2015-04-03T09:48:20.000Z | 2021-05-14T00:59:41.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for TALInterpreter.
"""
import unittest
try:
# Python 2.x
from StringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
from zope.tal.htmltalparser import HTMLTALParser
from zope.tal.talinterpreter import TALInterpreter
from zope.tal.talgenerator import TALGenerator
from zope.tal.dummyengine import DummyEngine
# TAL page that fills the "body" slot of the "main" macro.
page1 = '''<html metal:use-macro="main"><body>
<div metal:fill-slot="body">
page1=<span tal:replace="position:" />
</div>
</body></html>'''

# Macro defining the page layout; defines the "body" slot and pulls in "foot".
main_template = '''<html metal:define-macro="main"><body>
main_template=<span tal:replace="position:" />
<div metal:define-slot="body" />
main_template=<span tal:replace="position:" />
<div metal:use-macro="foot" />
main_template=<span tal:replace="position:" />
</body></html>'''

# Macro used by main_template.
footer = '''<div metal:define-macro="foot">
footer=<span tal:replace="position:" />
</div>'''

# Expected rendering: each "position:" expression reports the source file
# name and the (line, column) where it appears in that file.
expected = '''<html><body>
main_template=main_template (2,14)
<div>
page1=page1 (3,6)
</div>
main_template=main_template (4,14)
<div>
footer=footer (2,7)
</div>
main_template=main_template (6,14)
</body></html>'''
class SourcePosTestCase(unittest.TestCase):
    """Checks that TAL tracks source file names and positions across macros."""

    def parse(self, eng, s, fn):
        # Compile TAL source `s` as HTML, tagging the generated program with
        # source file name `fn` so "position:" expressions can report it.
        gen = TALGenerator(expressionCompiler=eng, xml=0, source_file=fn)
        parser = HTMLTALParser(gen)
        parser.parseString(s)
        program, macros = parser.getCode()
        return program, macros

    def test_source_positions(self):
        # Ensure source file and position are set correctly by TAL
        macros = {}
        eng = DummyEngine(macros)
        page1_program, page1_macros = self.parse(eng, page1, 'page1')
        main_template_program, main_template_macros = self.parse(
            eng, main_template, 'main_template')
        footer_program, footer_macros = self.parse(eng, footer, 'footer')

        # Wire the macros the page references into the shared registry.
        macros['main'] = main_template_macros['main']
        macros['foot'] = footer_macros['foot']

        stream = StringIO()
        interp = TALInterpreter(page1_program, macros, eng, stream)
        interp()
        self.assertEqual(stream.getvalue().strip(), expected.strip(),
                         "Got result:\n%s\nExpected:\n%s"
                         % (stream.getvalue(), expected))
def test_suite():
    """Return the test suite for this module.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13; ``TestLoader.loadTestsFromTestCase`` is the supported equivalent.
    """
    return unittest.defaultTestLoader.loadTestsFromTestCase(SourcePosTestCase)
| 31.430108 | 78 | 0.648307 |
eee428181051c616095b3104c978224d12ab8db6 | 16,910 | py | Python | ctapipe/io/hdftableio.py | orelgueta/ctapipe | ee28440e83cc283ccd57428d5fdad764a1e786f0 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/io/hdftableio.py | orelgueta/ctapipe | ee28440e83cc283ccd57428d5fdad764a1e786f0 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/io/hdftableio.py | orelgueta/ctapipe | ee28440e83cc283ccd57428d5fdad764a1e786f0 | [
"BSD-3-Clause"
] | null | null | null | import re
from abc import abstractmethod, ABCMeta
from collections import defaultdict
from functools import partial
import numpy as np
import tables
from astropy.time import Time
from astropy.units import Quantity
import ctapipe
from ctapipe.core import Component
__all__ = ['TableWriter',
'TableReader',
'HDF5TableWriter',
'HDF5TableReader']
# Maps a scalar type name (as produced by type(x).__name__ or by
# ndarray.dtype.name) to the pytables column class used to store it.
# Plain Python floats/ints are widened to 64-bit columns.
PYTABLES_TYPE_MAP = {
    'float': tables.Float64Col,
    'float64': tables.Float64Col,
    'float32': tables.Float32Col,
    'int': tables.IntCol,
    'int32': tables.Int32Col,
    'int64': tables.Int64Col,
    'bool': tables.BoolCol,
}
class TableWriter(Component, metaclass=ABCMeta):
    """Abstract base class for writers that serialize Containers to tables.

    Keeps per-table column-exclusion patterns and per-column value
    transforms that concrete implementations consult while writing.
    """

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        # table_name -> {col_name: transform callable}
        self._transforms = defaultdict(dict)
        # table_name -> [compiled regex, ...]
        self._exclusions = defaultdict(list)

    def exclude(self, table_name, pattern):
        """
        Exclude any columns matching the pattern from being written

        Parameters
        ----------
        table_name: str
            name of table on which to apply the exclusion
        pattern: str
            regular expression string to match column name
        """
        self._exclusions[table_name].append(re.compile(pattern))

    def _is_column_excluded(self, table_name, col_name):
        """Return True if *col_name* matches any exclusion for *table_name*."""
        return any(pat.match(col_name)
                   for pat in self._exclusions[table_name])

    def add_column_transform(self, table_name, col_name, transform):
        """
        Register a function that converts a column's value before it is
        written to the output file.

        Parameters
        ----------
        table_name: str
            identifier of table being written
        col_name: str
            name of column in the table (or item in the Container)
        transform: callable
            function that take a value and returns a new one
        """
        self._transforms[table_name][col_name] = transform
        self.log.debug("Added transform: {}/{} -> {}".format(table_name,
                                                             col_name,
                                                             transform))

    @abstractmethod
    def write(self, table_name, container):
        """
        Write the contents of the given container to a table. The first call
        to write will create a schema and initialize the table within the
        file. The shape of data within the container must not change between
        calls, since variable-length arrays are not supported.

        Parameters
        ----------
        table_name: str
            name of table to write to
        container: `ctapipe.core.Container`
            container to write
        """
        pass

    def _apply_col_transform(self, table_name, col_name, value):
        """Run the registered transform for this column, if there is one."""
        transform = self._transforms[table_name].get(col_name)
        if transform is not None:
            return transform(value)
        return value
class HDF5TableWriter(TableWriter):
    """
    A very basic table writer that can take a container (or more than one)
    and write it to an HDF5 file. It does _not_ recursively write the
    container. This is intended as a building block to create a more complex
    I/O system.

    It works by creating a HDF5 Table description from the `Field`s inside a
    container, where each item becomes a column in the table. The first time
    `write()` is called, the container is registered and the table created in
    the output file.

    Each item in the container can also have an optional transform function
    that is called before writing to transform the value. For example,
    unit quantities always have their units removed, or converted to a
    common unit if specified in the `Field`.

    Any metadata in the `Container` (stored in `Container.meta`) will be
    written to the table's header on the first call to write()

    Multiple tables may be written at once in a single file, as long as you
    change the table_name attribute to write() to specify which one to write
    to.

    TODO:
    - ability to write several containers to the same table (appending a
    string to each column name). Perhaps `write(name, dict(method_a=cont,
    method_b=cont2))`, where "method_a_X" would be a column name. May be
    possible to do this with some container magic, like joining two
    containers `joined_container(cont1, cont2, "A", "B")` or "cont1+cont2".
    Perhaps need to provide a better way to get container contents as a
    dictionary.

    Parameters
    ----------
    filename: str
        name of hdf5 output file
    group_name: str
        name of group into which to put all of the tables generated by this
        Writer (it will be placed under "/" in the file)
    """

    def __init__(self, filename, group_name, **kwargs):
        super().__init__()
        self._schemas = {}  # table_name -> generated pytables description class
        self._tables = {}  # table_name -> pytables Table instance
        # Extra kwargs are forwarded to tables.open_file (e.g. filters).
        self._h5file = tables.open_file(filename, mode="w", **kwargs)
        self._group = self._h5file.create_group("/", group_name)
        self.log.debug("h5file: {}".format(self._h5file))

    def __del__(self):
        # NOTE(review): cleanup relies on garbage collection; an explicit
        # close() / context-manager API would be more deterministic — confirm.
        self._h5file.close()

    def _create_hdf5_table_schema(self, table_name, container):
        """
        Creates a pytables description class for a container
        and registers it in the Writer

        Parameters
        ----------
        table_name: str
            name of table
        container: ctapipe.core.Container
            instance of an initalized container

        Returns
        -------
        dictionary of extra metadata to add to the table's header
        """

        # A fresh description class is created per table; its `columns` dict
        # is filled in below, one entry per container field.
        class Schema(tables.IsDescription):
            pass

        meta = {}  # any extra meta-data generated here (like units, etc)

        # create pytables schema description for the given container
        for col_name, value in container.items():

            typename = ""
            shape = 1

            if self._is_column_excluded(table_name, col_name):
                self.log.debug("excluded column: {}/{}".format(table_name,
                                                               col_name))
                continue

            # Quantities are stored unit-less; the unit actually used
            # (requested via the Field, or the value's own) is recorded in
            # the header as "<col>_UNIT" so readers can restore it.
            if isinstance(value, Quantity):
                req_unit = container.fields[col_name].unit

                if req_unit is not None:
                    tr = partial(tr_convert_and_strip_unit, unit=req_unit)
                    meta['{}_UNIT'.format(col_name)] = str(req_unit)
                else:
                    tr = lambda x: x.value
                    meta['{}_UNIT'.format(col_name)] = str(value.unit)

                # `value` is replaced by its transformed form so the
                # type-dispatch below sees the raw number/array.
                value = tr(value)
                self.add_column_transform(table_name, col_name, tr)

            if isinstance(value, np.ndarray):
                typename = value.dtype.name
                coltype = PYTABLES_TYPE_MAP[typename]
                shape = value.shape
                Schema.columns[col_name] = coltype(shape=shape)

            # Times are stored as float columns via the tr_time_to_float
            # transform (MJD); scalars fall through to the type-name map.
            if isinstance(value, Time):
                # TODO: really should use MET, but need a func for that
                Schema.columns[col_name] = tables.Float64Col()
                self.add_column_transform(table_name, col_name,
                                          tr_time_to_float)
            elif type(value).__name__ in PYTABLES_TYPE_MAP:
                typename = type(value).__name__
                coltype = PYTABLES_TYPE_MAP[typename]
                Schema.columns[col_name] = coltype()

            self.log.debug("Table {}: added col: {} type: {} shape: {}"
                           .format(table_name, col_name, typename, shape))

        self._schemas[table_name] = Schema
        meta['CTAPIPE_VERSION'] = ctapipe.__version__
        return meta

    def _setup_new_table(self, table_name, container):
        """ set up the table. This is called the first time `write()`
        is called on a new table """
        self.log.debug("Initializing table '{}'".format(table_name))
        meta = self._create_hdf5_table_schema(table_name, container)
        meta.update(container.meta)  # copy metadata from container

        table = self._h5file.create_table(where=self._group,
                                          name=table_name,
                                          title="storage of {}".format(
                                              container.__class__.__name__),
                                          description=self._schemas[table_name])
        # Both generated metadata (units, version) and the container's own
        # meta end up as header attributes of the table.
        for key, val in meta.items():
            table.attrs[key] = val

        self._tables[table_name] = table

    def _append_row(self, table_name, container):
        """
        append a row to an already initialized table. This is called
        automatically by `write()`
        """
        table = self._tables[table_name]
        row = table.row

        for colname in table.colnames:
            # Transforms registered during schema creation (unit stripping,
            # time conversion) are applied here on every row.
            value = self._apply_col_transform(table_name, colname,
                                              container[colname])
            row[colname] = value

        row.append()

    def write(self, table_name, container):
        """
        Write the contents of the given container to a table. The first call
        to write will create a schema and initialize the table within the
        file. The shape of data within the container must not change between
        calls, since variable-length arrays are not supported.

        Parameters
        ----------
        table_name: str
            name of table to write to
        container: `ctapipe.core.Container`
            container to write
        """
        if table_name not in self._schemas:
            self._setup_new_table(table_name, container)

        self._append_row(table_name, container)
class TableReader(Component, metaclass=ABCMeta):
    """
    Base class for row-wise table readers. Generally methods that read a
    full table at once are preferred to this method, since they are faster,
    but this can be used to re-play a table row by row into a
    `ctapipe.core.Container` class (the opposite of TableWriter)
    """

    def __init__(self):
        super().__init__()
        # table_name -> [column names to copy into the container]
        self._cols_to_read = defaultdict(list)
        # table_name -> {col_name: transform callable}
        self._transforms = defaultdict(dict)

    def add_column_transform(self, table_name, col_name, transform):
        """
        Register a function applied to a column's raw value while reading.

        Parameters
        ----------
        table_name: str
            identifier of table being written
        col_name: str
            name of column in the table (or item in the Container)
        transform: callable
            function that take a value and returns a new one
        """
        self._transforms[table_name][col_name] = transform
        self.log.debug("Added transform: {}/{} -> {}".format(table_name,
                                                             col_name,
                                                             transform))

    def _apply_col_transform(self, table_name, col_name, value):
        """Run the registered transform for this column, if there is one."""
        transform = self._transforms[table_name].get(col_name)
        if transform is not None:
            return transform(value)
        return value

    @abstractmethod
    def read(self, table_name, container):
        """
        Returns a generator that reads the next row from the table into the
        given container. The generator returns the same container. Note that
        no containers are copied, the data are overwritten inside.
        """
        pass
class HDF5TableReader(TableReader):
    """
    Reader that reads a single row of an HDF5 table at once into a Container.
    Simply construct a `HDF5TableReader` with an input HDF5 file,
    and call the `read(path, container)` method to get a generator that fills
    the given container with a new row of the table on each access.

    Columns in the table are automatically mapped to container fields by
    name, and if a field is missing in either, it is skipped during read,
    but a warning is emitted.

    Columns that were written by HDF5TableWriter and which had unit
    transforms applied, will have the units re-applied when reading (the
    unit used is stored in the header attributes).

    Note that this is only useful if you want to read all information *one
    event at a time* into a container, which is not very I/O efficient. For
    some other use cases, it may be much more efficient to access the
    table data directly, for example to read an entire column or table at
    once (which means not using the Container data structure).

    Todo:
    - add ability to synchronize reading of multiple tables on a key
    - add ability (also with TableWriter) to read a row into n containers at
    once, assuming no naming conflicts (so we can add e.g. event_id)
    """

    def __init__(self, filename):
        """
        Parameters
        ----------
        filename: str
            name of hdf5 file to open for reading
        """
        super().__init__()
        self._tables = {}
        self._h5file = tables.open_file(filename)

    def _setup_table(self, table_name, container):
        """Register the table node and build the column/transform maps."""
        tab = self._h5file.get_node(table_name)
        self._tables[table_name] = tab
        self._map_table_to_container(table_name, container)
        self._map_transforms_from_table_header(table_name)
        return tab

    def _map_transforms_from_table_header(self, table_name):
        """
        create any transforms needed to "undo" ones in the writer
        """
        tab = self._tables[table_name]
        for attr in tab.attrs._f_list():
            if attr.endswith("_UNIT"):
                # strip the "_UNIT" suffix to recover the column name
                colname = attr[:-5]
                tr = partial(tr_add_unit, unitname=tab.attrs[attr])
                self.add_column_transform(table_name, colname, tr)

    def _map_table_to_container(self, table_name, container):
        """ identifies which columns in the table to read into the container,
        by comparing their names."""
        tab = self._tables[table_name]
        for colname in tab.colnames:
            if colname in container.fields:
                self._cols_to_read[table_name].append(colname)
            else:
                # Logger.warn is a deprecated alias of Logger.warning
                self.log.warning("Table '{}' has column '{}' that is not in "
                                 "container {}. It will be skipped"
                                 .format(table_name, colname,
                                         container.__class__.__name__))

        # also check that the container doesn't have fields that are not
        # in the table:
        for colname in container.fields:
            if colname not in self._cols_to_read[table_name]:
                self.log.warning("Table '{}' is missing column '{}' that is "
                                 "in container {}. It will be skipped"
                                 .format(table_name, colname,
                                         container.__class__.__name__))

        # copy all user-defined attributes back to Container.meta
        for key in tab.attrs._f_list():
            container.meta[key] = tab.attrs[key]

    def read(self, table_name, container):
        """
        Returns a generator that reads the next row from the table into the
        given container. The generator returns the same container. Note that
        no containers are copied, the data are overwritten inside.
        """
        if table_name not in self._tables:
            tab = self._setup_table(table_name, container)
        else:
            tab = self._tables[table_name]

        row_count = 0

        while True:
            try:
                row = tab[row_count]
            except IndexError:
                return  # stop generator when done

            for colname in self._cols_to_read[table_name]:
                container[colname] = self._apply_col_transform(table_name,
                                                               colname,
                                                               row[colname])

            yield container
            row_count += 1
def tr_convert_and_strip_unit(quantity, unit):
    """Convert *quantity* to *unit* and return the bare numeric value."""
    converted = quantity.to(unit)
    return converted.value
def tr_list_to_mask(thelist, length):
    """Transform a list of indices into a fixed-length boolean mask.

    Parameters
    ----------
    thelist: list of int
        indices that should be True in the mask
    length: int
        total length of the returned mask

    Returns
    -------
    np.ndarray of bool
    """
    # `np.bool` was a deprecated alias of the builtin `bool` and was removed
    # in NumPy 1.24; use the builtin directly.
    arr = np.zeros(shape=length, dtype=bool)
    arr[thelist] = True
    return arr
def tr_time_to_float(thetime):
    """Represent a time object as a float (its Modified Julian Date)."""
    return thetime.mjd
def tr_add_unit(value, unitname):
    """Re-attach a unit (given by name) to a bare value as a Quantity."""
    return Quantity(value, unitname)
| 36.055437 | 80 | 0.601715 |
9d7269283c08fe7768593bbc1c9f272de97e4292 | 10,599 | py | Python | python/ex_draw_bitmap.py | allegrofb/Allegrofb | 0f4b29d3b783ccc5bacda98bd2c1c11c716a1d9e | [
"BSD-3-Clause"
] | null | null | null | python/ex_draw_bitmap.py | allegrofb/Allegrofb | 0f4b29d3b783ccc5bacda98bd2c1c11c716a1d9e | [
"BSD-3-Clause"
] | null | null | null | python/ex_draw_bitmap.py | allegrofb/Allegrofb | 0f4b29d3b783ccc5bacda98bd2c1c11c716a1d9e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
import sys, os, glob
from random import *
from math import *
from ctypes import *
# Get path to examples data.
p = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "examples"))
from allegro import *
def abort_example(text):
    """Write *text* to stderr and terminate the process with exit status 1."""
    sys.stderr.write(text)
    raise SystemExit(1)
# Target frame rate; also the size of the frame-time ring buffer in Example.
FPS = 60
# Hard upper bound on the number of simultaneously animated sprites.
MAX_SPRITES = 1024
class Sprite:
    """A moving bitmap: position (x, y) and per-second velocity (dx, dy)."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.dx = 0
        self.dy = 0
# On-screen help lines, drawn at the bottom of the display when enabled.
text = [
    "Space - toggle use of textures",
    "B - toggle alpha blending",
    "Left/Right - change bitmap size",
    "Up/Down - change bitmap count",
    "F1 - toggle help text"
]
class Example:
    """Mutable module-wide state shared by all the demo's helper functions."""
    # Pre-allocated sprite pool; only the first `sprite_count` are live.
    sprites = [Sprite() for i in range(MAX_SPRITES)]
    use_memory_bitmaps = False
    # Blend mode index 0..3; starts as False (== 0) and is incremented as int.
    blending = False
    display = None
    mysha = None
    bitmap = None
    bitmap_size = 0
    sprite_count = 0
    show_help = True
    font = None
    # Mouse-drag state for the gesture controls.
    mouse_down = False
    last_x, last_y = 0, 0
    # Colors, filled in by main() after Allegro is initialized.
    white = None
    half_white = None
    dark = None
    red = None
    # Seconds spent in the last redraw (used for the "/ sec" display).
    direct_speed_measure = 1.0
    # Ring buffer of recent frame timestamps for the FPS estimate.
    ftpos = 0
    frame_times = [0.0] * FPS
# The single shared state instance used by all the functions below.
example = Example()
def add_time():
    """Record the current time in the frame-time ring buffer."""
    example.frame_times[example.ftpos] = al_get_time()
    example.ftpos = (example.ftpos + 1) % FPS
def get_fps():
    """Estimate (average_fps, jitter) from the frame-time ring buffer."""
    prev = FPS - 1
    shortest = 1
    longest = 1 / 1000000
    total = 0
    for i in range(FPS):
        # Skip the slot about to be overwritten; its delta spans the whole
        # buffer and would skew the statistics.
        if i != example.ftpos:
            dt = example.frame_times[i] - example.frame_times[prev]
            if 0 < dt < shortest:
                shortest = dt
            if dt > longest:
                longest = dt
            total += dt
        prev = i
    total /= FPS - 1
    average = ceil(1 / total)
    spread = 1 / shortest - 1 / longest
    return average, floor(spread / 2)
def add_sprite():
    """Spawn one sprite at a random position with a random direction."""
    if example.sprite_count >= MAX_SPRITES:
        return
    w = al_get_display_width(example.display)
    h = al_get_display_height(example.display)
    s = example.sprites[example.sprite_count]
    example.sprite_count += 1
    # NOTE(review): `a` looks like degrees but cos/sin expect radians; the
    # resulting direction is still effectively random, so kept as-is.
    a = randint(0, 359)
    s.x = randint(0, w - example.bitmap_size - 1)
    s.y = randint(0, h - example.bitmap_size - 1)
    s.dx = cos(a) * FPS * 2
    s.dy = sin(a) * FPS * 2
def add_sprites(n):
    """Spawn *n* sprites (the pool caps the total at MAX_SPRITES)."""
    for _ in range(n):
        add_sprite()
def remove_sprites(n):
    """Retire *n* sprites, never dropping the live count below zero."""
    example.sprite_count = max(0, example.sprite_count - n)
def change_size(size):
    """Rebuild the sprite bitmap at size x size pixels (clamped to 1..1024).

    The bitmap is recreated with the current memory/texture flag and filled
    with a scaled copy of the mysha image.
    """
    size = max(1, min(size, 1024))
    if example.bitmap:
        al_destroy_bitmap(example.bitmap)
    al_set_new_bitmap_flags(
        ALLEGRO_MEMORY_BITMAP if example.use_memory_bitmaps else 0)
    example.bitmap = al_create_bitmap(size, size)
    example.bitmap_size = size
    al_set_target_bitmap(example.bitmap)
    al_set_blender(ALLEGRO_ADD, ALLEGRO_ONE, ALLEGRO_ZERO, example.white)
    al_clear_to_color(al_map_rgba_f(0, 0, 0, 0))
    src_w = al_get_bitmap_width(example.mysha)
    src_h = al_get_bitmap_height(example.mysha)
    al_draw_scaled_bitmap(example.mysha, 0, 0, src_w, src_h, 0, 0,
                          size, size, 0)
    # Restore drawing to the display's backbuffer.
    al_set_target_backbuffer(example.display)
def sprite_update(s):
    """Advance sprite *s* by one tick, reflecting off the display edges."""
    w = al_get_display_width(example.display)
    h = al_get_display_height(example.display)

    # Velocities are per-second; one tick is 1/FPS seconds.
    s.x += s.dx / FPS
    s.y += s.dy / FPS

    # Mirror position and invert velocity on each edge the sprite crossed.
    if s.x < 0:
        s.x = -s.x
        s.dx = -s.dx
    if s.x + example.bitmap_size > w:
        s.x = -s.x + 2 * (w - example.bitmap_size)
        s.dx = -s.dx
    if s.y < 0:
        s.y = -s.y
        s.dy = -s.dy
    if s.y + example.bitmap_size > h:
        s.y = -s.y + 2 * (h - example.bitmap_size)
        s.dy = -s.dy

    # If the bitmap is larger than the display, just center it.
    if example.bitmap_size > w: s.x = w / 2 - example.bitmap_size / 2
    if example.bitmap_size > h: s.y = h / 2 - example.bitmap_size / 2
def update():
    """Advance every live sprite by one tick."""
    for sprite in example.sprites[:example.sprite_count]:
        sprite_update(sprite)
def redraw():
    """Draw all sprites with the current blend mode, plus the HUD text."""
    w = al_get_display_width(example.display)
    h = al_get_display_height(example.display)
    fh = al_get_font_line_height(example.font)
    info = ["textures", "memory buffers"]
    binfo = ["alpha", "additive", "tinted", "solid"]
    tint = example.white

    # Select blender + tint for the chosen mode (0..3).
    if example.blending == 0:
        al_set_blender(ALLEGRO_ADD, ALLEGRO_ALPHA, ALLEGRO_INVERSE_ALPHA)
        tint = example.half_white
    elif example.blending == 1:
        al_set_blender(ALLEGRO_ADD, ALLEGRO_ONE, ALLEGRO_ONE)
        tint = example.dark
    elif example.blending == 2:
        al_set_blender(ALLEGRO_ADD, ALLEGRO_ONE, ALLEGRO_ZERO)
        tint = example.red
    elif example.blending == 3:
        # NOTE(review): this binding accepts an optional 4th (color) argument
        # here and in change_size() — presumably a wrapper convenience; confirm.
        al_set_blender(ALLEGRO_ADD, ALLEGRO_ONE, ALLEGRO_ZERO, example.white)

    for i in range(example.sprite_count):
        s = example.sprites[i]
        al_draw_tinted_bitmap(example.bitmap, tint, s.x, s.y, 0)

    # Back to normal alpha blending for the HUD text.
    al_set_blender(ALLEGRO_ADD, ALLEGRO_ALPHA, ALLEGRO_INVERSE_ALPHA)
    if example.show_help:
        for i in range(5):
            al_draw_text(example.font, example.white, 0, h - 5 * fh + i * fh, 0, text[i])
        al_draw_textf(example.font, example.white, 0, 0, 0, "count: %d",
                      example.sprite_count)
        al_draw_textf(example.font, example.white, 0, fh, 0, "size: %d",
                      example.bitmap_size)
        al_draw_textf(example.font, example.white, 0, fh * 2, 0, "%s",
                      info[example.use_memory_bitmaps])
        al_draw_textf(example.font, example.white, 0, fh * 3, 0, "%s",
                      binfo[example.blending])

    # FPS statistics, right-aligned in the top corner.
    f1, f2 = get_fps()
    al_draw_textf(example.font, example.white, w, 0, ALLEGRO_ALIGN_RIGHT, "%s",
                  "FPS: %4d +- %-4d" % (f1, f2))
    al_draw_textf(example.font, example.white, w, fh, ALLEGRO_ALIGN_RIGHT, "%s",
                  "%4d / sec" % int(1.0 / example.direct_speed_measure))
def main():
    """Initialize Allegro, then run the demo's event/draw loop until quit."""
    w, h = 640, 480
    done = False
    need_redraw = True
    example.show_help = True

    # --- Allegro and addon initialization ---
    if not al_install_system(ALLEGRO_VERSION_INT, None):
        abort_example("Failed to init Allegro.\n")
        sys.exit(1)

    if not al_init_image_addon():
        abort_example("Failed to init IIO addon.\n")
        sys.exit(1)

    al_init_font_addon()

    # Shrink the window if the first monitor is smaller than 640x480.
    al_get_num_video_adapters()
    info = ALLEGRO_MONITOR_INFO()
    al_get_monitor_info(0, byref(info))
    if info.x2 - info.x1 < w:
        w = info.x2 - info.x1
    if info.y2 - info.y1 < h:
        h = info.y2 - info.y1
    example.display = al_create_display(w, h)

    if not example.display:
        abort_example("Error creating display.\n")

    if not al_install_keyboard():
        abort_example("Error installing keyboard.\n")

    if not al_install_mouse():
        abort_example("Error installing mouse.\n")

    # --- Load resources and seed the scene ---
    example.font = al_load_font(p + "/data/fixed_font.tga", 0, 0)
    if not example.font:
        abort_example("Error loading data/fixed_font.tga\n")

    example.mysha = al_load_bitmap(p + "/data/mysha256x256.png")
    if not example.mysha:
        abort_example("Error loading data/mysha256x256.png\n")

    example.white = al_map_rgb_f(1, 1, 1)
    example.half_white = al_map_rgba_f(1, 1, 1, 0.5)
    example.dark = al_map_rgb(15, 15, 15)
    example.red = al_map_rgb_f(1, 0.2, 0.1)
    change_size(256)
    add_sprite()
    add_sprite()

    # --- Event sources: keyboard, mouse, frame timer, display ---
    timer = al_create_timer(1.0 / FPS)

    queue = al_create_event_queue()
    al_register_event_source(queue, al_get_keyboard_event_source())
    al_register_event_source(queue, al_get_mouse_event_source())
    al_register_event_source(queue, al_get_timer_event_source(timer))
    al_register_event_source(queue, al_get_display_event_source(example.display))

    al_start_timer(timer)
    while not done:
        event = ALLEGRO_EVENT()

        # Only redraw when the queue has drained, measuring how long it took.
        if need_redraw and al_is_event_queue_empty(queue):
            t = -al_get_time()
            add_time()
            al_clear_to_color(al_map_rgb_f(0, 0, 0))
            redraw()
            t += al_get_time()
            example.direct_speed_measure = t
            al_flip_display()
            need_redraw = False

        al_wait_for_event(queue, byref(event))
        # Keyboard controls (see the on-screen help `text`).
        if event.type == ALLEGRO_EVENT_KEY_CHAR:
            if event.keyboard.keycode == ALLEGRO_KEY_ESCAPE:
                done = True
            elif event.keyboard.keycode == ALLEGRO_KEY_UP:
                add_sprites(1)
            elif event.keyboard.keycode == ALLEGRO_KEY_DOWN:
                remove_sprites(1)
            elif event.keyboard.keycode == ALLEGRO_KEY_LEFT:
                change_size(example.bitmap_size - 1)
            elif event.keyboard.keycode == ALLEGRO_KEY_RIGHT:
                change_size(example.bitmap_size + 1)
            elif event.keyboard.keycode == ALLEGRO_KEY_F1:
                example.show_help ^= 1
            elif event.keyboard.keycode == ALLEGRO_KEY_SPACE:
                example.use_memory_bitmaps ^= 1
                change_size(example.bitmap_size)
            elif event.keyboard.keycode == ALLEGRO_KEY_B:
                example.blending += 1
                if example.blending == 4:
                    example.blending = 0
        elif event.type == ALLEGRO_EVENT_DISPLAY_CLOSE:
            done = True
        elif event.type == ALLEGRO_EVENT_TIMER:
            update()
            need_redraw = True
        elif event.type == ALLEGRO_EVENT_MOUSE_BUTTON_DOWN:
            example.mouse_down = True
            example.last_x = event.mouse.x
            example.last_y = event.mouse.y
        elif event.type == ALLEGRO_EVENT_MOUSE_BUTTON_UP:
            fh = al_get_font_line_height(example.font)
            example.mouse_down = False
            # Clicking a help line in the bottom-left acts as a button.
            if event.mouse.x < 40 and event.mouse.y >= h - fh * 5:
                button = (event.mouse.y - (h - fh * 5)) // fh
                if button == 0:
                    example.use_memory_bitmaps ^= 1
                    change_size(example.bitmap_size)
                if button == 1:
                    example.blending += 1
                    if example.blending == 4:
                        example.blending = 0
                if button == 4:
                    example.show_help ^= 1
        elif event.type == ALLEGRO_EVENT_MOUSE_AXES:
            # Dragging: vertical motion changes sprite count, horizontal
            # motion changes the bitmap size (with a 4-pixel dead zone).
            if example.mouse_down:
                dx = event.mouse.x - example.last_x
                dy = event.mouse.y - example.last_y
                if dy > 4:
                    add_sprites(int(dy / 4))
                if dy < -4:
                    remove_sprites(-int(dy / 4))
                if dx > 4:
                    change_size(example.bitmap_size + dx - 4)
                if dx < -4:
                    change_size(example.bitmap_size + dx + 4)
                example.last_x = event.mouse.x
                example.last_y = event.mouse.y

    al_destroy_bitmap(example.bitmap)
    al_uninstall_system()
# Hand control to Allegro's platform main-loop wrapper.
al_main(main)
| 30.456897 | 89 | 0.604397 |
62db3a502bf53524dc7f6642b60d7bf12c41332f | 14,337 | py | Python | config_app/config_util/k8saccessor.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | config_app/config_util/k8saccessor.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | config_app/config_util/k8saccessor.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import logging
import json
import base64
import datetime
import os
from requests import Request, Session
from collections import namedtuple
from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX
from config_app.config_util.k8sconfig import KubernetesConfig
logger = logging.getLogger(__name__)

# Label selector key and pod container name identifying Quay Enterprise
# resources in the cluster.
QE_DEPLOYMENT_LABEL = "quay-enterprise-component"
QE_CONTAINER_NAME = "quay-enterprise-app"

# Tuple containing response of the deployment rollout status method.
# status is one of: 'failed' | 'progressing' | 'available'
# message is any string describing the state.
DeploymentRolloutStatus = namedtuple("DeploymentRolloutStatus", ["status", "message"])
class K8sApiException(Exception):
    """Raised when a Kubernetes API call fails with an unexpected status."""
def _deployment_rollout_status_message(deployment, deployment_name):
    """
    Gets the friendly human readable message of the current state of the deployment rollout

    :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps
    :param deployment_name: string
    :return: DeploymentRolloutStatus
    """
    # Logic for rollout status pulled from the `kubectl rollout status` command:
    # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62
    # `observedGeneration`, `conditions` and `spec.replicas` are all optional in
    # the API (absent on freshly-created deployments), so they are defaulted
    # instead of raising KeyError.
    if deployment["metadata"]["generation"] <= deployment["status"].get("observedGeneration", 0):
        for cond in deployment["status"].get("conditions", []):
            if cond["type"] == "Progressing" and cond.get("reason") == "ProgressDeadlineExceeded":
                return DeploymentRolloutStatus(
                    status="failed",
                    message="Deployment %s's rollout failed. Please try again later."
                    % deployment_name,
                )

        # `spec.replicas` defaults to 1 in the Kubernetes API.
        desired_replicas = deployment["spec"].get("replicas", 1)
        current_replicas = deployment["status"].get("replicas", 0)
        if current_replicas == 0:
            return DeploymentRolloutStatus(
                status="available",
                message="Deployment %s updated (no replicas, so nothing to roll out)"
                % deployment_name,
            )

        # Some fields are optional in the spec, so if they're omitted, replace with defaults that won't indicate a wrong status
        available_replicas = deployment["status"].get("availableReplicas", 0)
        updated_replicas = deployment["status"].get("updatedReplicas", 0)

        if updated_replicas < desired_replicas:
            return DeploymentRolloutStatus(
                status="progressing",
                message="Waiting for rollout to finish: %d out of %d new replicas have been updated..."
                % (updated_replicas, desired_replicas),
            )

        if current_replicas > updated_replicas:
            return DeploymentRolloutStatus(
                status="progressing",
                message="Waiting for rollout to finish: %d old replicas are pending termination..."
                % (current_replicas - updated_replicas),
            )

        if available_replicas < updated_replicas:
            return DeploymentRolloutStatus(
                status="progressing",
                message="Waiting for rollout to finish: %d of %d updated replicas are available..."
                % (available_replicas, updated_replicas),
            )

        return DeploymentRolloutStatus(
            status="available", message="Deployment %s successfully rolled out." % deployment_name
        )

    return DeploymentRolloutStatus(
        status="progressing", message="Waiting for deployment spec to be updated..."
    )
class KubernetesAccessorSingleton(object):
""" Singleton allowing access to kubernetes operations """
_instance = None
def __init__(self, kube_config=None):
self.kube_config = kube_config
if kube_config is None:
self.kube_config = KubernetesConfig.from_env()
KubernetesAccessorSingleton._instance = self
@classmethod
def get_instance(cls, kube_config=None):
"""
Singleton getter implementation, returns the instance if one exists, otherwise creates the
instance and ties it to the class.
:return: KubernetesAccessorSingleton
"""
if cls._instance is None:
return cls(kube_config)
return cls._instance
def save_secret_to_directory(self, dir_path):
"""
Saves all files in the kubernetes secret to a local directory.
Assumes the directory is empty.
"""
secret = self._lookup_secret()
secret_data = secret.get("data", {})
# Make the `extra_ca_certs` dir to ensure we can populate extra certs
extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY)
os.mkdir(extra_ca_dir_path)
for secret_filename, data in secret_data.iteritems():
write_path = os.path.join(dir_path, secret_filename)
if EXTRA_CA_DIRECTORY_PREFIX in secret_filename:
write_path = os.path.join(
extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, "")
)
with open(write_path, "w") as f:
f.write(base64.b64decode(data))
return 200
def save_file_as_secret(self, name, file_pointer):
value = file_pointer.read()
self._update_secret_file(name, value)
def replace_qe_secret(self, new_secret_data):
"""
Removes the old config and replaces it with the new_secret_data as one action
"""
# Check first that the namespace for Red Hat Quay exists. If it does not, report that
# as an error, as it seems to be a common issue.
namespace_url = "namespaces/%s" % (self.kube_config.qe_namespace)
response = self._execute_k8s_api("GET", namespace_url)
if response.status_code // 100 != 2:
msg = (
"A Kubernetes namespace with name `%s` must be created to save config"
% self.kube_config.qe_namespace
)
raise Exception(msg)
# Check if the secret exists. If not, then we create an empty secret and then update the file
# inside.
secret_url = "namespaces/%s/secrets/%s" % (
self.kube_config.qe_namespace,
self.kube_config.qe_config_secret,
)
secret = self._lookup_secret()
if secret is None:
self._assert_success(
self._execute_k8s_api(
"POST",
secret_url,
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {"name": self.kube_config.qe_config_secret},
"data": {},
},
)
)
# Update the secret to reflect the file change.
secret["data"] = new_secret_data
self._assert_success(self._execute_k8s_api("PUT", secret_url, secret))
def get_deployment_rollout_status(self, deployment_name):
""""
Returns the status of a rollout of a given deployment
:return _DeploymentRolloutStatus
"""
deployment_selector_url = "namespaces/%s/deployments/%s" % (
self.kube_config.qe_namespace,
deployment_name,
)
response = self._execute_k8s_api("GET", deployment_selector_url, api_prefix="apis/apps/v1")
if response.status_code != 200:
return DeploymentRolloutStatus(
"failed", "Could not get deployment. Please check that the deployment exists"
)
deployment = json.loads(response.text)
return _deployment_rollout_status_message(deployment, deployment_name)
def get_qe_deployments(self):
""""
Returns all deployments matching the label selector provided in the KubeConfig
"""
deployment_selector_url = "namespaces/%s/deployments?labelSelector=%s%%3D%s" % (
self.kube_config.qe_namespace,
QE_DEPLOYMENT_LABEL,
self.kube_config.qe_deployment_selector,
)
response = self._execute_k8s_api(
"GET", deployment_selector_url, api_prefix="apis/extensions/v1beta1"
)
if response.status_code != 200:
return None
return json.loads(response.text)
def cycle_qe_deployments(self, deployment_names):
""""
Triggers a rollout of all desired deployments in the qe namespace
"""
for name in deployment_names:
logger.debug("Cycling deployment %s", name)
deployment_url = "namespaces/%s/deployments/%s" % (self.kube_config.qe_namespace, name)
# There is currently no command to simply rolling restart all the pods: https://github.com/kubernetes/kubernetes/issues/13488
# Instead, we modify the template of the deployment with a dummy env variable to trigger a cycle of the pods
# (based off this comment: https://github.com/kubernetes/kubernetes/issues/13488#issuecomment-240393845)
self._assert_success(
self._execute_k8s_api(
"PATCH",
deployment_url,
{
"spec": {
"template": {
"spec": {
"containers": [
{
# Note: this name MUST match the deployment template's pod template
# (e.g. <template>.spec.template.spec.containers[0] == 'quay-enterprise-app')
"name": QE_CONTAINER_NAME,
"env": [
{
"name": "RESTART_TIME",
"value": str(datetime.datetime.now()),
}
],
}
]
}
}
}
},
api_prefix="apis/extensions/v1beta1",
content_type="application/strategic-merge-patch+json",
)
)
def rollback_deployment(self, deployment_name):
deployment_rollback_url = "namespaces/%s/deployments/%s/rollback" % (
self.kube_config.qe_namespace,
deployment_name,
)
self._assert_success(
self._execute_k8s_api(
"POST",
deployment_rollback_url,
{
"name": deployment_name,
"rollbackTo": {
# revision=0 makes the deployment rollout to the previous revision
"revision": 0
},
},
api_prefix="apis/extensions/v1beta1",
),
201,
)
def _assert_success(self, response, expected_code=200):
if response.status_code != expected_code:
logger.error(
"Kubernetes API call failed with response: %s => %s",
response.status_code,
response.text,
)
raise K8sApiException("Kubernetes API call failed: %s" % response.text)
def _update_secret_file(self, relative_file_path, value=None):
if "/" in relative_file_path:
raise Exception("Expected path from get_volume_path, but found slashes")
# Check first that the namespace for Red Hat Quay exists. If it does not, report that
# as an error, as it seems to be a common issue.
namespace_url = "namespaces/%s" % (self.kube_config.qe_namespace)
response = self._execute_k8s_api("GET", namespace_url)
if response.status_code // 100 != 2:
msg = (
"A Kubernetes namespace with name `%s` must be created to save config"
% self.kube_config.qe_namespace
)
raise Exception(msg)
# Check if the secret exists. If not, then we create an empty secret and then update the file
# inside.
secret_url = "namespaces/%s/secrets/%s" % (
self.kube_config.qe_namespace,
self.kube_config.qe_config_secret,
)
secret = self._lookup_secret()
if secret is None:
self._assert_success(
self._execute_k8s_api(
"POST",
secret_url,
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {"name": self.kube_config.qe_config_secret},
"data": {},
},
)
)
# Update the secret to reflect the file change.
secret["data"] = secret.get("data", {})
if value is not None:
secret["data"][relative_file_path] = base64.b64encode(value)
else:
secret["data"].pop(relative_file_path)
self._assert_success(self._execute_k8s_api("PUT", secret_url, secret))
def _lookup_secret(self):
secret_url = "namespaces/%s/secrets/%s" % (
self.kube_config.qe_namespace,
self.kube_config.qe_config_secret,
)
response = self._execute_k8s_api("GET", secret_url)
if response.status_code != 200:
return None
return json.loads(response.text)
    def _execute_k8s_api(
        self, method, relative_url, data=None, api_prefix="api/v1", content_type="application/json"
    ):
        """Issue one authenticated request against the Kubernetes API server.

        ``data`` (if truthy) is JSON-encoded into the request body and the
        Content-Type header is set. Returns the raw Response object; callers
        inspect the status code themselves (see _assert_success).
        """
        headers = {"Authorization": "Bearer " + self.kube_config.service_account_token}

        if data:
            headers["Content-Type"] = content_type

        data = json.dumps(data) if data else None
        session = Session()
        url = "https://%s/%s/%s" % (self.kube_config.api_host, api_prefix, relative_url)
        request = Request(method, url, data=data, headers=headers)
        # NOTE(review): verify=False disables TLS certificate checking — confirm
        # this is intentional for in-cluster access; timeout is a hard 2 seconds.
        return session.send(request.prepare(), verify=False, timeout=2)
| 39.279452 | 137 | 0.580386 |
b6e866f9e91c604ababbde2022abc36a02b652e1 | 10,907 | py | Python | nvchecker/core.py | lilydjwg/nvchecker | 83286263d265835feb2fe4833e1d801c50ba7f6c | [
"MIT"
] | 320 | 2015-01-11T06:58:09.000Z | 2022-03-31T10:26:27.000Z | nvchecker/core.py | lilydjwg/nvchecker | 83286263d265835feb2fe4833e1d801c50ba7f6c | [
"MIT"
] | 142 | 2015-06-28T03:09:56.000Z | 2022-02-28T06:09:26.000Z | nvchecker/core.py | lilydjwg/nvchecker | 83286263d265835feb2fe4833e1d801c50ba7f6c | [
"MIT"
] | 68 | 2015-04-15T05:09:45.000Z | 2022-02-23T05:52:47.000Z | # MIT licensed
# Copyright (c) 2013-2020 lilydjwg <lilydjwg@gmail.com>, et al.
from __future__ import annotations
import os
import sys
import asyncio
from asyncio import Queue
import logging
import argparse
from typing import (
Tuple, NamedTuple, Optional, List, Union,
cast, Dict, Awaitable, Sequence, Any,
)
import types
from pathlib import Path
from importlib import import_module
import re
import contextvars
import json
import structlog
import tomli
import appdirs
from .lib import nicelogger
from . import slogconf
from .util import (
Entry, Entries, KeyManager, RawResult, Result, VersData,
FunctionWorker, GetVersionError,
FileLoadError, EntryWaiter,
)
from . import __version__
from .sortversion import sort_version_keys
from .ctxvars import tries as ctx_tries
from .ctxvars import entry_waiter as ctx_entry_waiter
from . import httpclient
logger = structlog.get_logger(logger_name=__name__)
def get_default_config() -> str:
  """Return the default path of the user's nvchecker.toml."""
  return os.path.join(
    appdirs.user_config_dir(appname='nvchecker'),
    'nvchecker.toml',
  )
def add_common_arguments(parser: argparse.ArgumentParser) -> None:
  """Register the CLI options shared by all nvchecker entry points."""
  parser.add_argument(
    '-l', '--logging',
    choices=('debug', 'info', 'warning', 'error'), default='info',
    help='logging level (default: info)')
  parser.add_argument(
    '--logger', default='pretty',
    choices=['pretty', 'json', 'both'],
    help='select which logger to use')
  parser.add_argument(
    '--json-log-fd', metavar='FD',
    type=lambda fd: os.fdopen(int(fd), mode='w'),
    help='specify fd to send json logs to. stdout by default')
  parser.add_argument(
    '-V', '--version', action='store_true',
    help='show version and exit')
  # Compute once so the help text and the actual default always agree.
  default_config = get_default_config()
  parser.add_argument(
    '-c', '--file',
    metavar='FILE', type=str,
    default=default_config,
    help=f'software version configuration file [default: {default_config}]')
def process_common_arguments(args: argparse.Namespace) -> bool:
  '''return True if should stop'''
  # Processors shared by every logger flavour; renderers are appended below
  # depending on the selected --logger mode.
  processors = [
    slogconf.exc_info,
    slogconf.filter_exc,
  ]

  logger_factory = None

  if args.logger in ['pretty', 'both']:
    slogconf.fix_logging()
    nicelogger.enable_pretty_logging(
      getattr(logging, args.logging.upper()))
    processors.append(slogconf.stdlib_renderer)
    if args.logger == 'pretty':
      # pretty-only mode: stdlib logging renders everything, so structlog's
      # own print output is discarded into /dev/null.
      logger_factory=structlog.PrintLoggerFactory(
        file=open(os.devnull, 'w'),
      )
      processors.append(slogconf.null_renderer)

  if args.logger in ['json', 'both']:
    processors.extend([
      structlog.processors.format_exc_info,
      slogconf.json_renderer,
    ])

  if logger_factory is None:
    # JSON output goes to the requested fd, defaulting to stdout.
    logfile = args.json_log_fd or sys.stdout
    logger_factory = structlog.PrintLoggerFactory(file=logfile)

  structlog.configure(
    processors = processors,
    logger_factory = logger_factory,
  )

  if args.version:
    progname = os.path.basename(sys.argv[0])
    print(f'{progname} v{__version__}')
    return True
  return False
def safe_overwrite(fname: str, data: Union[bytes, str], *,
                   method: str = 'write', mode: str = 'w', encoding: Optional[str] = None) -> None:
  """Atomically write *data* to *fname* via a fsync'ed temporary file.

  Data is written to ``fname + '.tmp'``, flushed and fsync'ed, and only then
  moved over the target, so a crash mid-write cannot corrupt the old file.
  *method* names the file-object method to call (e.g. 'write' or
  'writelines'); *mode* and *encoding* are passed to open().
  """
  # FIXME: directory has no read perm
  # FIXME: symlinks and hard links
  tmpname = fname + '.tmp'
  # if not using "with", write can fail without exception
  with open(tmpname, mode, encoding=encoding) as f:
    getattr(f, method)(data)
    # see also: https://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/
    f.flush()
    os.fsync(f.fileno())
  # if the above write failed (because disk is full etc), the old data should be kept.
  # os.replace (unlike os.rename) also atomically overwrites an existing
  # target on Windows, so this works cross-platform.
  os.replace(tmpname, fname)
def read_verfile(file: Path) -> VersData:
  """Load recorded versions from *file*; a missing file yields an empty dict.

  Supports both the current JSON format and the legacy
  "<name> <version>" line format.
  """
  try:
    with open(file) as fp:
      content = fp.read()
  except FileNotFoundError:
    return {}

  try:
    return json.loads(content)
  except json.decoder.JSONDecodeError:
    pass

  # old format: one whitespace-separated "name version" pair per line
  versions = {}
  for line in content.splitlines():
    name, ver = line.rstrip().split(None, 1)
    versions[name] = ver
  return versions
def write_verfile(file: Path, versions: VersData) -> None:
  """Persist *versions* to *file* as pretty-printed, key-sorted JSON."""
  # sort and indent to make it friendly to human and git
  payload = json.dumps(
    dict(sorted(versions.items())),
    indent=2,
    ensure_ascii=False,
  )
  safe_overwrite(str(file), payload + '\n')
class Options(NamedTuple):
  # Parsed __config__ section of the configuration file.
  ver_files: Optional[Tuple[Path, Path]]  # (oldver, newver) paths, if configured
  max_concurrency: int  # maximum number of concurrent checks
  proxy: Optional[str]  # proxy URL applied to requests
  keymanager: KeyManager  # API keys loaded from the keyfile (empty if none)
  source_configs: Dict[str, Dict[str, Any]]  # per-source configuration tables
  httplib: Optional[str]  # preferred HTTP backend, or None to auto-select
  http_timeout: int  # per-request timeout in seconds
def load_file(
  file: str, *,
  use_keymanager: bool,
) -> Tuple[Entries, Options]:
  """Parse a TOML configuration file into (entries, Options).

  The special ``__config__`` table is popped off and turned into Options;
  oldver/newver/keyfile paths get ~ and $VAR expansion and are resolved
  relative to the config file's directory. Raises FileLoadError on
  unreadable or invalid TOML.
  """
  try:
    with open(file, 'rb') as f:
      config = tomli.load(f)
  except (OSError, tomli.TOMLDecodeError) as e:
    raise FileLoadError('version configuration file', file, e)

  ver_files: Optional[Tuple[Path, Path]] = None
  keymanager = KeyManager(None)
  source_configs = {}

  if '__config__' in config:
    c = config.pop('__config__')
    d = Path(file).parent

    if 'oldver' in c and 'newver' in c:
      # Expand $VARS and ~, then resolve relative to the config file location.
      oldver_s = os.path.expandvars(
        os.path.expanduser(c.get('oldver')))
      oldver = d / oldver_s
      newver_s = os.path.expandvars(
        os.path.expanduser(c.get('newver')))
      newver = d / newver_s
      ver_files = oldver, newver

    if use_keymanager:
      keyfile = c.get('keyfile')
      if keyfile:
        keyfile_s = os.path.expandvars(
          os.path.expanduser(c.get('keyfile')))
        keyfile = d / keyfile_s

      # A KeyManager is created even when no keyfile is configured.
      keymanager = KeyManager(keyfile)

    if 'source' in c:
      source_configs = c['source']

    max_concurrency = c.get('max_concurrency', 20)
    proxy = c.get('proxy')
    httplib = c.get('httplib', None)
    http_timeout = c.get('http_timeout', 20)
  else:
    # No __config__ table: fall back to the documented defaults.
    max_concurrency = 20
    proxy = None
    httplib = None
    http_timeout = 20

  return cast(Entries, config), Options(
    ver_files, max_concurrency, proxy, keymanager,
    source_configs, httplib, http_timeout,
  )
def setup_httpclient(
  max_concurrency: int = 20,
  httplib: Optional[str] = None,
  http_timeout: int = 20,
) -> Dispatcher:
  """Configure the global HTTP client and return a fresh Dispatcher."""
  # Fall back to auto-detection when no backend was requested.
  chosen_lib = httplib or httpclient.find_best_httplib()
  httpclient.setup(chosen_lib, max_concurrency, http_timeout)
  return Dispatcher()
class Dispatcher:
  """Groups entries by their source module and launches one worker per source."""

  def dispatch(
    self,
    entries: Entries,
    task_sem: asyncio.Semaphore,
    result_q: Queue[RawResult],
    keymanager: KeyManager,
    entry_waiter: EntryWaiter,
    tries: int,
    source_configs: Dict[str, Dict[str, Any]],
  ) -> List[asyncio.Future]:
    """Import each entry's source module, configure it, and start its worker.

    Returns one future per source module; each worker runs in a copy of the
    root context so the `tries` / `entry_waiter` contextvars are shared.
    """
    mods: Dict[str, Tuple[types.ModuleType, List]] = {}
    ctx_tries.set(tries)
    ctx_entry_waiter.set(entry_waiter)
    root_ctx = contextvars.copy_context()

    for name, entry in entries.items():
      source = entry.get('source', 'none')
      if source not in mods:
        mod = import_module('nvchecker_source.' + source)
        tasks: List[Tuple[str, Entry]] = []
        mods[source] = mod, tasks
        config = source_configs.get(source)
        # BUGFIX: use a default so source modules without a configure() hook
        # don't raise AttributeError when a [__config__.source.X] table exists.
        if config and getattr(mod, 'configure', None):
          mod.configure(config)
      else:
        tasks = mods[source][1]
      tasks.append((name, entry))

    ret = []
    for mod, tasks in mods.values():
      # Modules may provide a full Worker class; otherwise their get_version
      # function is wrapped in the generic FunctionWorker.
      if hasattr(mod, 'Worker'):
        worker_cls = mod.Worker
      else:
        worker_cls = FunctionWorker

      ctx = root_ctx.copy()
      worker = ctx.run(
        worker_cls,
        task_sem, result_q, tasks, keymanager,
      )
      if worker_cls is FunctionWorker:
        func = mod.get_version
        ctx.run(worker.initialize, func)

      ret.append(ctx.run(worker.run))

    return ret
def substitute_version(
  version: str, conf: Entry,
) -> str:
  '''
  Substitute the version string via defined rules in the configuration file.
  See README.rst#global-options for details.
  '''
  prefix = conf.get('prefix')
  if prefix:
    # Only the prefix rule applies; unmatched versions pass through as-is.
    return version[len(prefix):] if version.startswith(prefix) else version

  from_pattern = conf.get('from_pattern')
  if from_pattern:
    to_pattern = conf.get('to_pattern')
    if to_pattern is None:
      raise ValueError("from_pattern exists but to_pattern doesn't")
    return re.sub(from_pattern, to_pattern, version)

  # No substitution rules found. Just return the original version string.
  return version
def apply_list_options(
  versions: List[str], conf: Entry,
) -> Optional[str]:
  """Filter *versions* by the entry's list options and return the newest one.

  Applies include_regex, exclude_regex and the ignored set in that order,
  then sorts with the configured version key. Returns None when nothing
  survives filtering.
  """
  include = conf.get('include_regex')
  if include:
    matcher = re.compile(include)
    versions = [v for v in versions if matcher.fullmatch(v)]

  exclude = conf.get('exclude_regex')
  if exclude:
    matcher = re.compile(exclude)
    versions = [v for v in versions if not matcher.fullmatch(v)]

  ignored = set(conf.get('ignored', '').split())
  if ignored:
    versions = [v for v in versions if v not in ignored]

  if not versions:
    return None

  key = sort_version_keys[conf.get("sort_version_key", "parse_version")]
  versions.sort(key=key)
  return versions[-1]
def _process_result(r: RawResult) -> Union[Result, Exception]:
  """Normalize one raw worker result into a Result, or return the error.

  Handles four shapes of r.version: GetVersionError (logged with its own
  context), any other Exception (logged as unexpected), a list of candidate
  versions (reduced via the entry's list options), or a plain string.
  """
  version = r.version
  conf = r.conf
  name = r.name

  if isinstance(version, GetVersionError):
    kw = version.kwargs
    kw['name'] = name
    logger.error(version.msg, **kw)
    return version
  elif isinstance(version, Exception):
    logger.error('unexpected error happened',
                 name=r.name, exc_info=r.version)
    return version
  elif isinstance(version, list):
    version_str = apply_list_options(version, conf)
  else:
    version_str = version

  if version_str:
    # Versions must be single-line; newlines would break the verfile format.
    version_str = version_str.replace('\n', ' ')
    try:
      version_str = substitute_version(version_str, conf)
      return Result(name, version_str, conf)
    except (ValueError, re.error) as e:
      logger.exception('error occurred in version substitutions', name=name)
      return e
  else:
    return ValueError('no version returned')
def check_version_update(
  oldvers: VersData, name: str, version: str,
) -> None:
  """Log whether *name*'s version changed relative to the recorded one."""
  previous = oldvers.get(name, None)
  if previous and previous == version:
    logger.debug('up-to-date', name=name, version=version)
  else:
    logger.info('updated', name=name, version=version, old_version=previous)
async def process_result(
  oldvers: VersData,
  result_q: Queue[RawResult],
  entry_waiter: EntryWaiter,
) -> VersData:
  """Consume raw results until cancelled; return the collected name->version map.

  Every processed result (or error) is also published through *entry_waiter*
  so entries waiting on other entries can proceed or fail accordingly.
  """
  ret = {}
  try:
    while True:
      r = await result_q.get()
      r1 = _process_result(r)
      if isinstance(r1, Exception):
        entry_waiter.set_exception(r.name, r1)
        continue
      check_version_update(oldvers, r1.name, r1.version)
      entry_waiter.set_result(r1.name, r1.version)
      ret[r1.name] = r1.version
  except asyncio.CancelledError:
    # Cancellation is the normal shutdown path once all workers are done.
    return ret
async def run_tasks(
  futures: Sequence[Awaitable[None]]
) -> None:
  # Await every worker; as_completed yields them in completion order, so the
  # first raised exception propagates immediately to the caller.
  for fu in asyncio.as_completed(futures):
    await fu
| 28.477807 | 99 | 0.664711 |
6c0e40565de3bd87ef4679c52b53e73e1122df6d | 6,192 | py | Python | src/program_guests.py | AdityaGupta030697/demo_bnb | 8f17b7fef77788b0dec6a782f68e9b2382281acf | [
"MIT"
] | null | null | null | src/program_guests.py | AdityaGupta030697/demo_bnb | 8f17b7fef77788b0dec6a782f68e9b2382281acf | [
"MIT"
] | null | null | null | src/program_guests.py | AdityaGupta030697/demo_bnb | 8f17b7fef77788b0dec6a782f68e9b2382281acf | [
"MIT"
] | null | null | null | from dateutil import parser
from datetime import datetime
from infra.switchlang import switch
import program_hosts as hosts
import infra.state as state
import services.db_services as db_svc
def run():
    """Guest-mode main loop: dispatch single-letter commands until exit."""
    print(' ****************** Welcome guest ****************\n')
    show_commands()

    while True:
        action = hosts.get_action()

        with switch(action) as s:
            s.case('c', hosts.create_account)
            s.case('l', hosts.log_into_account)
            s.case('a', add_guest)
            s.case('y', view_guests)
            s.case('b', book_room)
            s.case('v', view_bookings)
            # 'm' switches back to the main menu by returning a marker value.
            s.case('m', lambda: 'change_mode')
            s.case('?', show_commands)
            s.case('', lambda: None)
            s.case(['x', 'bye', 'exit', 'exit()'], hosts.exit_app)

            s.default(hosts.unknown_command)

        # Refresh the cached account after any action may have modified it.
        state.reload_account()

        if s.result == 'change_mode':
            return
def show_commands():
    """Print the guest-mode menu of available commands."""
    menu = [
        'What would you like to do?',
        '[c]reate an account',
        '[l]ogin to your account',
        '[b]ook a room',
        '[a]dd a guest',
        'View [y]our guests',
        '[v]iew your bookings',
        '[m]ain menu',
        'e[x]it app',
        '[?] Help (this info)\n',
    ]
    for entry in menu:
        print(entry)
def add_guest():
    """Prompt for a guest's details and register them under the active account."""
    print(' ****************** Add a guest **************** ')

    # Require an active account
    if not state.active_account:
        hosts.error_msg("Please login first to register a guest!")
        return

    # Get guest info from guest
    name = input("Please enter guest name as 'FIRST_NAME LAST_NAME':")
    email = input("Please enter guest email id:").lower().strip()
    # NOTE(review): int() raises ValueError on non-numeric input — no
    # validation here; confirm the caller tolerates the crash or add a check.
    age = int(input("Please enter guest age:"))
    phone = input("Please enter guest phone number:")
    gender = input("Please enter guest gender:")

    # Create the guest in the DB.
    guest = db_svc.add_guest(state.active_account, name, email, age, phone,
                             gender)

    state.reload_account()
    hosts.success_msg("Added {} {} as a guest".format(guest.first_name,
                                                     guest.last_name))
def view_guests():
    """List the active account's guests with their stored details."""
    # Require an active account
    if not state.active_account:
        hosts.error_msg("Please login first to register a guest!")
        return

    # Get guests from DB, show details list
    guests = db_svc.find_guests_for_user(state.active_account.email)
    print(' ****************** {}\'s Guests ****************'.
          format(state.active_account.first_name))
    for i, guest in enumerate(guests):
        # 1-based numbering so the list can be used for selection in book_room.
        print("{}. {} {} is a guest with age {}, email {}, "
              "gender {}, and phone {}".format(i+1, guest.first_name,
                                               guest.last_name,
                                               guest.age,
                                               guest.email,
                                               guest.gender,
                                               guest.phone_number))
    print(" ****************** END **************** ")
def book_room():
    """Interactive flow: pick dates, a guest, and an available room, then book it."""
    print(' ****************** Book a room **************** ')

    # Require an active account
    if not state.active_account:
        hosts.error_msg("Please login first to register a guest!")
        return

    guests = db_svc.find_guests_for_user(state.active_account.email)
    # Verify they have a guest
    if not guests:
        hosts.error_msg("Please add a guest first!")
        return

    print("Lets start finding rooms..")
    # Get dates and select guest; an empty check-in date cancels the flow.
    start_date = input("Enter Check in date [YYYY-MM-DD]: ").strip()
    if not start_date:
        hosts.error_msg("Cancelled!")
        return

    start_date = parser.parse(start_date)
    end_date = parser.parse(input("Enter Check out date [YYYY-MM-DD]: "))

    if start_date >= end_date:
        hosts.error_msg("Check in can't be on/after Checkout date")
        return

    print("Please choose available guest from the list: ")
    view_guests()
    # NOTE(review): guest/room selections are not range-checked — an
    # out-of-range number raises IndexError; confirm this is acceptable.
    guest_no = int(input("Chosen Guest no?: ").strip())
    guest = guests[guest_no-1]

    # Find rooms available across date range
    allow_pets = bool(input("Does this guest has pet(s)? [y/n]: ")
                      .strip().startswith('y'))
    rooms = db_svc.get_available_rooms(start_date, end_date, allow_pets)
    if not rooms:
        hosts.error_msg("Sorry, there are no rooms available for that date!")
        return

    print("You have {} rooms.".format(len(rooms)))
    for idx, room in enumerate(rooms):
        print("{} Room {}, {} type is priced at Rs.{} with pets {}\n"
              .format(idx+1, room.number, room.rtype, room.price,
                      "allowed" if room.allow_pets else "not allowed"))
        for b in room.bookings:
            print(' * Booking: {}, {} days, booked? {}'.format(
                b.check_in_date,
                (b.check_out_date - b.check_in_date).days,
                'YES' if b.booked_date is not None else 'no'
            ))

    # Let user select room to book.
    selected_room = rooms[int(input("Pick a room: "))-1]
    db_svc.book_room(state.active_account, guest, selected_room,
                     start_date, end_date)
    hosts.success_msg("Room {} booked successfully at Rs.{}/night!"
                      .format(selected_room.number, selected_room.price))
def view_bookings():
    """List every booking of the active account with the guest and room details."""
    print(' ****************** Your bookings **************** ')

    # Require an active account
    if not state.active_account:
        hosts.error_msg("Please login first to register a guest!")
        return

    # Map guest id -> guest so each booking's guest can be resolved cheaply.
    guests = {g.id: g for g in
              db_svc.find_guests_for_user(state.active_account.email)}
    bookings = db_svc.get_bookings_for_user(state.active_account.email)

    print("You have {} bookings.".format(len(bookings)))
    for b in bookings:
        # noinspection PyUnresolvedReferences
        print(' * Guest: {} {} is booked at {} from {} for {} days.'.format(
            guests.get(b.guest_id).first_name,
            guests.get(b.guest_id).last_name,
            b.room.number,
            b.check_in_date.date(),
            b.duration_in_days
        ))
| 34.983051 | 77 | 0.561854 |
1d2692991c57d4a389dcd889b349b0b01ff86806 | 7,723 | py | Python | pyhpecw7/features/errors.py | Sumico/pyhpecw7 | d71244e8372536eb091c1af199ad2b925d3d82e1 | [
"Apache-2.0"
] | 46 | 2016-08-23T11:49:45.000Z | 2021-08-23T18:01:39.000Z | pyhpecw7/features/errors.py | Sumico/pyhpecw7 | d71244e8372536eb091c1af199ad2b925d3d82e1 | [
"Apache-2.0"
] | 20 | 2016-07-08T17:34:04.000Z | 2021-05-28T08:37:54.000Z | pyhpecw7/features/errors.py | Sumico/pyhpecw7 | d71244e8372536eb091c1af199ad2b925d3d82e1 | [
"Apache-2.0"
] | 29 | 2016-06-10T14:55:55.000Z | 2021-03-31T08:10:22.000Z | """Feature-specific errors.
"""
from pyhpecw7.errors import PYHPError
class FeatureError(PYHPError):
    """Base class for all feature-layer errors in this module."""
    def __init__(self):
        pass
class LengthOfStringError(FeatureError):
    """Raised when a string parameter exceeds its maximum allowed length."""

    def __init__(self, param_name):
        # passing in the name of the variable rather than the actual string
        # but feel free to pass in the value instead if you want!
        self.param_name = param_name

    def __repr__(self):
        # BUGFIX: original message read "Maximum string length of exceeded".
        return 'Maximum string length exceeded for {0}'.format(
            self.param_name)

    __str__ = __repr__
class InvalidIPAddress(FeatureError):
    """Raised when a value is not a valid IPv4 or IPv6 address."""

    def __init__(self, ipaddr):
        self.ipaddr = ipaddr

    def __repr__(self):
        return 'Invalid IPv4 or IPv6 Address: {0}'.format(self.ipaddr)

    __str__ = __repr__
##################################
# INTERFACE ERRORS #
##################################
class InterfaceError(FeatureError):
    """Base class for interface-related errors; stores the interface name."""
    def __init__(self, if_name):
        self.if_name = if_name
class InterfaceTypeError(InterfaceError):
    """Raised when an interface name is not of a valid type."""

    def __init__(self, if_name, if_types=None):
        super(InterfaceTypeError, self).__init__(if_name)
        self.if_types = if_types

    def __repr__(self):
        message = '{0} is not a valid interface type.'.format(self.if_name)
        if self.if_types:
            # Only mention the allowed types when the caller supplied them.
            message += ' Type must be one of {0}'.format(self.if_types)
        return message

    __str__ = __repr__
class InterfaceAbsentError(InterfaceError):
    """Raised when the named interface does not exist on the device."""

    def __init__(self, if_name):
        super(InterfaceAbsentError, self).__init__(if_name)

    def __repr__(self):
        return 'Interface {0} not found on the device.'.format(self.if_name)

    __str__ = __repr__
class InterfaceParamsError(InterfaceError):
    """Raised when parameters are supplied that the interface does not accept."""

    def __init__(self, if_name, params):
        super(InterfaceParamsError, self).__init__(if_name)
        self.params = params

    def __repr__(self):
        return 'Interface {0} does not take parameters {1}.'.format(
            self.if_name, self.params)

    __str__ = __repr__
class InterfaceCreateError(InterfaceError):
    """Raised when an interface cannot be created on the device."""

    def __init__(self, if_name):
        super(InterfaceCreateError, self).__init__(if_name)

    def __repr__(self):
        return 'Interface {0} could not be created.'.format(self.if_name)

    __str__ = __repr__
class InterfaceRemoveError(InterfaceError):
    """Raised when an interface cannot be removed from the device."""

    def __init__(self, if_name):
        super(InterfaceRemoveError, self).__init__(if_name)

    def __repr__(self):
        return 'Interface {0} could not be removed.'.format(self.if_name)

    __str__ = __repr__
class InterfaceVlanMustExist(InterfaceError):
    """Raised when an interface requires a VLAN that has not been created yet."""

    def __init__(self, if_name, number):
        super(InterfaceVlanMustExist, self).__init__(if_name)
        self.number = number

    def __repr__(self):
        return 'Vlan {0} must exist before interface can be created.'.format(
            self.number)

    __str__ = __repr__
######################
# IPINTERFACE ERRORS #
######################
class IpInterfaceError(FeatureError):
    """Base class for IP interface errors."""
    pass
class IpIfaceMissingData(IpInterfaceError):
    """Raised when an IP interface is configured without address/mask data."""

    def __init__(self):
        super(IpIfaceMissingData, self).__init__()

    def __repr__(self):
        return 'IP address and mask must be supplied'

    __str__ = __repr__
##################################
# VLAN ERRORS #
##################################
class VlanError(FeatureError):
    """Base class for VLAN errors."""
    pass
class VlanIDError(VlanError):
    """Raised when a VLAN ID falls outside the valid 1-4094 range."""

    def __repr__(self):
        return 'VLAN ID must be between 1-4094'

    __str__ = __repr__
##################################
# REBOOT ERRORS #
##################################
class RebootError(FeatureError):
    """Base class for scheduled-reboot errors."""
    pass
class RebootTimeError(RebootError):
    """Raised when a reboot time is not in HH:MM format."""

    def __repr__(self):
        return 'Format for time must be HH:MM'

    __str__ = __repr__
class RebootDateError(RebootError):
    """Raised when a reboot date is not in MM/DD/YYYY format."""

    def __repr__(self):
        return 'Format for the date must be MM/DD/YYYY'

    __str__ = __repr__
##################################
# PORTCHANNEL ERRORS #
##################################
class PortChannelError(FeatureError):
    """Base class for port-channel errors."""
    def __init__(self):
        pass
class InvalidPortType(PortChannelError):
    """Raised when a proposed port-channel type conflicts with the physical port type."""

    def __init__(self, if_name, config_type, pc_type):
        self.if_name = if_name
        self.config_type = config_type
        self.pc_type = pc_type

    def __repr__(self):
        return ('Proposed port-channel type of "{0}" '
                '\ndoes not match existing physical interface '
                '\nof port type "{1}" '
                'on interface: "{2}"').format(
                    self.pc_type, self.config_type, self.if_name)

    __str__ = __repr__
class AggregationGroupError(PortChannelError):
    """Raised when an interface already belongs to another aggregation group."""

    def __init__(self, if_name):
        self.if_name = if_name

    def __repr__(self):
        return ('interface {0} is assigned \n'
                'to another aggregation group.'
                'It needs to be \nremoved first.').format(self.if_name)

    __str__ = __repr__
##################################
# FILE COPY ERRORS #
##################################
class FileError(FeatureError):
    """Base class for file-copy errors; stores source and destination paths."""
    def __init__(self, src=None, dst=None):
        self.src = src
        self.dst = dst
class FileNotReadableError(FileError):
    """Raised when the local source file is missing or unreadable."""

    def __repr__(self):
        return '{0} doesn\'t exist, or isn\'t readable.'.format(self.src)

    __str__ = __repr__
class FileNotEnoughSpaceError(FileError):
    """Raised when the remote flash lacks space for the file being copied."""

    def __init__(self, src, file_size, flash_size):
        super(FileNotEnoughSpaceError, self).__init__(src)
        self.file_size = file_size
        self.flash_size = flash_size

    def __repr__(self):
        return ('Not enough space on remote device for {0}.\n'
                'File Size: {1} bytes\n'
                'Space Available: {2} bytes\n').format(
                    self.src, self.file_size, self.flash_size)

    __str__ = __repr__
class FileTransferError(FileError):
    """Raised when the file transfer itself fails mid-flight."""

    def __repr__(self):
        return 'There was an error while the file was in transit.'

    __str__ = __repr__
class FileHashMismatchError(FileError):
    """Raised when the source and destination MD5 digests differ after a copy."""

    def __init__(self, src, dst, src_hash, dst_hash):
        super(FileHashMismatchError, self).__init__(src, dst)
        self.src_hash = src_hash
        self.dst_hash = dst_hash

    def __repr__(self):
        # BUGFIX: the original message misspelled "destination" as "destinatino".
        return 'The MD5 hash digests do not match.\n' +\
               'The hash of the source {0} was {1}.\n'.format(self.src, self.src_hash) +\
               'The hash of the destination {0} was {1}.\n'.format(self.dst, self.dst_hash)

    __str__ = __repr__
class FileRemoteDirDoesNotExist(FileError):
    """Raised when the remote target directory does not exist."""

    def __init__(self, remote_dir):
        self.remote_dir = remote_dir

    def __repr__(self):
        return 'The remote directory {0} does not exist.'.format(self.remote_dir)

    __str__ = __repr__
##################################
# Config Errors #
##################################
class ConfigError(FeatureError):
    """Base class for configuration errors."""
    pass
class InvalidConfigFile(ConfigError):
    """Raised when a config-replace operation fails due to a bad config file."""

    def __repr__(self):
        return ('Config replace operation failed.\n'
                ' Validate the config file being applied.')

    __str__ = __repr__
##################################
# IRF Errors #
##################################
class IRFError(FeatureError):
    """Base class for IRF (stacking) errors."""
    pass
class IRFMemberDoesntExistError(IRFError):
    """Raised when the given IRF member id is not present in the stack."""

    def __init__(self, member_id):
        self.member_id = member_id

    def __repr__(self):
        return 'The IRF member {0} does not exist.'.format(self.member_id)

    __str__ = __repr__
| 24.134375 | 88 | 0.600026 |
194c1270ccee7051cf0d8c4a19c06c063871f250 | 808 | py | Python | text_renderer/text_renderer/effect/dropout_vertical.py | java-abhinav07/formreader | 63aa695eb4b8547bc56c6f070dd8d1fadbdf29b1 | [
"RSA-MD"
] | null | null | null | text_renderer/text_renderer/effect/dropout_vertical.py | java-abhinav07/formreader | 63aa695eb4b8547bc56c6f070dd8d1fadbdf29b1 | [
"RSA-MD"
] | null | null | null | text_renderer/text_renderer/effect/dropout_vertical.py | java-abhinav07/formreader | 63aa695eb4b8547bc56c6f070dd8d1fadbdf29b1 | [
"RSA-MD"
] | null | null | null | import random
from typing import Tuple
from text_renderer.utils.bbox import BBox
from text_renderer.utils.types import PILImage
from .base_effect import Effect
class DropoutVertical(Effect):
    """Effect that randomly drops pixels along vertical lines of the image."""

    def __init__(self, p=0.5, num_line=10):
        """
        Parameters
        ----------
        p : float
            Probability of apply this effect
        num_line : int
            Number of vertical dropout lines
        """
        super().__init__(p)
        self.num_line = num_line

    def apply(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        # Pixel-access object: writes through it mutate img in place.
        pim = img.load()
        for _ in range(self.num_line):
            # Pick a random column (column 0 is never selected).
            col = random.randint(1, img.width - 1)
            for row in range(img.height):
                # rand_pick comes from the Effect base class — presumably it
                # randomly drops the pixel at (col, row); confirm its semantics.
                self.rand_pick(pim, col, row)
        return img, text_bbox
| 24.484848 | 77 | 0.596535 |
d11585ac77969c61eb390c650985ec341dac781f | 12,224 | py | Python | spec2model/mapping.py | vsoch/map2model | 4d5c187ab660da5d3fe088b49f92c298b9fd9a7b | [
"MIT"
] | null | null | null | spec2model/mapping.py | vsoch/map2model | 4d5c187ab660da5d3fe088b49f92c298b9fd9a7b | [
"MIT"
] | null | null | null | spec2model/mapping.py | vsoch/map2model | 4d5c187ab660da5d3fe088b49f92c298b9fd9a7b | [
"MIT"
] | null | null | null | import requests
import gspread
from pydrive.auth import GoogleAuth
from rdflib import ConjunctiveGraph
def __get_class_name(temp_uri):
    """Strip the schema.org namespace from a URI, leaving the bare class name."""
    return temp_uri.replace("http://schema.org/","")
def __add_property(props_dic, prop_desc):
    """Merge *prop_desc* into *props_dic*, accumulating expected types.

    If the property already exists only its exp_type list is extended;
    otherwise the whole description is inserted and exp_type is normalized
    into a single-element list. The schema.org namespace prefix is stripped
    from each expected type either way.
    """
    sdo_uri="http://schema.org/"
    if prop_desc['prop_name'] in props_dic:
        t_prop_name = prop_desc['prop_name']
        props_dic[t_prop_name]['exp_type'].append(prop_desc['exp_type'].replace(sdo_uri,""))
    else:
        props_dic[prop_desc['prop_name']]=prop_desc
        props_dic[prop_desc['prop_name']]['exp_type'] = [prop_desc['exp_type'].replace(sdo_uri,"")]
    return props_dic
def __get_class_props(class_name, graph):
    """SPARQL-query *graph* for the direct properties of a schema.org class.

    Returns a dict keyed by property name; each value holds the query row's
    bindings with relative /docs/ links rewritten to absolute schema.org URLs.
    """
    print("Quering properties of %s in Schema.org" % class_name)
    qres = graph.query("""prefix schema: <http://schema.org/>
    select distinct * where {
    ?property schema:domainIncludes schema:%s .
    ?property schema:rangeIncludes ?exp_type .
    ?property rdfs:label ?prop_name.
    ?property rdfs:comment ?description
    }""" % class_name)
    temp_dic = {}
    for row in qres:
        labels=row.labels.keys()
        labels_dic = {}
        print('Parsing %s property.' % row['prop_name'])
        for label in labels:
            # Rewrite relative /docs/ anchors to absolute schema.org links.
            labels_dic[label] = str(row[label]).replace('<a href=\"/docs/', '<a href=\"http://schema.org/docs/')
        temp_dic=__add_property(temp_dic, labels_dic)
    return temp_dic
def __get_parent_type(class_name, graph):
    """Return the schema.org superclass name of *class_name* (namespace stripped).

    NOTE(review): assumes at least one rdfs:subClassOf match — an empty query
    result would raise IndexError below; confirm all queried types have a parent.
    """
    print("Find parent type of %s in Schema.org" % class_name)
    qres = graph.query("""prefix schema: <http://schema.org/>
    select ?supclass where{
    ?class rdfs:label ?label .
    ?class rdfs:subClassOf ?supclass .
    filter (?label='%s')
    }""" % class_name)
    resp_arr=[]
    for row in qres:
        resp_arr.append(str(row['supclass']))
    return resp_arr[0].replace('http://schema.org/', '')
def __get_properties(class_name, graph, properties):
    """Recursively collect the properties of *class_name* and all its ancestors.

    Mutates *properties* in place (one key per type in the hierarchy),
    stopping at schema.org's root type 'Thing'. Also returns *properties* —
    previously only the base case returned it, so the recursive branch
    returned None; callers relying on in-place mutation are unaffected.
    """
    properties[class_name] = __get_class_props(class_name, graph)
    if class_name == 'Thing':
        return properties
    parent_type = __get_parent_type(class_name, graph)
    __get_properties(parent_type, graph, properties)
    return properties
def get_properties_in_hierarchy(type_name):
    """Collect schema.org properties for *type_name* and every ancestor type.

    Downloads the latest schema.org vocabulary over the network and returns
    a dict mapping each type in the hierarchy to its property dict.
    """
    query_type = type_name
    g = ConjunctiveGraph()
    g.parse('http://schema.org/version/latest/schema.jsonld', format='json-ld')
    props_dic={}
    # __get_properties fills props_dic in place, walking up to 'Thing'.
    __get_properties(query_type, g, props_dic)
    return props_dic
def get_hierarchy(props_dic):
    """Return the hierarchy type names (the dict keys) in insertion order."""
    return list(props_dic)
# Function that receives a string with expected types and returns a list with
# each individual expected type.
def get_expected_type(expected_types):
    """Split a free-text expected-types cell into a list of type names.

    Handles ' OR ' / ' or ' separators, embedded newlines and commas the way
    the mapping sheets use them.
    """
    cleaned = expected_types.strip().replace('\n', '')
    for separator in (' OR ', ' or '):
        cleaned = cleaned.replace(separator, ' ')
    cleaned = cleaned.replace(',', '')
    return [part.strip() for part in cleaned.split(" ")]
def _parse_controlled_vocabulary(temp_cont_vocab):
    """Parse a comma-separated controlled-vocabulary cell into terms and ontologies.

    Elements containing ':' are treated as 'name: url' ontology references;
    any other non-empty element (minus a leading 'LIST - ' marker) becomes a
    plain term. Returns {'terms': [...], 'ontologies': [...]}.
    """
    cv_parsed = {'terms':[] , 'ontologies':[]}
    for element in temp_cont_vocab.split(','):
        if ':' in element:
            onto_name, onto_url = element.split(":", 1)
            cv_parsed['ontologies'].append({
                'name': onto_name.strip(),
                'url': onto_url.strip(),
            })
        elif element != '':
            term_name = element.replace('LIST - ', '').strip()
            cv_parsed['terms'].append({'name': term_name})
    return cv_parsed
def __get_dic_from_sheet_row(c_property):
    """Convert one Google Sheet row dict into a normalized property dict.

    Bioschemas columns (BSC Description, Marginality, Cardinality, Controlled
    Vocabulary) and schema.org columns (Property, Expected Type, Description)
    are stripped of stray whitespace/newlines and parsed.
    """
    property_as_dic = {}
    # Set Bioschemas attributes
    property_as_dic['bsc_dec'] = c_property['BSC Description'].strip().replace('\n', ' ')
    property_as_dic['marginality'] = c_property['Marginality'].replace('\n', ' ')
    property_as_dic['cardinality'] = c_property['Cardinality'].strip().strip('\n').replace('\n', ' ')
    temp_cont_vocab = c_property['Controlled Vocabulary'].strip().replace('\n', ' ')
    property_as_dic['controlled_vocab'] = _parse_controlled_vocabulary(temp_cont_vocab)
    # Set schema.org attributes
    property_as_dic['name'] = c_property['Property'].strip().strip('\n')
    property_as_dic['expected_type'] = get_expected_type(c_property['Expected Type'])
    property_as_dic['sdo_desc'] = c_property['Description'].strip().replace('\n', ' ')
    print (property_as_dic['name'] + ':' + property_as_dic['sdo_desc'] +'\n')
    # NOTE(review): sdo_desc cannot be None here (strip() already ran on it),
    # so this guard looks vestigial — confirm before removing.
    if property_as_dic['sdo_desc'] is None:
        property_as_dic['sdo_desc'] = ' ';
    return property_as_dic
def get_property_in_hierarchy(sdo_props, mapping_property):
    """Locate *mapping_property* within the schema.org hierarchy dict.

    Returns {'type': level, 'property': mapping_property} where level is the
    hierarchy type defining the property, or 'new_sdo' if not found. Side
    effect: copies the schema.org description onto the mapping property.
    """
    found_level = "new_sdo"
    for level, level_props in sdo_props.items():
        if mapping_property['name'] in level_props:
            found_level = level
            mapping_property['sdo_desc'] = level_props[mapping_property['name']]['description']
    return {'type': found_level, 'property': mapping_property}
def get_formatted_props(sdo_props, mapping_props, spec_name, spec_type):
    """Merge mapping-sheet properties with schema.org hierarchy properties.

    For a Type spec: mapped properties not known to schema.org are parented
    under *spec_name*, and every inherited schema.org property is added too.
    For a Profile spec: only mapped properties are returned, each parented
    under the hierarchy type that defines it (or *spec_name* if new).
    Returns {'properties': [...]} ready for the Jekyll layout.
    """
    all_props= []
    bsc_props = []
    # if type only get new properties from mapping file
    if(spec_type == "Type" or spec_type == "type"):
        for mapping_property in mapping_props:
            bsc_props.append(mapping_property['name'])
            temp_prop=get_property_in_hierarchy(sdo_props, mapping_property)
            if temp_prop['type'] == "new_sdo":
                temp_prop['property']['parent'] = spec_name
                all_props.append(temp_prop['property'])
        for sdo_prop in sdo_props:
            # now get all props from schema & make them such that _layout can use them
            for sdo_prop_prop in sdo_props[sdo_prop].keys():
                if sdo_props[sdo_prop][sdo_prop_prop]['prop_name'] not in bsc_props:
                    # Normalize schema.org field names to the layout's keys.
                    sdo_props[sdo_prop][sdo_prop_prop]['parent'] = sdo_prop
                    sdo_props[sdo_prop][sdo_prop_prop]['name'] = sdo_props[sdo_prop][sdo_prop_prop]['prop_name']
                    # sdo_props[sdo_prop][sdo_prop_prop]['bsc_dec'] = sdo_props[sdo_prop][sdo_prop_prop]['description']
                    sdo_props[sdo_prop][sdo_prop_prop]['sdo_desc'] = sdo_props[sdo_prop][sdo_prop_prop]['description']
                    sdo_props[sdo_prop][sdo_prop_prop]['expected_type'] = sdo_props[sdo_prop][sdo_prop_prop]['exp_type']
                    all_props.append(sdo_props[sdo_prop][sdo_prop_prop])
                else:
                    # Mapped property: just record which type defines it.
                    for i in all_props:
                        if i['name'] == sdo_props[sdo_prop][sdo_prop_prop]['prop_name']:
                            i['parent'] = sdo_prop
        return {'properties': all_props}
    # if profile
    for mapping_property in mapping_props:
        temp_prop=get_property_in_hierarchy(sdo_props, mapping_property)
        if temp_prop['type'] == "new_sdo":
            temp_prop['property']['parent'] = spec_name
        else:
            temp_prop['property']['parent'] = temp_prop['type']
        all_props.append(temp_prop['property'])
    return {'properties': all_props}
def get_mapping_properties(mapping_sheet, spec_type):
    """Read property rows from the Google Sheet (header on row 5) and parse
    every row with the required columns filled into a property dict.

    NOTE(review): *spec_type* is currently unused — confirm whether it was
    meant to influence row filtering.
    """
    list_of_hashes = mapping_sheet.get_all_records(head=5)
    type_properties = []
    for c_property in list_of_hashes:
        # Skip incomplete rows: expected type, marginality and cardinality
        # must all be present (description may be blank).
        if(c_property['Expected Type']!="" # and c_property['Description']!=""
            and c_property['Marginality']!="" and c_property['Cardinality']!=""):
            print("Parsing %s property from Google Sheets." % c_property['Property'])
            property_as_dic=__get_dic_from_sheet_row(c_property)
            type_properties.append(property_as_dic)
    return type_properties
class GSheetsParser:
    """
    Parses a Bioschemas specification mapping hosted in a Google Sheets
    document and assembles the description/property structures used to
    generate the specification pages.
    """
    # Class-level defaults; __init__ re-assigns the instance attributes.
    gsheet_id = ''
    cred_file = ''
    gauth = "This variable will have the Google Authorization file"
    scope = []
    spec_metadata = {}
    bsc_specification = {}

    def __init__(self):
        """Authenticate against Google Drive and cache the credentials on disk."""
        self.gsheet_id = '1h0-fgqnRe25-tVCmu2yWNQjthLzgkW4a1TVNMpCABlc'
        #self.cred_file = 'client_secrets.json'
        #self.scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
        self.spec_metadata = {}
        self.bsc_specification = {}
        creds_path = "spec2model/mycreds.txt"
        self.gauth = GoogleAuth()
        # Try to load saved client credentials
        self.gauth.LoadCredentialsFile(creds_path)
        if self.gauth.credentials is None:
            # Authenticate if they're not there
            self.gauth.LocalWebserverAuth()
        elif self.gauth.access_token_expired:
            # Refresh them if expired
            self.gauth.Refresh()
        else:
            # Initialize the saved creds
            self.gauth.Authorize()
        # Save the current credentials to a file
        self.gauth.SaveCredentialsFile(creds_path)

    def set_gsheet_id(self, gsheet_id):
        """Set the Google Sheet document id to parse."""
        self.gsheet_id = gsheet_id

    def set_spec_metadata(self, spec_metadata):
        """Set the specification metadata dictionary used while parsing."""
        self.spec_metadata = spec_metadata

    def check_url(self, spec_url):
        """
        Return ``spec_url`` when it is usable, otherwise the sentinel 'err_404'.

        Bug fix: the original compared the requests Response object itself to
        the integer 404 (``r == 404``), which is always False; the HTTP status
        code must be compared instead.
        """
        if spec_url is None:
            return "err_404"
        r = requests.get(spec_url)
        if r.status_code == 404:
            return "err_404"
        return spec_url

    def __get_mapping_description(self, mapping_sheet):
        """Assemble the specification description from metadata plus sheet cells."""
        mapping_description = {}
        mapping_description['name'] = self.spec_metadata['name']
        print("Parsing %s Google Sheet" % mapping_description['name'])
        mapping_description['g_mapping_file'] = self.spec_metadata['g_mapping_file']
        mapping_description['spec_mapping_url'] = self.spec_metadata['spec_mapping_url']
        mapping_description['status'] = self.spec_metadata['status']
        mapping_description['spec_type'] = self.spec_metadata['spec_type']
        mapping_description['gh_folder'] = 'https://github.com/BioSchemas/specifications/tree/master/'+self.spec_metadata['name']
        mapping_description['gh_examples'] = 'https://github.com/BioSchemas/specifications/tree/master/'+self.spec_metadata['name']+'/examples'
        mapping_description['gh_tasks'] = 'https://github.com/BioSchemas/bioschemas/labels/type%3A%20'+self.spec_metadata['name']
        mapping_description['edit_url'] = 'https://github.com/BioSchemas/specifications/tree/master/'+self.spec_metadata['name']+'/specification.html'
        mapping_description['use_cases_url'] = self.check_url(self.spec_metadata['use_cases_url'])
        mapping_description['version'] = self.spec_metadata['version']
        mapping_description['subtitle'] = mapping_sheet.acell('B1').value
        mapping_description['description'] = mapping_sheet.acell('B2').value
        # Cell A6 stores the parent type after an 8-character prefix —
        # TODO(review): confirm the exact label format in the sheet template.
        mapping_description['parent_type'] = mapping_sheet.acell('A6').value[8:].strip()
        return mapping_description

    def get_mapping_g_sheets(self):
        """
        Parse the first worksheet of the configured Google Sheet and return the
        full specification description, including hierarchy and formatted
        property lists.
        """
        client = gspread.authorize(self.gauth.credentials)
        print("Parsing %s file." % self.spec_metadata['g_mapping_file'])
        mapping_sheet = client.open_by_key(self.gsheet_id).get_worksheet(0)
        spec_description = self.__get_mapping_description(mapping_sheet)
        sdo_props = get_properties_in_hierarchy(spec_description['parent_type'])
        spec_description['hierarchy'] = get_hierarchy(sdo_props)
        print("Prepared schema.org properties for hierarchy %s" % str(spec_description['hierarchy']))
        print("Classifing %s properties" % spec_description['name'])
        mapping_props = get_mapping_properties(mapping_sheet, spec_description['spec_type'])
        formatted_props = get_formatted_props(sdo_props, mapping_props, spec_description['name'], spec_description['spec_type'])
        spec_description.update(formatted_props)
        return spec_description
| 41.297297 | 148 | 0.656577 |
1d4fdafe7737d3cb10c595905b704ec66acfbbee | 544 | py | Python | app/recipe/tests/test_tag_api.py | Aalekh-Sajonia/recipe-app-api | f5016354569358667048540a39f5338d686a52da | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_api.py | Aalekh-Sajonia/recipe-app-api | f5016354569358667048540a39f5338d686a52da | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_api.py | Aalekh-Sajonia/recipe-app-api | f5016354569358667048540a39f5338d686a52da | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
# URL of the tag list endpoint in the 'recipe' app namespace.
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the publicly available (unauthenticated) tags API."""

    def setUp(self):
        """Create a fresh unauthenticated API client for each test."""
        self.client = APIClient()

    def test_login_required(self):
        """Retrieving tags without authentication must be rejected with 401."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
| 24.727273 | 71 | 0.775735 |
312409e82230206e2a67b0f0a0a285323e104bdf | 13,338 | py | Python | onnxruntime/python/tools/tensorrt/perf/post.py | jamill/onnxruntime | 0565fecf46c4dd711c01a4106641946963bf7ff0 | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | onnxruntime/python/tools/tensorrt/perf/post.py | jamill/onnxruntime | 0565fecf46c4dd711c01a4106641946963bf7ff0 | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | onnxruntime/python/tools/tensorrt/perf/post.py | jamill/onnxruntime | 0565fecf46c4dd711c01a4106641946963bf7ff0 | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import argparse
import datetime
import os
import sys
import pandas as pd
from azure.kusto.data import KustoConnectionStringBuilder
from azure.kusto.data.data_format import DataFormat
from azure.kusto.ingest import IngestionProperties, QueuedIngestClient, ReportLevel
from perf_utils import (
avg_ending,
cpu,
cuda,
cuda_fp16,
fail_name,
group_title,
latency_name,
latency_over_time_name,
memory_ending,
memory_name,
model_title,
ort_provider_list,
provider_list,
second,
session_name,
specs_name,
standalone_trt,
standalone_trt_fp16,
status_name,
table_headers,
trt,
trt_fp16,
)
# database connection strings
CLUSTER_INGEST = "https://ingest-onnxruntimedashboarddb.southcentralus.kusto.windows.net"
DATABASE_NAME = "ep_perf_dashboard"
def parse_arguments():
    """
    Parse the command-line arguments of the dashboard upload script.

    :return: Namespace whose fields hold the parsed command-line arguments.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-r", "--report_folder", help="Path to the local file report", required=True)
    arg_parser.add_argument("-c", "--commit_hash", help="Commit hash", required=True)
    arg_parser.add_argument("-u", "--report_url", help="Report Url", required=True)
    arg_parser.add_argument("-t", "--trt_version", help="Tensorrt Version", required=True)
    arg_parser.add_argument("-b", "--branch", help="Branch", required=True)
    # The datetime flag is converted eagerly so downstream code receives a datetime object.
    arg_parser.add_argument(
        "-d",
        "--commit_datetime",
        help="Commit datetime in Python's datetime ISO 8601 format",
        required=True,
        type=datetime.datetime.fromisoformat,
    )
    return arg_parser.parse_args()
def adjust_columns(table, columns, db_columns, model_group):
    """
    Rename the selected columns of an in-memory table to their database names
    and tag every row with the model group.

    :param table: The Pandas table to adjust.
    :param columns: Existing column names to keep, in order.
    :param db_columns: Database column names applied positionally.
    :param model_group: Value appended to every row as the 'Group' column.
    :return: The adjusted table.
    """
    renamed = table[columns].set_axis(db_columns, axis=1)
    return renamed.assign(Group=model_group)
def get_latency_over_time(commit_hash, report_url, branch, latency_table):
    """
    Build a table tracking model/EP inference latency over time.

    :param commit_hash: Short git commit hash of the ORT build that produced the data.
    :param report_url: URL of the Azure pipeline run that produced the data.
    :param branch: Git branch of the ORT build that produced the data.
    :param latency_table: Raw "latency over time" table imported from CSV.
    :return: The reshaped and annotated table.
    """
    # Pivot EP columns into (Ep, Latency) rows, then annotate with run metadata.
    melted = latency_table.melt(id_vars=[model_title, group_title], var_name="Ep", value_name="Latency")
    melted = melted.assign(CommitId=commit_hash, ReportUrl=report_url, Branch=branch)
    ordered = melted[
        [
            "CommitId",
            model_title,
            "Ep",
            "Latency",
            "ReportUrl",
            group_title,
            "Branch",
        ]
    ]
    ordered.fillna("", inplace=True)
    return ordered
def get_failures(fail, model_group):
    """
    Build a table tracking failed model/EP inference runs.

    :param fail: Raw failure table imported from CSV.
    :param model_group: Model group namespace appended as a column.
    :return: The adjusted table.
    """
    db_columns = [model_title, "Ep", "ErrorType", "ErrorMessage"]
    return adjust_columns(fail, fail.keys(), db_columns, model_group)
def get_memory(memory, model_group):
    """
    Build a table tracking peak memory usage per model/EP.

    :param memory: Raw memory usage table imported from CSV.
    :param model_group: Model group namespace appended as a column.
    :return: The adjusted table.
    """
    # CPU providers report no memory column, so they are excluded here.
    selected = [model_title] + [
        provider + memory_ending for provider in provider_list if cpu not in provider
    ]
    db_columns = [
        model_title,
        cuda,
        trt,
        standalone_trt,
        cuda_fp16,
        trt_fp16,
        standalone_trt_fp16,
    ]
    return adjust_columns(memory, selected, db_columns, model_group)
def get_latency(latency, model_group):
    """
    Build a table tracking inference run latency per model/EP.

    :param latency: Raw latency table imported from CSV.
    :param model_group: Model group namespace appended as a column.
    :return: The adjusted table.
    """
    selected = [model_title] + [provider + avg_ending for provider in provider_list]
    return adjust_columns(latency, selected, table_headers, model_group)
def get_status(status, model_group):
    """
    Build a table tracking whether each EP can successfully run a given model.

    :param status: Raw model/EP status table imported from CSV.
    :param model_group: Model group namespace appended as a column.
    :return: The adjusted table.
    """
    return adjust_columns(status, status.keys(), table_headers, model_group)
def get_specs(specs, branch, commit_hash, commit_datetime):
    """
    Append branch/commit rows to the hardware/software specs table.

    :param specs: Raw specs table imported from CSV.
    :param branch: Git branch of the ORT build used to gather data.
    :param commit_hash: Short git commit hash of the ORT build used to gather data.
    :param commit_datetime: Git commit datetime of the ORT build used to gather data.
    :return: The extended table.
    """
    # Continue the "." index column from the last existing row.
    next_id = int(specs.tail(1).get(".", 0)) + 1
    extra_rows = pd.DataFrame(
        {
            ".": [next_id, next_id + 1, next_id + 2],
            "Spec": ["Branch", "CommitId", "CommitTime"],
            "Version": [branch, commit_hash, str(commit_datetime)],
        }
    )
    return pd.concat([specs, extra_rows], ignore_index=True)
def get_session(session, model_group):
    """
    Build a table tracking ORT session creation time for each model/EP.

    :param session: Raw model/EP session timing table imported from CSV.
    :param model_group: Model group namespace appended as a column.
    :return: The adjusted table.
    """
    db_columns = [model_title] + ort_provider_list + [p + second for p in ort_provider_list]
    return adjust_columns(session, session.keys(), db_columns, model_group)
def write_table(ingest_client, table, table_name, upload_time, identifier):
    """
    Upload a table to the database, tagging rows with upload time and run id.

    :param ingest_client: QueuedIngestClient used to initiate data ingestion.
    :param table: Pandas table to ingest (no-op when empty).
    :param table_name: Name of the destination table in the database.
    :param upload_time: datetime denoting the data's upload time.
    :param identifier: Identifier linking the data to an ORT commit/date/branch.
    """
    if table.empty:
        return
    # Tag every row with when and from which run it was uploaded.
    annotated = table.assign(UploadTime=str(upload_time), Identifier=identifier)
    ingestion_props = IngestionProperties(
        database=DATABASE_NAME,
        table=table_name,
        data_format=DataFormat.CSV,
        report_level=ReportLevel.FailuresAndSuccesses,
    )
    # append rows
    ingest_client.ingest_from_dataframe(annotated, ingestion_properties=ingestion_props)
def get_identifier(commit_datetime, commit_hash, trt_version, branch):
    """
    Build the identifier that associates uploaded data with an ORT
    commit/date/branch and a TensorRT version.

    :param commit_datetime: Datetime of the ORT commit used to run the benchmarks.
    :param commit_hash: Hash of the ORT commit used to run the benchmarks.
    :param trt_version: TensorRT version used to run the benchmarks.
    :param branch: Name of the ORT branch used to run the benchmarks.
    :return: Identifier string of the form '<date>_<hash>_<trt>_<branch>'.
    """
    # Only the calendar date is part of the identifier, not the time of day.
    return f"{commit_datetime.date()}_{commit_hash}_{trt_version}_{branch}"
def main():
    """
    Entry point of this script. Uploads data produced by benchmarking scripts
    to the database: walks the report folder, aggregates the per-model-group
    CSVs into Pandas tables, and ingests each table.
    """
    args = parse_arguments()

    # connect to database
    kcsb_ingest = KustoConnectionStringBuilder.with_az_cli_authentication(CLUSTER_INGEST)
    ingest_client = QueuedIngestClient(kcsb_ingest)
    identifier = get_identifier(args.commit_datetime, args.commit_hash, args.trt_version, args.branch)
    upload_time = datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0)

    try:
        result_file = args.report_folder
        folders = os.listdir(result_file)
        os.chdir(result_file)

        tables = [
            fail_name,
            memory_name,
            latency_name,
            status_name,
            latency_over_time_name,
            specs_name,
            session_name,
        ]
        table_results = {table_name: pd.DataFrame() for table_name in tables}

        for model_group in folders:
            os.chdir(model_group)
            csv_filenames = os.listdir()
            for csv in csv_filenames:
                table = pd.read_csv(csv)
                # DataFrame.append was deprecated in pandas 1.4 and removed in
                # 2.0; pd.concat is the supported equivalent (and matches
                # get_specs, which already uses pd.concat).
                if session_name in csv:
                    table_results[session_name] = pd.concat(
                        [table_results[session_name], get_session(table, model_group)],
                        ignore_index=True,
                    )
                elif specs_name in csv:
                    table_results[specs_name] = pd.concat(
                        [
                            table_results[specs_name],
                            get_specs(table, args.branch, args.commit_hash, args.commit_datetime),
                        ],
                        ignore_index=True,
                    )
                elif fail_name in csv:
                    table_results[fail_name] = pd.concat(
                        [table_results[fail_name], get_failures(table, model_group)],
                        ignore_index=True,
                    )
                elif latency_name in csv:
                    table_results[memory_name] = pd.concat(
                        [table_results[memory_name], get_memory(table, model_group)],
                        ignore_index=True,
                    )
                    table_results[latency_name] = pd.concat(
                        [table_results[latency_name], get_latency(table, model_group)],
                        ignore_index=True,
                    )
                    if not table_results[latency_name].empty:
                        # NOTE(review): this feeds the *cumulative* latency table
                        # into get_latency_over_time on every model group, so
                        # earlier groups' rows are re-appended each iteration —
                        # confirm whether that duplication is intended.
                        table_results[latency_over_time_name] = pd.concat(
                            [
                                table_results[latency_over_time_name],
                                get_latency_over_time(
                                    args.commit_hash,
                                    args.report_url,
                                    args.branch,
                                    table_results[latency_name],
                                ),
                            ],
                            ignore_index=True,
                        )
                elif status_name in csv:
                    table_results[status_name] = pd.concat(
                        [table_results[status_name], get_status(table, model_group)],
                        ignore_index=True,
                    )
            os.chdir(result_file)

        for table in tables:
            print("writing " + table + " to database")
            db_table_name = "ep_model_" + table
            write_table(
                ingest_client,
                table_results[table],
                db_table_name,
                upload_time,
                identifier,
            )
    except BaseException as e:
        # Broad on purpose: any failure (including KeyboardInterrupt) should
        # surface as a non-zero exit code for the CI pipeline.
        print(str(e))
        sys.exit(1)
# Run the upload only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 35.951482 | 116 | 0.654971 |
01273ce11f1d01e1d456726e422ff7f8be6567ad | 220 | py | Python | dmriprep/config/__init__.py | dPys/dmriprep-1 | 7e194c35e9d0435b51b355c5234a73876664b862 | [
"Apache-2.0"
] | null | null | null | dmriprep/config/__init__.py | dPys/dmriprep-1 | 7e194c35e9d0435b51b355c5234a73876664b862 | [
"Apache-2.0"
] | null | null | null | dmriprep/config/__init__.py | dPys/dmriprep-1 | 7e194c35e9d0435b51b355c5234a73876664b862 | [
"Apache-2.0"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Settings."""
# Lower bound (in GB) for memory estimates — TODO(review): confirm consumer.
DEFAULT_MEMORY_MIN_GB = 0.01
# Output-space identifiers that are not standard templates.
NONSTANDARD_REFERENCES = ['anat', 'T1w', 'dwi', 'fsnative']
| 31.428571 | 73 | 0.654545 |
abe29e6ae18d9e756a01bef512c784f3d5093b0f | 960 | py | Python | sunds/utils/py_utils_test.py | google-research/sunds | 751d31cc4017ebf01d79b4f558e4cc046f76d040 | [
"Apache-2.0"
] | 10 | 2021-07-30T07:55:18.000Z | 2022-03-25T12:37:59.000Z | sunds/utils/py_utils_test.py | google-research/sunds | 751d31cc4017ebf01d79b4f558e4cc046f76d040 | [
"Apache-2.0"
] | 3 | 2021-07-30T10:20:02.000Z | 2021-09-02T09:28:41.000Z | sunds/utils/py_utils_test.py | google-research/sunds | 751d31cc4017ebf01d79b4f558e4cc046f76d040 | [
"Apache-2.0"
] | 3 | 2021-11-05T14:52:11.000Z | 2022-02-05T23:48:58.000Z | # Copyright 2021 The sunds Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for py_utils."""
from sunds import utils
def test_map_fn():
    """`utils.map_fn` keeps direct calls working and supports partial application."""

    @utils.map_fn
    def add_prefix(val, *, prefix):
        return prefix + val

    # Direct invocation behaves like the undecorated function.
    assert add_prefix('abc', prefix='_') == '_abc'

    # Binding only the keyword argument yields a mapper usable over an iterable.
    prefixer = add_prefix(prefix='_')  # pylint: disable=no-value-for-parameter
    assert [prefixer(item) for item in ['a', 'b']] == ['_a', '_b']
| 32 | 112 | 0.717708 |
2557017b8f5c9e2995e1023fe6ca2617c290a403 | 47,585 | py | Python | ant/Main_dr.py | Currycurrycurry/Time-in-State-RL | f106d92c3556b955c108bf3e147bb0b12e60259c | [
"BSD-3-Clause"
] | 14 | 2020-11-03T01:46:49.000Z | 2022-01-18T06:16:46.000Z | ant/Main_dr.py | Currycurrycurry/Time-in-State-RL | f106d92c3556b955c108bf3e147bb0b12e60259c | [
"BSD-3-Clause"
] | null | null | null | ant/Main_dr.py | Currycurrycurry/Time-in-State-RL | f106d92c3556b955c108bf3e147bb0b12e60259c | [
"BSD-3-Clause"
] | 5 | 2020-11-23T06:29:14.000Z | 2022-01-08T18:55:31.000Z | """
Functions to train the DR models for ant where time is handeled.
Credits:
1) The PPO functions and classes are from baselines and modified to work for current setting
2) The environment code is taken from the pybullet github with timing characteristic modifications
"""
checkpoint_path = 'ant_dr_policies'
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
G_T_Horizon = 1000
G_T_Steps = 10000
#Flag used for DR setting
G_TS = False
# Runner class which collects the experiments
import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
    """
    Collects a mini-batch of experience (one rollout of ``nsteps`` per
    parallel environment) and computes GAE(lambda) advantages/returns
    for PPO training.

    __init__:
        - Initialize the runner
    run():
        - Make a mini batch
    """
    def __init__(self, *, env, model, nsteps, gamma, lam):
        super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (General Advantage Estimation)
        self.lam = lam
        # Discount rate
        self.gamma = gamma
    def run(self):
        # Here, we init the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
        mb_states = self.states
        epinfos = []
        # For n in range number of steps
        for _ in range(self.nsteps):
            # Given observations, get action value and neglopacs
            # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
            actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            # Take actions in env and look the results
            # Infos contains a ton of useful informations
            self.obs[:], rewards, self.dones, infos = self.env.step(actions)
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo: epinfos.append(maybeepinfo)
            mb_rewards.append(rewards)
        #batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        # Debug output: range of sampled actions in this batch.
        print('Max action value:')
        print(mb_actions.max(),mb_actions.min(),mb_actions.shape)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # Fix: np.bool was a deprecated alias of the builtin bool and was
        # removed in NumPy 1.24; using bool is behaviorally identical.
        mb_dones = np.asarray(mb_dones, dtype=bool)
        last_values = self.model.value(self.obs, S=self.states, M=self.dones)
        # discount/bootstrap off value fn
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        # Walk backwards through the rollout computing GAE(lambda) advantages;
        # nextnonterminal zeroes the bootstrap across episode boundaries.
        for t in reversed(range(self.nsteps)):
            if t == self.nsteps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[t+1]
                nextvalues = mb_values[t+1]
            delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
        # Value-function targets: advantages plus the value baseline.
        mb_returns = mb_advs + mb_values
        return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
            mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
    """Swap axes 0 and 1 of ``arr`` and flatten them into a single leading axis."""
    n0, n1, *rest = arr.shape
    return arr.swapaxes(0, 1).reshape(n0 * n1, *rest)
# Network architecture
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
    """
    Build a fully-connected (MLP) feature extractor for policy / q-function
    approximators.

    Parameters
    ----------
    num_layers: int, number of fully-connected layers (default: 2)
    num_hidden: int, width of each fully-connected layer (default: 64)
    activation: activation applied after each layer (default: tf.tanh)
    layer_norm: apply layer normalization before the activation when True

    Returns
    -------
    A function mapping an input tensor / placeholder to the network's latent output.
    """
    def network_fn(X):
        hidden = tf.layers.flatten(X)
        for layer_idx in range(num_layers):
            hidden = fc(hidden, 'mlp_fc{}'.format(layer_idx), nh=num_hidden, init_scale=np.sqrt(2))
            if layer_norm:
                hidden = tf.contrib.layers.layer_norm(hidden, center=True, scale=True)
            hidden = activation(hidden)
        return hidden
    return network_fn
# Policy maintenance functions which create the network architecture
import tensorflow as tf
from baselines.common import tf_util
from baselines.a2c.utils import fc
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_placeholder, encode_observation
from baselines.common.tf_util import adjust_shape
from baselines.common.mpi_running_mean_std import RunningMeanStd
#from baselines.common.models import get_network_builder
import gym
class PolicyWithValue(object):
    """
    Encapsulates fields and methods for RL policy and value function
    estimation with shared parameters.
    """
    def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors):
        """
        Parameters:
        ----------
        env             RL environment
        observations    tensorflow placeholder in which the observations will be fed
        latent          latent state from which policy distribution parameters should be inferred
        estimate_q      if True, estimate per-action Q values instead of a state value (discrete actions only)
        vf_latent       latent state from which value function should be inferred (if None, then latent is used)
        sess            tensorflow session to run calculations in (if None, default session is used)
        **tensors       tensorflow tensors for additional attributes such as state or mask
        """
        self.X = observations
        self.state = tf.constant([])
        self.initial_state = None
        # Expose extra tensors (e.g. state/mask placeholders) as attributes so
        # _evaluate can feed them by name.
        self.__dict__.update(tensors)
        vf_latent = vf_latent if vf_latent is not None else latent
        vf_latent = tf.layers.flatten(vf_latent)
        latent = tf.layers.flatten(latent)
        # Based on the action space, will select what probability distribution type
        self.pdtype = make_pdtype(env.action_space)
        self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01)
        # Stochastic action (sampled from the distribution), used for training rollouts.
        self.action = self.pd.sample()
        # Deterministic action (distribution mode), used by step2 for evaluation.
        self.action2 = self.pd.mode()
        # Calculate the neg log of our probability
        self.neglogp = self.pd.neglogp(self.action)
        self.sess = sess or tf.get_default_session()
        if estimate_q:
            assert isinstance(env.action_space, gym.spaces.Discrete)
            self.q = fc(vf_latent, 'q', env.action_space.n)
            self.vf = self.q
        else:
            self.vf = fc(vf_latent, 'vf', 1)
            # Drop the trailing unit dimension so vf is shape [batch].
            self.vf = self.vf[:,0]
    def _evaluate(self, variables, observation, **extra_feed):
        """Run the requested graph nodes, feeding the observation plus any
        extra placeholders (matched by attribute name)."""
        sess = self.sess
        feed_dict = {self.X: adjust_shape(self.X, observation)}
        for inpt_name, data in extra_feed.items():
            if inpt_name in self.__dict__.keys():
                inpt = self.__dict__[inpt_name]
                # Only feed tensors that are actual placeholders in the graph.
                if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder':
                    feed_dict[inpt] = adjust_shape(inpt, data)
        return sess.run(variables, feed_dict)
    def step(self, observation, **extra_feed):
        """
        Compute next (stochastic) action(s) given the observation(s)
        Parameters:
        ----------
        observation     observation data (either single or a batch)
        **extra_feed    additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
        Returns:
        -------
        (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
        """
        a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
        if state.size == 0:
            state = None
        return a, v, state, neglogp
    #used to evaluate the policy
    def step2(self, observation, **extra_feed):
        """
        Compute next (deterministic, distribution-mode) action(s) given the
        observation(s) — used for policy evaluation.
        Parameters:
        ----------
        observation     observation data (either single or a batch)
        **extra_feed    additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
        Returns:
        -------
        (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
        """
        a, v, state, neglogp = self._evaluate([self.action2, self.vf, self.state, self.neglogp], observation, **extra_feed)
        if state.size == 0:
            state = None
        return a, v, state, neglogp
    def value(self, ob, *args, **kwargs):
        """
        Compute value estimate(s) given the observation(s)
        Parameters:
        ----------
        observation     observation data (either single or a batch)
        **extra_feed    additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
        Returns:
        -------
        value estimate
        """
        return self._evaluate(self.vf, ob, *args, **kwargs)
    def save(self, save_path):
        """Save the TF variables of this policy's session to save_path."""
        tf_util.save_state(save_path, sess=self.sess)
    def load(self, load_path):
        """Restore the TF variables of this policy's session from load_path."""
        tf_util.load_state(load_path, sess=self.sess)
def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
    """
    Return a policy constructor for the given environment.

    Parameters:
    ----------
    env                     RL environment providing observation/action spaces
    policy_network          a network name (any string selects the local mlp builder) or a builder function
    value_network           None/'shared' to share the policy latent, 'copy' to duplicate
                            the policy network, or a callable building the value net
    normalize_observations  normalize float32 observations with running mean/std
    estimate_q              estimate per-action Q values instead of a state value
    **policy_kwargs         forwarded to the mlp builder (num_layers, num_hidden, ...)

    Returns:
    -------
    policy_fn(nbatch, nsteps, sess, observ_placeholder) -> PolicyWithValue
    """
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = mlp(**policy_kwargs)
    def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
        ob_space = env.observation_space
        # Debug trace left by the original author.
        print('Sandeep calling policy_fn:',nbatch,nsteps,ob_space, observ_placeholder)
        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)
        extra_tensors = {}
        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X
        encoded_x = encode_observation(ob_space, encoded_x)
        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent
                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)
        _v_net = value_network
        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)
            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x)
        policy = PolicyWithValue(
            env=env,
            observations=X,
            latent=policy_latent,
            vf_latent=vf_latent,
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors
        )
        return policy
    return policy_fn
def _normalize_clip_observation(x, clip_range=(-5.0, 5.0)):
    """
    Normalize observations with running mean/std statistics and clip the result.

    :param x: observation tensor to normalize.
    :param clip_range: (low, high) bounds applied after normalization.
        A tuple default replaces the original mutable list default
        (mutable-default anti-pattern); callers passing lists still work.
    :return: (normalized-and-clipped tensor, RunningMeanStd instance used).
    """
    rms = RunningMeanStd(shape=x.shape[1:])
    norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
    return norm_x, rms
import tensorflow as tf
import functools
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize
# MPI support is disabled in this script; Model falls back to the
# single-process Adam optimizer when MPI is None.
MPI = None
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
- Make the training part (feedforward and retropropagation of gradients)
save/load():
- Save load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None):
self.sess = sess = get_session()
if MPI is not None and comm is None:
comm = MPI.COMM_WORLD
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
if microbatch_size is None:
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
# CREATE THE PLACEHOLDERS
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
# Keep track of old actor
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
# Keep track of old critic
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.LR = LR = tf.placeholder(tf.float32, [])
# Cliprange
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value to reduce variability during Critic training
# Get the predicted value
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
# Unclipped value
vf_losses1 = tf.square(vpred - R)
# Clipped value
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
# Calculate ratio (pi current policy / pi old policy)
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
# Defining Loss = - J is equivalent to max J
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
# Final PG loss
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
# Total loss
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = tf.trainable_variables('ppo2_model')
# 2. Build our trainer
if comm is not None and comm.Get_size() > 1:
self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)
else:
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
# 3. Calculate the gradients
grads_and_var = self.trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
# zip aggregate each gradient with parameters associated
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.step2 = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
    """Run one PPO gradient step on a minibatch.

    Computes standardized advantages A(s,a) = R - V(s), feeds the minibatch
    plus the current lr/cliprange into the graph, runs the train op, and
    returns the fetched loss statistics (order matches self.loss_names).
    """
    # Advantage estimates, then normalize to zero mean / unit variance
    # (the 1e-8 guards against a zero std).
    advs = returns - values
    advs = (advs - advs.mean()) / (advs.std() + 1e-8)
    feed = {
        self.train_model.X: obs,
        self.A: actions,
        self.ADV: advs,
        self.R: returns,
        self.LR: lr,
        self.CLIPRANGE: cliprange,
        self.OLDNEGLOGPAC: neglogpacs,
        self.OLDVPRED: values,
    }
    # Recurrent policies additionally need the hidden states and episode masks.
    if states is not None:
        feed[self.train_model.S] = states
        feed[self.train_model.M] = masks
    fetched = self.sess.run(self.stats_list + [self._train_op], feed)
    # Drop the train op's (None) result; keep only the statistics.
    return fetched[:-1]
def frange(start, stop, step):
    """Yield values from ``start`` up to (but excluding) ``stop`` in
    increments of ``step`` — a float-friendly analogue of ``range``."""
    current = start
    while current < stop:
        yield current
        current = current + step
def run_environment2(env, model, latency, sampling):
    """Roll out one evaluation episode at the requested latency / sampling
    interval, re-applying a +-1 tick jitter to both on every step.

    Returns (total_reward, step_count).
    """
    # Never sample faster than the environment's minimum interval.
    if sampling < env.sampling_interval_min:
        sampling = env.sampling_interval_min
    env.latency = latency
    env.sampling_interval = sampling
    obs = env.reset()
    total_reward = 0
    episode_steps = 0
    finished = False
    while not finished:
        # Per-step jitter of one tick on the latency (clamped at zero)...
        lat_jitter = random.randint(-1, 1)
        env.latency = latency + lat_jitter * G_lat_inc
        if env.latency < 0:
            env.latency = 0.0
        # ...and an independent jitter on the sampling interval.
        si_jitter = random.randint(-1, 1)
        env.sampling_interval = sampling + si_jitter * G_lat_inc
        # Keep the interval consistent: never shorter than the latency and
        # never below the environment minimum.
        env.sampling_interval = max(env.sampling_interval,
                                    env.latency,
                                    env.sampling_interval_min)
        if G_TS:
            # Expose the normalized timing info in the observation's
            # two trailing slots (see WalkerBase.calc_state).
            obs[28] = env.latency / env.latency_max
            obs[29] = env.sampling_interval / env.latency_max
        actions, _, _, _ = model.step2(obs)
        obs, rew, finished, _ = env.step(actions[0])
        total_reward = total_reward + rew
        episode_steps = episode_steps + 1
    return total_reward, episode_steps
def evaluate_model(model):
    """Evaluate ``model`` on G_evaluation_env by sweeping the delay
    (latency == sampling interval) from 0 to G_delay_max.

    Returns (mean episode reward, mean episode length) over the sweep.
    """
    print('Doing Evaluation ************')
    global G_evaluation_env
    env = G_evaluation_env
    reward_log = []
    steps_log = []
    for _episode in range(G_num_episodes_evaluation):
        for delay in frange(0, (G_delay_max + 1), G_evaluation_inc):
            rew, n_steps = run_environment2(env, model, delay, delay)
            reward_log.append(rew)
            steps_log.append(n_steps)
    return np.mean(reward_log), np.mean(steps_log)
import os
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
#from baselines.common.policies import build_policy
#from baselines.ppo2.model import Model
# MPI support is deliberately disabled: this script trains single-process only.
MPI = None
def constfn(val):
    """Wrap a constant in a schedule-style callable.

    learn() expects lr/cliprange to be functions of the remaining-training
    fraction; this gives plain numbers the same interface.
    """
    return lambda _frac: val
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
          vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
          log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
          save_interval=1, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
    """PPO2 training loop (adapted from OpenAI baselines' ppo2.learn).

    Repeatedly: collect nsteps of experience with the Runner, then run
    noptepochs passes of minibatch SGD on the clipped-surrogate objective,
    optionally evaluating and checkpointing along the way. Returns the
    trained model.

    lr and cliprange may be floats (constant) or callables of the remaining
    training fraction in [0, 1].
    NOTE(review): build_policy and Runner are not defined in this chunk
    (their imports are commented out above) -- presumably provided earlier
    in the file; confirm before running standalone.
    """
    #set_global_seeds(seed)
    # Promote constant schedules to callables; see constfn().
    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliprange, float): cliprange = constfn(cliprange)
    else: assert callable(cliprange)
    total_timesteps = int(total_timesteps)
    #print('network_kwargs:', network_kwargs, network)
    policy = build_policy(env, network, **network_kwargs)
    # Get the nb of env
    nenvs = 1#env.num_envs
    # Get state_space and action_space
    ob_space = env.observation_space
    ac_space = env.action_space
    # Calculate the batch_size
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    is_mpi_root = True
    # Instantiate the model object (that creates act_model and train_model)
    # NOTE(review): this unconditionally overwrites the model_fn parameter
    # with the local Model class, so a caller-supplied model_fn is ignored.
    model_fn = Model
    model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
                     nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
                     max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
    if load_path is not None:
        model.load(load_path)
    # Instantiate the runner object
    runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
    if eval_env is not None:
        eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
    # Rolling buffer of the last 100 episode-info dicts (reward 'r', length 'l').
    epinfobuf = deque(maxlen=100)
    if eval_env is not None:
        eval_epinfobuf = deque(maxlen=100)
    if init_fn is not None:
        init_fn()
    # Start total timer
    tfirststart = time.perf_counter()
    nupdates = total_timesteps//nbatch
    #performance = 0
    for update in range(1, nupdates+1):
        assert nbatch % nminibatches == 0
        # Start timer
        tstart = time.perf_counter()
        # frac anneals 1 -> 0 over training; drives the lr/cliprange schedules.
        frac = 1.0 - (update - 1.0) / nupdates
        # Calculate the learning rate
        lrnow = lr(frac)
        # Calculate the cliprange
        cliprangenow = cliprange(frac)
        if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
        # Get minibatch
        obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
        if eval_env is not None:
            eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
        if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
        epinfobuf.extend(epinfos)
        if eval_env is not None:
            eval_epinfobuf.extend(eval_epinfos)
        # Here what we're going to do is for each minibatch calculate the loss and append it.
        mblossvals = []
        if states is None: # nonrecurrent version
            # Index of each element of batch_size
            # Create the indices array
            inds = np.arange(nbatch)
            for _ in range(noptepochs):
                # Randomize the indexes
                np.random.shuffle(inds)
                # 0 to batch_size with batch_train_size step
                for start in range(0, nbatch, nbatch_train):
                    end = start + nbatch_train
                    mbinds = inds[start:end]
                    slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                    mblossvals.append(model.train(lrnow, cliprangenow, *slices))
        else: # recurrent version
            # Shuffle whole environments (not timesteps) so hidden states stay valid.
            assert nenvs % nminibatches == 0
            envsperbatch = nenvs // nminibatches
            envinds = np.arange(nenvs)
            flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
            for _ in range(noptepochs):
                np.random.shuffle(envinds)
                for start in range(0, nenvs, envsperbatch):
                    end = start + envsperbatch
                    mbenvinds = envinds[start:end]
                    mbflatinds = flatinds[mbenvinds].ravel()
                    slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                    mbstates = states[mbenvinds]
                    mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
        # Feedforward --> get losses --> update
        lossvals = np.mean(mblossvals, axis=0)
        # End timer
        tnow = time.perf_counter()
        # Calculate the fps (frame per second)
        fps = int(nbatch / (tnow - tstart))
        if update_fn is not None:
            update_fn(update)
        #do the checkpoint evaluation
        # NOTE(review): Python precedence makes this
        # (G_use_checkpoint_evaluaion and update%G_evaluate_every==0) or update==1
        # -- i.e. update 1 always evaluates even with the flag off; confirm intent.
        if G_use_checkpoint_evaluaion and update%G_evaluate_every==0 or update == 1:
            mean_evaluation_reward, mean_evaluation_steps = evaluate_model(model)
        if update % log_interval == 0 or update == 1:
            # Calculates if value function is a good predicator of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev =< 0)
            ev = explained_variance(values, returns)
            logger.logkv("misc/serial_timesteps", update*nsteps)
            logger.logkv("misc/nupdates", update)
            logger.logkv("misc/total_timesteps", update*nbatch)
            logger.logkv("fps", fps)
            logger.logkv("misc/explained_variance", float(ev))
            logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
            #logger.logkv("performance", performance)
            if G_use_checkpoint_evaluaion:
                logger.logkv("mean_evaluation_reward", mean_evaluation_reward)
                logger.logkv("mean_evaluation_steps", mean_evaluation_steps)
            if eval_env is not None:
                logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
                logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
            logger.logkv('misc/time_elapsed', tnow - tfirststart)
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv('loss/' + lossname, lossval)
            logger.dumpkvs()
        if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root:
            #performance = do_evaluation(model,env)
            #print('Done evaluation performance is:',performance)
            checkdir = osp.join(logger.get_dir(), 'checkpoints')
            os.makedirs(checkdir, exist_ok=True)
            savepath = osp.join(checkdir, '%.5i'%update)
            #savepath = savepath + '_' + str(int(performance))
            print('Saving to', savepath)
            model.save(savepath)
    return model
# Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error)
def safemean(xs):
    """Mean of ``xs``; NaN (instead of a numpy warning/error) when empty."""
    return np.mean(xs) if len(xs) else np.nan
#see value to use
# Experiment configuration. One physics "tick" is 0.0165*1000/4 = 4.125 ms
# of simulated time; most timing constants below are multiples of it.
# NOTE(review): G_TS is read throughout this file (calc_state, reset, step,
# run_environment2, Ant.__init__) but is never assigned anywhere in this
# chunk -- running as-is would raise NameError. Confirm where it is defined.
myseed = 0
G_T_Horizon = 1000          # episode horizon (steps) before forced done
G_T_Steps = 10000           # PPO rollout length (nsteps)
G_delay_max = (0.0165*1000.0/4.0)*10.0    # max latency: 10 ticks, in ms
G_sampling_min = (0.0165*1000.0/4.0)*1.0  # minimum sampling interval: 1 tick
G_max_num_steps = G_T_Horizon
G_Tick = (0.0165*1000.0/4.0)              # one simulator tick, in ms
G_Action_repeated = True    # repeat prev action while "latency" elapses
G_policy_selection_sample = True
G_Action_clip = 1.0         # torque action clip range (see apply_action)
G_evaluation_env = None #note this need to set correctly
G_use_checkpoint_evaluaion = True
G_evaluate_every = 1        # evaluate every N updates
G_evaluation_inc = (0.0165*1000.0/4.0)    # delay sweep increment in evaluate_model
G_num_episodes_evaluation = 1
G_lat_inc = (0.0165*1000.0/4.0)#G_delay_max/(G_T_Steps/G_max_num_steps)
G_lat_inc_steps = 10.0#G_delay_max/G_lat_inc
#print(G_lat_inc,G_lat_inc_steps)
G_enable_latency_jitter = True
#jitter of one tick-rate
G_latency_jitter = 1
G_sampling_jitter = 1
import tensorflow as tf
import random
import numpy as np
# Seed every RNG source up front so runs are reproducible.
tf.set_random_seed(myseed)
np.random.seed(myseed)
random.seed(myseed)
import gym
import numpy as np
import sys
print(sys.executable)
#print(sys.path)
# Workaround: temporarily remove any python2.7 entries from sys.path so that
# `import cv2` resolves the Python 3 build, then restore the removed entries.
del_path = []
for p in reversed(sys.path):
    if 'python2.7' in p:
        sys.path.remove(p)
        del_path.append(p)
#print(sys.path)
import cv2
for p in del_path:
    sys.path.append(p)
from scene_stadium import SinglePlayerStadiumScene
from robot_bases import XmlBasedRobot, MJCFBasedRobot, URDFBasedRobot
import numpy as np
import pybullet
import os
import pybullet_data
from robot_bases import BodyPart
class WalkerBase(MJCFBasedRobot):
    """Base class for MJCF walking robots: tracks the walk target, applies
    joint torques, and assembles the observation vector for the policy."""

    def __init__(self, fn, robot_name, action_dim, obs_dim, power):
        MJCFBasedRobot.__init__(self, fn, robot_name, action_dim, obs_dim)
        self.power = power  # global torque scale applied to every joint
        self.camera_x = 0
        self.start_pos_x, self.start_pos_y, self.start_pos_z = 0, 0, 0
        self.walk_target_x = 1e3  # kilometer away
        self.walk_target_y = 0
        self.body_xyz = [0, 0, 0]

    def robot_specific_reset(self, bullet_client):
        self._p = bullet_client
        # Slightly randomize joint positions so episodes do not start identically.
        for j in self.ordered_joints:
            j.reset_current_position(self.np_random.uniform(low=-0.1, high=0.1), 0)
        self.feet = [self.parts[f] for f in self.foot_list]
        self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
        self.scene.actor_introduce(self)
        self.initial_z = None  # captured lazily on the first calc_state()

    #this is the function where action torque is applied to the joints
    def apply_action(self, a):
        assert (np.isfinite(a).all())
        for n, j in enumerate(self.ordered_joints):
            # torque = power * per-joint coefficient * action clipped to +-G_Action_clip
            j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -G_Action_clip, +G_Action_clip)))

    #IMP: This function gets the next state from the robot
    def calc_state(self):
        j = np.array([j.current_relative_position() for j in self.ordered_joints],
                     dtype=np.float32).flatten()
        # even elements [0::2] position, scaled to -1..+1 between limits
        # odd elements [1::2] angular speed, scaled to show -1..+1
        self.joint_speeds = j[1::2]
        self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
        body_pose = self.robot_body.pose()
        parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
        self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]
                         )  # torso z is more informative than mean z
        self.body_real_xyz = body_pose.xyz()
        self.body_rpy = body_pose.rpy()
        z = self.body_xyz[2]
        if self.initial_z == None:
            self.initial_z = z
        r, p, yaw = self.body_rpy
        self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
                                            self.walk_target_x - self.body_xyz[0])
        self.walk_target_dist = np.linalg.norm(
            [self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
        angle_to_target = self.walk_target_theta - yaw
        # Yaw-only rotation matrix used to express the speed in the body frame.
        rot_speed = np.array([[np.cos(-yaw), -np.sin(-yaw), 0], [np.sin(-yaw),
                                                                 np.cos(-yaw), 0], [0, 0, 1]])
        vx, vy, vz = np.dot(rot_speed,
                            self.robot_body.speed())  # rotate speed back to body point of view
        more = np.array(
            [
                z - self.initial_z,
                np.sin(angle_to_target),
                np.cos(angle_to_target),
                0.3 * vx,
                0.3 * vy,
                0.3 * vz,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
                r,
                p
            ],
            dtype=np.float32)
        # Two placeholder slots for (latency, sampling interval); the env
        # overwrites them after reset()/step() when G_TS is enabled.
        timing_info_holder = np.array([0.0, 0.0], dtype=np.float32)
        if G_TS:
            #state = np.clip(np.concatenate([more] + [j] + [self.feet_contact], [timing_info_holder]), -5, +5)
            state = np.clip(np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
            # Timing slots are appended after clipping so they keep their own scale.
            state = np.concatenate((state, timing_info_holder))
            #print(state.shape)
        else:
            state = np.clip(np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
        return state
        #return np.clip(np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)

    def calc_potential(self):
        # progress in potential field is speed*dt, typical speed is about 2-3 meter per second, this potential will change 2-3 per frame (not per second),
        # all rewards have rew/frame units and close to 1.0
        debugmode = 0
        if (debugmode):
            print("calc_potential: self.walk_target_dist")
            print(self.walk_target_dist)
            print("self.scene.dt")
            print(self.scene.dt)
            print("self.scene.frame_skip")
            print(self.scene.frame_skip)
            print("self.scene.timestep")
            print(self.scene.timestep)
        return -self.walk_target_dist / self.scene.dt
class Ant(WalkerBase):
    """Quadruped Ant robot loaded from ant.xml."""
    # Feet whose ground contact is tracked in the observation.
    foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']

    def __init__(self):
        # With timing-state (G_TS) enabled, the observation gains the two
        # extra latency/sampling slots: obs_dim 30 instead of 28.
        if G_TS:
            WalkerBase.__init__(self, "ant.xml", "torso", action_dim=8, obs_dim=30, power=2.5)
        else:
            WalkerBase.__init__(self, "ant.xml", "torso", action_dim=8, obs_dim=28, power=2.5)

    def alive_bonus(self, z, pitch):
        return +1 if z > 0.26 else -1  # 0.25 is central sphere rad, die if it scrapes the ground
#from scene_stadium import SinglePlayerStadiumScene
from env_bases import MJCFBaseBulletEnv
import numpy as np
import pybullet
class WalkerBaseBulletEnv(MJCFBaseBulletEnv):
    """Locomotion gym env extended with an explicit action-latency and
    sampling-interval model (all timing values in milliseconds of sim time).

    In step(): the previous action is repeated while the latency elapses,
    then the new action runs for the remainder of the sampling interval;
    the step reward is the mean reward over those simulated ticks.
    """

    def __init__(self, robot, render=False):
        # print("WalkerBase::__init__ start")
        self.camera_x = 0
        self.walk_target_x = 1e3  # kilometer away
        self.walk_target_y = 0
        self.stateId = -1  # pybullet saved-state id; -1 until first reset
        MJCFBaseBulletEnv.__init__(self, robot, render)
        self.time_tick = G_Tick #1ms
        self.latency = 0.0 # save the latency of most recent returned state
        self.latency_max = G_delay_max # max latency in ms
        self.max_num_steps = G_max_num_steps # for steps latency will be fixed or change on reset or done after G_max_num_steps.
        self.latency_steps = 0
        self.steps = 0
        self.sampling_interval = G_sampling_min
        self.sampling_interval_min = G_sampling_min #30 Hz frequency
        #increase the latency within thresholds
        self.index = 1
        #used to evolve the latency
        self.prev_action = None
        self.original_timestep = (0.0165*1000.0)/4.0
        #used to enable jitter
        self.episodic_l = 0.0
        self.episodic_si = G_sampling_min

    def create_single_player_scene(self, bullet_client):
        # self.stadium_scene = SinglePlayerStadiumScene(bullet_client,
        #                                               gravity=9.8,
        #                                               timestep=0.0165 / 4,
        #                                               frame_skip=4)
        # One physics tick is time_tick ms (converted to seconds here).
        self.stadium_scene = SinglePlayerStadiumScene(bullet_client,
                                                      gravity=9.8,
                                                      timestep=(self.time_tick/1000.0),
                                                      frame_skip=4)
        return self.stadium_scene

    def reset(self):
        # Restore the saved pybullet state when available (fast reset).
        if (self.stateId >= 0):
            #print("restoreState self.stateId:",self.stateId)
            self._p.restoreState(self.stateId)
        r = MJCFBaseBulletEnv.reset(self)
        self._p.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 0)
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene(
            self._p, self.stadium_scene.ground_plane_mjcf)
        self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex],
                                self.parts[f].bodyPartIndex) for f in self.foot_ground_object_names])
        self._p.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 1)
        if (self.stateId < 0):
            self.stateId = self._p.saveState()
            #print("saving state self.stateId:",self.stateId)
        # Neutral 8-dim action used while the first latency window elapses.
        self.prev_action =[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.steps = 0
        #update the state with the timing information
        if G_TS:
            r[28] = self.latency/self.latency_max
            r[29] = self.sampling_interval/self.latency_max
        return r

    def _isDone(self):
        return self._alive < 0

    def move_robot(self, init_x, init_y, init_z):
        "Used by multiplayer stadium to move sideways, to another running lane."
        self.cpp_robot.query_position()
        pose = self.cpp_robot.root_part.pose()
        pose.move_xyz(
            init_x, init_y, init_z
        )  # Works because robot loads around (0,0,0), and some robots have z != 0 that is left intact
        self.cpp_robot.set_pose(pose)

    electricity_cost = -2.0  # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less improtant
    stall_torque_cost = -0.1  # cost for running electric current through a motor even at zero rotational speed, small
    foot_collision_cost = -1.0  # touches another leg, or other objects, that cost makes robot avoid smashing feet into itself
    foot_ground_object_names = set(["floor"])  # to distinguish ground and other objects
    joints_at_limit_cost = -0.1  # discourage stuck joints

    #given an action calculate the reward based on the robot current state
    def calreward(self,a):
        state = self.robot.calc_state()  # also calculates self.joints_at_limit
        self._alive = float(
            self.robot.alive_bonus(
                state[0] + self.robot.initial_z,
                self.robot.body_rpy[1]))  # state[0] is body height above ground, body_rpy[1] is pitch
        done = self._isDone()
        if not np.isfinite(state).all():
            print("~INF~", state)
            done = True
        # Progress = change in potential (negative distance-to-target / dt).
        potential_old = self.potential
        self.potential = self.robot.calc_potential()
        progress = float(self.potential - potential_old)
        feet_collision_cost = 0.0
        for i, f in enumerate(
                self.robot.feet
        ):  # TODO: Maybe calculating feet contacts could be done within the robot code
            contact_ids = set((x[2], x[4]) for x in f.contact_list())
            #print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
            if (self.ground_ids & contact_ids):
                #see Issue 63: https://github.com/openai/roboschool/issues/63
                #feet_collision_cost += self.foot_collision_cost
                self.robot.feet_contact[i] = 1.0
            else:
                self.robot.feet_contact[i] = 0.0
        electricity_cost = self.electricity_cost * float(np.abs(a * self.robot.joint_speeds).mean())
        # let's assume we have DC motor with controller, and reverse current braking
        electricity_cost += self.stall_torque_cost * float(np.square(a).mean())
        joints_at_limit_cost = float(self.joints_at_limit_cost * self.robot.joints_at_limit)
        rewards = [
            self._alive, progress, electricity_cost, joints_at_limit_cost, feet_collision_cost
        ]
        self.HUD(state, a, done)
        rewards= sum(rewards)
        return rewards

    def step(self, a):
        # if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
        #     self.robot.apply_action(a)
        #     self.scene.global_step()
        self.latency_steps = self.latency_steps + 1
        self.steps = self.steps + 1
        latency = (self.latency)
        reward = 0
        local_sim_steps = 0
        if G_Action_repeated:
            #simulate the latency
            # While the new action is "in flight", keep applying the previous one.
            if latency>0:
                for i in range(int(latency/self.time_tick)):
                    self.robot.apply_action(self.prev_action)
                    self.scene.global_step()
                    reward = reward + self.calreward(a)
                    local_sim_steps = local_sim_steps + 1
                    #print('local_sim_steps:', local_sim_steps)
            #simulate the sampling interval
            # Then apply the new action for the rest of the sampling window.
            if self.sampling_interval>self.latency:
                delay = (self.sampling_interval - self.latency)
                for i in range(int(delay/self.time_tick)):
                    self.robot.apply_action(a)
                    self.scene.global_step()
                    reward = reward + self.calreward(a)
                    local_sim_steps = local_sim_steps + 1
            if local_sim_steps>0:
                reward = reward/local_sim_steps # we are rescaling the reward based on local_sim_steps
            #print('local_sim_steps:', local_sim_steps)
        self.prev_action = a
        #update the latency and sampling as needed
        # Curriculum: every max_num_steps env steps, advance the base latency
        # to the next multiple of G_lat_inc (wrapping after G_lat_inc_steps).
        if self.latency_steps == self.max_num_steps:
            #print(self.latency, self.sampling_interval)
            self.latency = self.index*G_lat_inc
            self.sampling_interval = self.sampling_interval_min
            if self.latency>self.sampling_interval:
                self.sampling_interval = self.latency
            self.episodic_l = self.latency #used to maintain jitter for an episode
            self.episodic_si = self.sampling_interval ##used to maintain jitter for an episode
            self.latency_steps = 0
            if self.index==int(G_lat_inc_steps):
                self.index = -1
            self.index = self.index + 1
        state = self.robot.calc_state()  # also calculates self.joints_at_limit
        self._alive = float(
            self.robot.alive_bonus(
                state[0] + self.robot.initial_z,
                self.robot.body_rpy[1]))  # state[0] is body height above ground, body_rpy[1] is pitch
        done = self._isDone()
        if not np.isfinite(state).all():
            print("~INF~", state)
            done = True
        if self.steps == G_T_Horizon:
            done = True
        if G_enable_latency_jitter:
            #add jitter in latency# 5 ms jitter
            # One-tick jitter around the episode's base latency / interval,
            # keeping latency >= 0 and interval >= max(latency, minimum).
            jitter = random.randint(-1,1)
            self.latency = self.episodic_l + jitter*G_lat_inc
            if self.latency<0:
                self.latency = 0.0
            jitter = random.randint(-1,1)
            self.sampling_interval = self.episodic_si + jitter*G_lat_inc
            if self.latency>self.sampling_interval:
                self.sampling_interval = self.latency
            if self.sampling_interval < self.sampling_interval_min:
                self.sampling_interval = self.sampling_interval_min
        #update the state with the timing information
        if G_TS:
            state[28] = self.latency/self.latency_max
            state[29] = self.sampling_interval/self.latency_max
        #print('Rewards:', self.rewards)
        #return state, sum(self.rewards), bool(done), {}
        return state, reward, bool(done), {}

    def camera_adjust(self):
        # Follow the torso with the camera.
        x, y, z = self.robot.body_real_xyz
        self.camera_x = x
        self.camera.move_and_look_at(self.camera_x, y , 1.4, x, y, 1.0)
class AntBulletEnv(WalkerBaseBulletEnv):
    """Latency-aware walker env bound to the Ant robot."""

    def __init__(self, render=False):
        self.robot = Ant()
        WalkerBaseBulletEnv.__init__(self, self.robot, render)

# Module-level env instance used by evaluate_model() during training.
G_evaluation_env = AntBulletEnv()#HalfCheetahBulletEnv()
#parameters used
def atari():
    """PPO hyperparameters for this experiment (the name is a leftover from
    the baselines Atari defaults; the env here is a pybullet Ant)."""
    return {
        'nsteps': G_T_Steps,
        'nminibatches': 50,
        'lam': 0.95,
        'gamma': 0.99,
        'noptepochs': 10,
        'log_interval': 1,
        'ent_coef': 0.0,
        'lr': 3e-4,
        'cliprange': 0.2,
        'value_network': 'copy',
    }
# Command-line-style arguments, hard-coded (mirrors baselines' run.py args).
args = {'alg':'ppo2', 'env':'', 'env_type':None, 'gamestate':None,
        'log_path':None, 'network':None, 'num_env':None, 'num_timesteps':5000000000,
        'play':False, 'reward_scale':1.0, 'save_path':None, 'save_video_interval':0,
        'save_video_length':None, 'seed':myseed}
def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None):
    """Build one monitored environment instance.

    Most parameters exist only for API compatibility with baselines'
    make_env; the environment is hard-wired to AntBulletEnv.
    """
    if initializer is not None:
        initializer(mpi_rank=mpi_rank, subrank=subrank)
    wrapper_kwargs = wrapper_kwargs or {}
    env_kwargs = env_kwargs or {}
    env = AntBulletEnv()
    # Offset the seed per sub-environment so workers differ.
    env.seed(None if seed is None else seed + subrank)
    monitor_path = logger_dir and os.path.join(logger_dir, f"{mpi_rank}.{subrank}")
    return Monitor(env, monitor_path, allow_early_resets=True)
def build_env(args):
    """Create the (single) vectorized training environment.

    Configures a TF session with soft placement and on-demand GPU memory
    growth, then wraps one env in make_vec_env.
    """
    alg = args['alg']
    seed = args['seed']
    env_type, env_id = 'atari', args['env']
    # Soft placement + memory growth: do not pre-allocate the whole GPU.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    get_session(config=config)
    # HER needs dict observations; everything else gets them flattened.
    flatten_dict_observations = alg not in {'her'}
    env = make_vec_env(env_id, env_type, 1, seed,
                       reward_scale=args['reward_scale'],
                       flatten_dict_observations=flatten_dict_observations)
    return env
def train(args):
    """Assemble hyperparameters, build the env, and run PPO training.

    Returns (trained model, env).
    """
    env_type, env_id = '', args['env']
    total_timesteps = int(args['num_timesteps'])
    seed = 0
    alg_kwargs = atari()
    env = build_env(args)
    alg_kwargs['network'] = 'cnn'
    print(f"Training {args['alg']} on {env_type}:{env_id} with arguments \n{alg_kwargs}")
    model = learn(env=env,
                  seed=seed,
                  total_timesteps=total_timesteps,
                  **alg_kwargs)
    return model, env
# NOTE(review): exact duplicate of atari() defined earlier in this file; this
# later definition silently shadows the first. Harmless but redundant.
def atari():
    """PPO hyperparameter dict (duplicate definition, see note above)."""
    return dict(
        nsteps=G_T_Steps, nminibatches=50,
        lam=0.95, gamma=0.99, noptepochs=10, log_interval=1,
        ent_coef=0.0,
        lr= 3e-4,
        cliprange=0.2,
        value_network='copy'
    )
# environment building functions
from baselines import logger
# environment building functions
import gym
from baselines.common.atari_wrappers import wrap_deepmind
from baselines.common import retro_wrappers
from baselines.common.wrappers import ClipActionsWrapper
from baselines.bench import Monitor
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
def make_vec_env(env_id, env_type, num_env, seed,
                 wrapper_kwargs=None,
                 env_kwargs=None,
                 start_index=0,
                 reward_scale=1.0,
                 flatten_dict_observations=True,
                 gamestate=None,
                 initializer=None,
                 force_dummy=False):
    """
    Create a wrapped, monitored DummyVecEnv of num_env environments.
    """
    wrapper_kwargs = wrapper_kwargs or {}
    env_kwargs = env_kwargs or {}
    mpi_rank = 0
    logger_dir = logger.get_dir()

    def build_thunk(rank, initializer=None):
        # Called eagerly once per sub-env; the returned closure defers the
        # actual env construction until DummyVecEnv invokes it.
        print('make_thunk called:', rank)
        return lambda: make_env(
            env_id=env_id,
            env_type=env_type,
            mpi_rank=mpi_rank,
            subrank=rank,
            seed=seed,
            reward_scale=reward_scale,
            gamestate=gamestate,
            flatten_dict_observations=flatten_dict_observations,
            wrapper_kwargs=wrapper_kwargs,
            env_kwargs=env_kwargs,
            logger_dir=logger_dir,
            initializer=initializer,
        )

    #set_global_seeds(seed)
    thunks = [build_thunk(start_index + i, initializer=None) for i in range(num_env)]
    return DummyVecEnv(thunks)
# Entry point: configure baselines logging, then run training.
# NOTE(review): checkpoint_path is not defined anywhere in this chunk --
# presumably assigned earlier in the file; confirm before running standalone.
logger.configure(checkpoint_path)
model, env = train(args)
| 32.83989 | 204 | 0.640055 |
2c04c17ae12bc64eb7915fb981bbb05e6e57044f | 1,317 | py | Python | examples/7_extend/extend.py | kzh3ka/japronto | a526277a2f59100388c9f39d4ca22bfb4909955b | [
"MIT"
] | 9,472 | 2017-01-31T13:22:02.000Z | 2022-03-31T13:15:46.000Z | examples/7_extend/extend.py | zishenWang/japronto | f501c9f59fc83d0e4ab928ecdacf1735a2cc5cb6 | [
"MIT"
] | 170 | 2017-01-31T18:50:13.000Z | 2022-03-17T13:32:04.000Z | examples/7_extend/extend.py | zishenWang/japronto | f501c9f59fc83d0e4ab928ecdacf1735a2cc5cb6 | [
"MIT"
] | 739 | 2017-01-31T17:42:03.000Z | 2022-02-24T05:10:32.000Z | from japronto import Application
# This view accesses custom method host_startswith
# and a custom property reversed_agent. Both are registered later.
def extended_hello(request):
    """Handler exercising the custom request extensions: host_startswith
    (method) and reversed_agent (property), both registered below."""
    greeting = ('Hello ' + request.reversed_agent
                if request.host_startswith('api.')
                else 'Hello stranger')
    return request.Response(text=greeting)
# This view registers a callback, such callbacks are executed after handler
# exits and the response is ready to be sent over the wire.
def with_callback(request):
    """Handler registering a done-callback, which japronto invokes after the
    handler returns and the response has been sent over the wire."""
    def on_done(_response):
        print('Done!')
    request.add_done_callback(on_done)
    return request.Response(text='cb')
# This is a body for reversed_agent property
def reversed_agent(request):
    """Body of the custom reversed_agent request property: the User-Agent
    header, reversed."""
    agent = request.headers['User-Agent']
    return ''.join(reversed(agent))
# This is a body for host_startswith method
# Custom methods and properties always accept request
# object.
def host_startswith(request, prefix):
    """Body of the custom host_startswith request method: True when the
    Host header begins with ``prefix``."""
    host = request.headers['Host']
    return host.startswith(prefix)
# Build the application and register the request extensions used above.
app = Application()
# Register the custom property and method on the Request class.
# By default the names are taken from the function names
# unless you provide a `name` keyword parameter.
app.extend_request(reversed_agent, property=True)
app.extend_request(host_startswith)
r = app.router
r.add_route('/', extended_hello)
r.add_route('/callback', with_callback)
# Start the server (this call blocks).
app.run()
| 25.823529 | 75 | 0.744875 |
a49898a6863ab6d75c15b4f49044633e9e050726 | 1,833 | py | Python | config.env.py | galenguyer/DiscourseOIDC | 8cf96be8abffabb3f79891b4d86eb2d6a0a876ce | [
"Apache-2.0"
] | null | null | null | config.env.py | galenguyer/DiscourseOIDC | 8cf96be8abffabb3f79891b4d86eb2d6a0a876ce | [
"Apache-2.0"
] | null | null | null | config.env.py | galenguyer/DiscourseOIDC | 8cf96be8abffabb3f79891b4d86eb2d6a0a876ce | [
"Apache-2.0"
] | null | null | null | import os, json
#######################
# Flask Configuration #
#######################
# All settings are environment-driven with development-friendly defaults.
DEBUG = True if os.environ.get('SSO_DEBUG', 'false') == 'true' else False
IP = os.environ.get('SSO_IP', '0.0.0.0')
PORT = int(os.environ.get('SSO_PORT', '8080'))
SERVER_NAME = os.environ.get('SSO_SERVER_NAME', 'discourse-sso.csh.rit.edu')
# SECURITY: the fallback is intentionally obvious filler -- SSO_SECRET_KEY
# must be set to a strong random value in any real deployment.
SECRET_KEY = os.environ.get('SSO_SECRET_KEY', 'thisisntverysecure')
################################
# OpenID Connect Configuration #
################################
OIDC_ISSUER = os.environ.get('SSO_OIDC_ISSUER', 'https://sso.csh.rit.edu/auth/realms/csh')
OIDC_CLIENT_CONFIG = {
    'client_id': os.environ.get('SSO_OIDC_CLIENT_ID', 'discourse'),
    'client_secret': os.environ.get('SSO_OIDC_CLIENT_SECRET', ''),
    'post_logout_redirect_uris': [os.environ.get('SSO_OIDC_LOGOUT_REDIRECT_URI',
                                                 'https://' + SERVER_NAME + '/logout')]
}
###########################
# Discourse Configuration #
###########################
# Discourse URL to send the user back
DISCOURSE_URL = os.environ.get('SSO_DISCOURSE_URL', 'http://discuss.example.com')
# Secret key shared with the Discourse server
DISCOURSE_SECRET_KEY = os.environ.get('SSO_DISCOURSE_SECRET', '')
# Override emails returned from the IdP to <username>@<SSO_EMAIL_OVERRIDE_DOMAIN>
SSO_EMAIL_OVERRIDE = True if os.environ.get('SSO_EMAIL_OVERRIDE', 'false') == 'true' else False
SSO_EMAIL_OVERRIDE_DOMAIN = os.environ.get('SSO_EMAIL_OVERRIDE_DOMAIN', '')
# Attribute to read from the environment after user validation.
# Maps Discourse SSO payload fields to OIDC claim names; "name" lists two
# claims (presumably concatenated by the app -- verify against its usage).
DISCOURSE_USER_MAP = json.loads(os.environ.get('SSO_DISCOURSE_USER_MAP',
    '{"name": ["givenName", "sn"], "username": "preferred_username", \
"external_id": "sub", "email": "email"}'))
| 42.627907 | 113 | 0.613202 |
05b22cbe3503e3309079e398e3c6b031c5120521 | 3,716 | py | Python | automox_console_sdk/models/inline_response200.py | AutomoxCommunity/automox-console-sdk-python | 9e921b138d63f90750e071d0a40e1d7edfa06733 | [
"MIT"
] | 1 | 2021-10-05T22:09:10.000Z | 2021-10-05T22:09:10.000Z | automox_console_sdk/models/inline_response200.py | AutomoxCommunity/automox-console-sdk-python | 9e921b138d63f90750e071d0a40e1d7edfa06733 | [
"MIT"
] | 1 | 2021-09-16T06:00:51.000Z | 2021-09-16T06:00:51.000Z | automox_console_sdk/models/inline_response200.py | AutomoxCommunity/automox-console-sdk-python | 9e921b138d63f90750e071d0a40e1d7edfa06733 | [
"MIT"
] | 4 | 2021-09-16T02:35:32.000Z | 2022-02-16T01:09:57.000Z | # coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse200(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'results': 'list[ApiKey]',
'size': 'int'
}
attribute_map = {
'results': 'results',
'size': 'size'
}
def __init__(self, results=None, size=None): # noqa: E501
"""InlineResponse200 - a model defined in Swagger""" # noqa: E501
self._results = None
self._size = None
self.discriminator = None
if results is not None:
self.results = results
if size is not None:
self.size = size
@property
def results(self):
"""Gets the results of this InlineResponse200. # noqa: E501
:return: The results of this InlineResponse200. # noqa: E501
:rtype: list[ApiKey]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this InlineResponse200.
:param results: The results of this InlineResponse200. # noqa: E501
:type: list[ApiKey]
"""
self._results = results
@property
def size(self):
"""Gets the size of this InlineResponse200. # noqa: E501
:return: The size of this InlineResponse200. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this InlineResponse200.
:param size: The size of this InlineResponse200. # noqa: E501
:type: int
"""
self._size = size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse200, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
    """Returns a pretty-printed string representation of the model."""
    properties = self.to_dict()
    return pprint.pformat(properties)
def __repr__(self):
    """Used by `print` and `pprint`; delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Returns True when *other* is an InlineResponse200 with equal attributes."""
    return (isinstance(other, InlineResponse200)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """Returns True if the two objects are NOT equal (inverse of ==)."""
    return not (self == other)
| 27.124088 | 80 | 0.555974 |
45a38c0767104ef87f2770f65f80abdbee07f1de | 65,534 | py | Python | src/rosegraphics.py | SiweiXu/Exam1Practice | e39c0b327e3fdb3159a508ea3fb98f13e17256bb | [
"MIT"
] | null | null | null | src/rosegraphics.py | SiweiXu/Exam1Practice | e39c0b327e3fdb3159a508ea3fb98f13e17256bb | [
"MIT"
] | null | null | null | src/rosegraphics.py | SiweiXu/Exam1Practice | e39c0b327e3fdb3159a508ea3fb98f13e17256bb | [
"MIT"
] | null | null | null | """
rosegraphics.py - a simple Graphics library for Python.
Its key feature is:
-- USING this library provides a simple introduction to USING objects.
Other key features include:
-- It has a rich set of classes, methods and instance variables.
-- In addition to classes like Circles that are natural for
students, it has other kinds of classes like RoseWindow
and FortuneTeller to provide a richer set of examples
than "just" a graphics library.
-- It allows one to do a reasonable set of graphics operations
with reasonable efficiency. The API mimics Java's Shape API
for the most part.
-- It is built on top of tkinter and its extension ttk
(the standard graphics libraries that come with Python).
-- Unlike tkinter, it is NOT event-driven and hence can be used
before students see that paradigm. (There is a behind-the-scenes
facility for listening for and responding to events,
for those who want to do so.)
-- It attempts to be as bullet-proof as possible, to make it easy
for beginners to use it. In particular, it attempts to provide
reasonable error messages when a student misuses the API.
-- It was inspired by zellegraphics but is a complete re-implementation
that attempts to:
-- Be more bullet-proof.
-- Provide a richer set of examples for using objects.
-- Have an API that is more like Java's Shape API than tkinter's
(older) API.
-- While it can serve as an example for defining classes,
it is NOT intended to do so for beginners.
It is excellent for helping students learn to USE objects;
it is NOT perfect for helping students learn to WRITE CLASSES.
See the MAIN function below for typical examples of its use.
Authors: David Mutchler, Mark Hays, Michael Wollowswki, Matt Boutell,
Chandan Rupakheti, Claude Anderson and their colleagues,
with thanks to John Zelle for inspiration and hints.
First completed version: September 2014.
"""
# FIXME (errors):
# -- clone() does not really make a copy; it just makes a new one
# but without cloning all the attributes.
# -- _ShapeWithCenter claims that things like Ellipse are subclasses,
# but they are not at this point, I think. In general, need to
# deal with overlap between _ShapeWithCenter and _RectangularShape.
# KEEP both of them to have some classes have corner_1 and corner_2
# while others have center and ...
# FIXME (things that have yet to be implemented):
# -- Allow multiple canvasses.
# -- Better close_on ... ala zellegraphics.
# -- Keyboard.
# -- Better Mouse.
# -- Add type hints.
# -- Catch all Exceptions and react appropriately.
# -- Implement unimplemented classes.
# -- Add and allow FortuneTellers and other non-canvas classes.
import tkinter
from tkinter import font as tkinter_font
import time
import turtle
# ----------------------------------------------------------------------
# All the windows that are constructed during a run share the single
# _master_Tk (a tkinter.Tk object)
# as their common root. The first construction of a RoseWindow
# sets this _master_Tk to a Tkinter.Tk object.
# ----------------------------------------------------------------------
# Lazily initialized: the first RoseWindow constructed assigns a tkinter.Tk here.
_master_Tk = None
# ----------------------------------------------------------------------
# At the risk of not being Pythonic, we provide a simple type-checking
# facility that attempts to provide meaningful error messages to
# students when they pass arguments that are not of the expected type.
# ----------------------------------------------------------------------
class WrongTypeException(Exception):
    """
    Raised by check_types when a value is not an instance of its
    expected type. (The type-checking facility is not yet fully
    implemented.)
    """
def check_types(pairs):
    """
    Checks each (value, expected_type) pair in *pairs*, raising
    WrongTypeException(pair) for the first value that is not an
    instance of its expected type. (Not yet fully implemented.)
    """
    for pair in pairs:
        if not isinstance(pair[0], pair[1]):
            raise WrongTypeException(pair)
# ----------------------------------------------------------------------
# Serialization facility
# ----------------------------------------------------------------------
def _serialize_shapes(self):
    """
    Returns a single string: one line per shape on self's initial
    canvas, each line a dump of that shape's attributes, sorted for
    easy comparison (e.g. in tests).

    NOTE(review): despite the `self` parameter this is a module-level
    helper; it is called with a RoseWindow (see
    RoseWindow.__serialize_shapes).
    """
    # Idea: dump all the stats on all shapes, return a sorted list for easy comparison.
    # Problem: the order in which keys appear in dictionaries is random!
    # Solution: sort keys and manually print
    shapes = [shape.__dict__ for shape in self.initial_canvas.shapes]
    keys_by_shape = [sorted(shape) for shape in shapes]
    # Null out the canvas-specific fields so serializations are
    # comparable across different windows/canvases.
    for k in range(len(shapes)):
        shapes[k]['_method_for_drawing'] = None
        shapes[k]['shape_id_by_canvas'] = None
    result = []
    for k in range(len(keys_by_shape)):
        shape = shapes[k]
        result.append([])
        for key in keys_by_shape[k]:
            result[-1].append(str(key) + ":" + str(shape[key]))
        result[-1] = str(result[-1])
    return "\n".join(sorted(result))
# ----------------------------------------------------------------------
# RoseWindow is the top-level object.
# It starts with a single RoseCanvas.
# ----------------------------------------------------------------------
class RoseWindow(object):
    """
    A RoseWindow is a window that pops up when constructed.
    It can have RoseWidgets on it and starts by default with
    a single RoseCanvas upon which one can draw shapes.

    To construct a RoseWindow, use:
    -   rg.RoseWindow()

    or use any of its optional arguments, as in these examples:

    window = rg.RoseWindow(400, 300)  # 400 wide by 300 tall
    window = rg.RoseWindow(400, 300, 'Funny window')  # with a title

    Instance variables include:

    width:  width of this window (in pixels)
    height: width of this window (in pixels)
    title:  displayed on the window's bar
    widgets: the things attached to this window
    """

    def __init__(self, width=400, height=300, title='Rose Graphics',
                 color='black', canvas_color=None,
                 make_initial_canvas=True):
        """
        Pops up a tkinter.Toplevel window with (by default)
        a RoseCanvas (and associated tkinter.Canvas) on it.

        Arguments are:
        -- width, height: dimensions of the window (in pixels).
        -- title:  title displayed on the window.
        -- color:  background color of the window
        -- canvas_color:  background color of the canvas
                          displayed on the window by default
        -- make_initial_canvas:
             -- If True, a default canvas is placed on the window.
             -- Otherwise, no default canvas is placed on the window.

        If this is the first RoseWindow constructed, then a
        hidden Tk object is constructed to control the event loop.

        Preconditions:
          :type width: int
          :type height: int
          :type title: str
          :type color: Color
          :type canvas_color: Color
          :type make_initial_canvas: bool
        """
        # check_types([(width, (int, float)),
        #              (height, (int, float)),
        #              (title, (Color, str)
        # --------------------------------------------------------------
        # The _master_Tk controls the mainloop for ALL the RoseWindows.
        # If this is the first RoseWindow constructed in this run,
        # then construct the _master_Tk object.
        # --------------------------------------------------------------
        global _master_Tk
        if not _master_Tk:
            _master_Tk = tkinter.Tk()
            _master_Tk.withdraw()
        else:
            time.sleep(0.1)  # Helps the window appear on TOP of Eclipse

        # --------------------------------------------------------------
        # Has a tkinter.Toplevel, and a tkinter.Canvas on the Toplevel.
        # --------------------------------------------------------------
        self.toplevel = tkinter.Toplevel(_master_Tk,
                                         background=color,
                                         width=width, height=height)
        self.toplevel.title(title)
        self._is_closed = False
        # Closing via the window manager's X button routes through close():
        self.toplevel.protocol("WM_DELETE_WINDOW", self.close)

        # FIXME: The next two need to be properties to have
        # setting happen correctly.  Really belongs to RoseCanvas.
        # See comments elsewhere on this.
        self.width = width
        self.height = height

        if make_initial_canvas:
            self.initial_canvas = RoseCanvas(self, width, height,
                                             canvas_color)
        else:
            self.initial_canvas = None
        self.widgets = [self.initial_canvas]

        # FIXME: Do any other tailoring of the toplevel as desired,
        # e.g. borderwidth and style...

        # --------------------------------------------------------------
        # Catch mouse clicks and key presses.
        # --------------------------------------------------------------
        self.mouse = Mouse()
        self.keyboard = Keyboard()
        self.toplevel.bind('<Button>', self._on_mouse_click)
        self.toplevel.bind('<KeyPress>', self._on_key_press)

        self.update()

    def close(self):
        """ Closes this RoseWindow. """
        if self.toplevel:
            self.toplevel.destroy()
            self.toplevel = None
        self.update()
        self._is_closed = True

    def update(self):
        """
        Checks for and handles events that has happened
        in this RoseWindow (e.g. mouse clicks, drawing shapes).
        """
        global _master_Tk
        _master_Tk.update()

    def render(self, seconds_to_pause: object = None) -> object:
        """
        Updates all the Shapes attached to RoseCanvas objects associated with this RoseWindow, then draws all those Shapes.

        After doing so, pauses the given number of seconds.

        :type seconds_to_pause: int
        """
        for widget in self.widgets:
            if type(widget) == RoseCanvas:
                widget.render()
        self.update()

        if seconds_to_pause:
            time.sleep(seconds_to_pause)

    def close_on_mouse_click(self):
        """
        Displays a message at the bottom center of the window and waits for the user to click the mouse anywhere in the window.

        Then closes this RoseWindow.

        Returns an rg.Point that specifies where the user clicked the mouse.
        """
        message = 'To exit, click anywhere in this window'
        click_position = self.continue_on_mouse_click(message=message,
                                                      close_it=True)
        return click_position

    def continue_on_mouse_click(self,
                                message='To continue, click anywhere in this window',
                                x_position=None,
                                y_position=None,
                                close_it=False,
                                erase_it=True):
        """
        Displays a message at the bottom center of the window and waits for the user to click the mouse, then erases the message.

        Optional parameters let you:
        -- Display a different message
        -- Place the message at a different place in the window (xpos and ypos are as in Text)
        -- Close the window after the mouse is clicked (and ignore the GraphicsError that results if the user instead chooses to click the X in the window)
        -- NOT erase the message when done
        """
        if self._is_closed:
            return
        if x_position is None:
            x_position = self.width / 2
        if y_position is None:
            y_position = self.height - 20
        anchor_point = Point(x_position, y_position)

        text = Text(anchor_point, message)

        # FIXME: Really should do all this on a per-RoseCanvas basis.

        if self.initial_canvas:
            text.attach_to(self.initial_canvas)
            self.initial_canvas._renderShape(text, render_NOW=True)

        click_position = self.get_next_mouse_click()

        if erase_it and self.initial_canvas:
            text.detach_from(self.initial_canvas)

        if close_it:
            self.close()  # then close the window

        return click_position

    def get_next_mouse_click(self):
        """
        Waits for the user to click in the window.
        Then returns the rg.Point that represents the point where the user clicked.

        Example:
        If this method is called and then the user clicks near the upper-right corner of a 300 x 500 window,
        this function would return something like rg.Point(295, 5).
        """
        self.mouse.position = None
        while True:
            if self._is_closed:
                return None
            if self.mouse.position is not None:
                break
            # Poll: pump the event loop, then let other events be handled.
            self.update()
            time.sleep(.05)  # allow time for other events to be handled

        click_point = self.mouse.position
        self.mouse.position = None

        return click_point

    def _on_mouse_click(self, event):
        # tkinter callback: record the click on our Mouse object.
        self.mouse._update(event)

    def _on_key_press(self, event):
        # tkinter callback: forward the key press to our Keyboard object.
        self.keyboard._update(event)

    # def add_canvas(self, width=None, height=None, background_color=0):
    # FIXME: Set defaults based on the main canvas.
    #     new_canvas = RoseCanvas(self, background_color='white')
    #     self.widgets.append(new_canvas)
    #
    #     _root.update()

    def __serialize_shapes(self):
        """Returns a list of strings representing the shapes in sorted order."""
        return _serialize_shapes(self)
class RoseWidget():
    """
    Superclass for anything that can be placed on a RoseWindow,
    e.g. a RoseCanvas, FortuneTeller, etc.
    """

    def __init__(self, window):
        # Remember the RoseWindow that owns this widget.
        self._window = window

    def get_window(self):
        """Returns the RoseWindow to which this widget is attached."""
        return self._window
class RoseCanvas(RoseWidget):
    # Class-wide default background colors and a count of constructed canvases.
    defaults = {'colors': [None, 'yellow', 'light blue', 'dark grey']}
    count = 0
    """
    A RoseCanvas is a RoseWidget (i.e., a thing on a RoseWindow)
    upon which one can draw shapes and other Drawable things.
    """

    def __init__(self, window, width=200, height=200,
                 background_color=0):
        """
        Constructs a tkinter.Canvas on the given RoseWindow's toplevel
        and grids it.  Shapes drawn on this canvas are kept in
        self.shapes until rendered.
        """
        super().__init__(window)

        RoseCanvas.count = RoseCanvas.count + 1

        # FIXME: Deal with default background colors.
        # FIXME: Store background color as a property
        # so that modifying it changes the tkinter canvas.
        # Ditto width and height.

        # if background_color == 0:
        #     index = RoseCanvas.count % len(defaults['colors'])
        #     self.background_color = defaults['colors'][index]
        # else:
        #     self.background_color = background_color

        tk_canvas = tkinter.Canvas(window.toplevel,
                                   width=width, height=height,
                                   background=background_color)
        self._tkinter_canvas = tk_canvas

        # FIXME: Automate gridding better.
        self._tkinter_canvas.grid(padx=5, pady=5)

        self.shapes = []

    def render(self, seconds_to_pause=None):
        """
        Updates all the Shapes attached to this RoseCanvas, then draws all those Shapes.

        After doing so, pauses the given number of seconds.

        :type seconds_to_pause: int
        """
        self._update_shapes()
        self._window.update()

        if seconds_to_pause:
            time.sleep(seconds_to_pause)

    def _renderShape(self, shape, render_NOW=False):
        """Renders a shape."""
        coordinates = shape._get_coordinates_for_drawing()
        options = shape._get_options_for_drawing()

        # First render on this canvas: create the tkinter item and
        # remember its id; later renders just update that item.
        if shape.shape_id_by_canvas[self] is None:
            shape.shape_id_by_canvas[self] = \
                shape._method_for_drawing(self._tkinter_canvas, *coordinates)

        try:
            self._tkinter_canvas.coords(shape.shape_id_by_canvas[self],
                                        *coordinates)
        except tkinter.TclError:
            msg = 'Could not place the shape\n'
            msg += 'on the given window.\n'
            msg += 'Did you accidentally close a window\n'
            msg += 'that later needed to be rendered again?'
            raise Exception(msg) from None

        self._tkinter_canvas.itemconfigure(shape.shape_id_by_canvas[self],
                                           options)
        if render_NOW:
            # redraw NOW
            self._window.update()

    def _draw(self, shape):
        """Queues a shape for being drawn. Does NOT draw it just yet."""
        # Identity (not ==) check: the same Shape object is queued only once.
        shapeInList = False
        for listShape in self.shapes:
            if listShape is shape:
                shapeInList = True
                break

        if not shapeInList:
            shape.shape_id_by_canvas[self] = None
            self.shapes.append(shape)

    def _undraw(self, shape):
        # Removes the shape from the queue and deletes its tkinter item.
        if shape in self.shapes:
            for i in range(len(self.shapes)):
                if self.shapes[i] is shape:
                    self._tkinter_canvas.delete(shape.shape_id_by_canvas[self])
                    del self.shapes[i]
                    break

    def _update_shapes(self):
        # (Re)render every queued shape.
        for shape in self.shapes:
            self._renderShape(shape)
class Mouse(object):
    """
    Tracks the most recent mouse-click position:
    an rg.Point, or None if no (unconsumed) click has occurred.
    """

    def __init__(self):
        # No click has been seen yet.
        self.position = None

    def _update(self, event):
        # Record where the tkinter mouse event occurred.
        self.position = Point(event.x, event.y)
class Keyboard(object):
    """Placeholder for keyboard tracking (not yet implemented)."""

    def __init__(self):
        # No key press has been recorded yet.
        self.key_pressed = None

    def _update(self, event):
        # Not yet implemented: ignore the key event.
        pass
class __FreezeClass__ (type):
    """
    Metaclass that forbids assigning attributes on the CLASS itself.
    A student who writes `Circle.fill_color = ...` almost certainly
    forgot the () that constructs an object, so raise a helpful error.
    """

    def __setattr__(self, name, _ignored):  # last parameter is the value
        message = ("You tried to set the instance variable '" + name + "'\n"
                   + " on the CLASS '" + self.__name__ + "'"
                   + ", which is not an OBJECT.\n"
                   + " Did you forget the () after the word "
                   + self.__name__ + ",\n"
                   + " on the line where you constructed the object?")
        raise SyntaxError(message)
class _Shape(object, metaclass=__FreezeClass__):
    """
    Abstract superclass for everything drawable on a RoseCanvas
    (which itself draws on a tkinter Canvas).  The constructor records
    the tkinter method used to draw the concrete Shape.

    Concrete subclasses include: Arc, Bitmap, Circle, Ellipse, Image,
    Line, Path, Polygon, Rectangle, RoundedRectangle, Square, Text
    and Window.

    Public data attributes: None.  Public methods: attach_to.
    """

    def __init__(self, method_for_drawing):
        """
        :param method_for_drawing: the tkinter method that draws this Shape.
        """
        self._method_for_drawing = method_for_drawing
        # Maps each RoseCanvas displaying this Shape to its tkinter item id.
        self.shape_id_by_canvas = {}

    def __eq__(self, other):
        """
        Two Shape objects are equal (==) when all their attributes
        (other than their per-canvas tkinter ids) are equal.
        """
        if not isinstance(other, self.__class__):
            return False
        # Compare copies with the canvas-id bookkeeping removed.
        mine = dict(self.__dict__)
        theirs = dict(other.__dict__)
        mine.pop("shape_id_by_canvas")
        theirs.pop("shape_id_by_canvas")
        return mine == theirs

    def __ne__(self, other):
        return not self.__eq__(other)

    def attach_to(self, window_or_canvas):
        """
        'draws' this Shape: attaches it to the given RoseWindow or
        RoseCanvas, so the Shape appears the next time that
        RoseWindow/RoseCanvas is rendered.
        """
        # A RoseWindow delegates to its initial canvas.
        if isinstance(window_or_canvas, RoseWindow):
            window_or_canvas = window_or_canvas.initial_canvas
        window_or_canvas._draw(self)

    def detach_from(self, rose_canvas):
        """
        'undraws' this Shape: detaches it from the given RoseWindow or
        RoseCanvas, so the Shape no longer appears when that
        RoseWindow/RoseCanvas is rendered.
        """
        if type(rose_canvas) == RoseWindow:
            rose_canvas = rose_canvas.initial_canvas
        rose_canvas._undraw(self)
class _ShapeWithOutline(object):
    """
    Mixin for Shapes that have an interior (fillable with a color)
    and an outline (with a color and thickness).

    Concrete subclasses include: Arc, Circle, Ellipse, Image, Line,
    Path, Polygon, Rectangle, Square, Text and Window.

    Public data attributes: fill_color, outline_color, outline_thickness.
    Public methods: _initialize_options.
    """
    defaults = {'fill_color': None,
                'outline_color': 'black',
                'outline_thickness': 1}

    def _initialize_options(self):
        # Start every option at its class-wide default.
        defaults = _ShapeWithOutline.defaults
        self.fill_color = defaults['fill_color']
        self.outline_color = defaults['outline_color']
        self.outline_thickness = defaults['outline_thickness']

    def _get_options_for_drawing(self):
        options = {'fill': self.fill_color,
                   'outline': self.outline_color,
                   'width': self.outline_thickness}

        # tkinter uses '' (not None) to mean a transparent color:
        for option in ('fill', 'outline'):
            options[option] = options[option] or ''

        return options
class _ShapeWithThickness(object):
    """
    Mixin for Shapes that are (almost always) filled with a color and
    have a thickness, but no separate outline.

    Concrete subclasses include: Line and Path.

    Public data attributes: color, thickness, arrow.
    Public methods: _initialize_options.
    """
    defaults = {'color': 'black',
                'thickness': 1,
                'arrow': None}

    def _initialize_options(self):
        # Start every option at its class-wide default.
        defaults = _ShapeWithThickness.defaults
        self.color = defaults['color']
        self.thickness = defaults['thickness']
        self.arrow = defaults['arrow']

    def _get_options_for_drawing(self):
        # A color of None means 'black' here:
        fill = self.color if self.color is not None else 'black'
        return {'fill': fill,
                'width': self.thickness,
                'arrow': self.arrow}
class _ShapeWithText(object):
    """
    Mixin for Shapes that display text with a configurable font.

    Concrete subclasses include: Text.

    Public data attributes: font_family, font_size, is_bold, is_italic,
    is_underline, is_overstrike, justify, text_box_width, text_color,
    text.
    Public methods: _initialize_options.
    """
    defaults = {'font_family': 'helvetica',
                'font_size': 14,
                'weight': 'normal',
                'slant': 'roman',
                'underline': 0,
                'overstrike': 0,
                'justify': tkinter.CENTER,
                'text_box_width': None,
                'text_color': 'black',
                'text': ''}

    def _initialize_options(self):
        # Translate the tkinter-style defaults into friendlier booleans.
        defaults = _ShapeWithText.defaults
        self.font_family = defaults['font_family']
        self.font_size = defaults['font_size']
        self.is_bold = defaults['weight'] == 'bold'
        self.is_italic = defaults['slant'] == 'italic'
        self.is_underline = defaults['underline'] == 1
        self.is_overstrike = defaults['overstrike'] == 1

        self.justify = defaults['justify']
        self.text_box_width = defaults['text_box_width']
        self.text_color = defaults['text_color']
        self.text = defaults['text']

    def _get_options_for_drawing(self):
        # Translate the boolean flags back into tkinter font settings.
        font = tkinter_font.Font(
            family=self.font_family,
            size=self.font_size,
            weight='bold' if self.is_bold else 'normal',
            slant='italic' if self.is_italic else 'roman',
            underline=1 if self.is_underline else 0,
            overstrike=1 if self.is_overstrike else 0)

        options = {'font': font,
                   'justify': self.justify,
                   'fill': self.text_color,
                   'text': self.text}

        if self.text_box_width:
            options['width'] = self.text_box_width

        return options
class _ShapeWithCenter(_Shape):
    """
    A Shape that has a center; moving the center moves the whole Shape.

    Concrete subclasses include: Arc, Bitmap, Circle, Ellipse, Image,
    Rectangle, RoundedRectangle, Square, Text and Window.

    Public data attributes: center.
    Public methods: move_by, move_center_to.
    """

    def __init__(self, center, method_for_drawing):
        """
        :param center: the Point at the center of this Shape
            (a CLONE of it is stored, so later mutation of the
            caller's Point does not affect this Shape).
        :param method_for_drawing: the tkinter method that draws this Shape.
        """
        super().__init__(method_for_drawing)
        self.center = center.clone()

    def move_by(self, dx, dy):
        """
        Mutates this Shape, translating it right by dx and down by dy
        (negative values move left/up).

        :type dx: float
        :type dy: float
        """
        self.center.move_by(dx, dy)

    def move_center_to(self, x, y):
        """
        Mutates this Shape, moving its center to (x, y) and hence
        translating the entire Shape by however much the center moved.

        :type x: float
        :type y: float
        """
        self.center.move_to(x, y)
class _RectangularShape(_Shape):
    """
    A _Shape determined by its rectangular bounding box (plus possibly
    other information).

    Concrete sub-classes include: rg.Ellipse, rg.Rectangle.

    Examples:
    These all assume that the variable shape is a _RectangularShape
    (e.g. an rg.Ellipse or a rg.Rectangle):

    The methods in these examples all return rg.Point objects that are
    copies of a corner/center of the _RectangularShape:
      ul = shape.get_upper_left_corner()
      ur = shape.get_upper_right_corner()
      ll = shape.get_lower_left_corner()
      lr = shape.get_lower_right_corner()
      center = shape.get_center()

    The methods in these examples return a positive number:
      h = shape.get_height()
      w = shape.get_width()

    The method in this example returns an rg.Rectangle that encloses
    this _RectangularShape:
      bbox = shape.get_bounding_box()

    This example moves this _RectangularShape right 100 and up 50:
      shape.move_by(100, -50)

    This example does the same thing another way:
      shape.corner_1 = shape.corner_1 + 100
      shape.corner_2 = shape.corner_2 - 50
    """

    def __init__(self, corner_1, corner_2, method_for_drawing):
        """
        :type corner_1: Point
        :type corner_2: Point
        :type method_for_drawing: callable(int, int, int, int) -> int
        """
        super().__init__(method_for_drawing)

        # CLONES of the given corners, so later mutation of the
        # caller's Points does not affect this Shape.
        self.corner_1 = corner_1.clone()
        self.corner_2 = corner_2.clone()

        self._update_corners()

    def __repr__(self):
        """ Returns a string representation of this shape. """
        f_string = ''
        f_string += '{}: corner_1=({}, {}), corner_2=({}, {}),'
        f_string += ' fill_color={},'
        f_string += ' outline_color={}, outline_thickness={}.'
        return f_string.format(self.__class__.__name__,
                               self.corner_1.x, self.corner_1.y,
                               self.corner_2.x, self.corner_2.y,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)

    def move_by(self, dx, dy):
        """
        Moves this _Shape to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this shape.

        :type dx: float
        :type dy: float
        """
        self.corner_1.x += dx
        self.corner_1.y += dy
        self.corner_2.x += dx
        self.corner_2.y += dy

    def clone(self):
        """
        Returns a copy of this _RectangularShape.

        NOTE(review): assumes the concrete subclass's __init__ takes
        exactly (corner_1, corner_2); true for Ellipse and Rectangle.
        """
        return self.__class__(self.corner_1.clone(),
                              self.corner_2.clone())

    def get_upper_left_corner(self):
        """
        Returns a copy of the ** upper-left **
        corner of this _RectanglarShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._upper_left_corner

    def get_lower_left_corner(self):
        """
        Returns a copy of the ** lower-left **
        corner of this _RectanglarShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._lower_left_corner

    def get_upper_right_corner(self):
        """
        Returns a copy of the ** upper-right **
        corner of this _RectanglarShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._upper_right_corner

    def get_lower_right_corner(self):
        """
        Returns a copy of the ** lower-right **
        corner of this _RectanglarShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._lower_right_corner

    def get_center(self):
        """
        Returns a copy of the ** center ** of this _RectanglarShape.
        The returned value is an rg.Point.
        """
        return Point((self.corner_1.x + self.corner_2.x) / 2,
                     (self.corner_1.y + self.corner_2.y) / 2)

    def get_height(self):
        """
        Returns the height (i.e., the size in
        the y-direction) of this _RectangularShape.
        The returned value is always positive.
        """
        return abs(self.corner_1.y - self.corner_2.y)

    def get_width(self):
        """
        Returns the width (i.e., the size in
        the x-direction) of this _RectangularShape.
        The returned value is always positive.
        """
        return abs(self.corner_1.x - self.corner_2.x)

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses this _RectangularShape.
        """
        return Rectangle(self.corner_1, self.corner_2)

    def _update_corners(self):
        # Recompute the four cached corner Points from corner_1/corner_2,
        # which the caller may have mutated since the last call.
        min_x = min(self.corner_1.x, self.corner_2.x)
        min_y = min(self.corner_1.y, self.corner_2.y)
        max_x = max(self.corner_1.x, self.corner_2.x)
        max_y = max(self.corner_1.y, self.corner_2.y)

        self._upper_left_corner = Point(min_x, min_y)
        self._upper_right_corner = Point(max_x, min_y)
        self._lower_left_corner = Point(min_x, max_y)
        self._lower_right_corner = Point(max_x, max_y)

    def _get_coordinates_for_drawing(self):
        # tkinter wants (x1, y1, x2, y2) of the bounding box.
        return [self.get_upper_left_corner().x,
                self.get_upper_left_corner().y,
                self.get_lower_right_corner().x,
                self.get_lower_right_corner().y]
class Arc(_RectangularShape, _ShapeWithOutline):
    """ Not yet implemented. """  # Placeholder; see the module-level FIXME list.
class Bitmap(_Shape):
    """ Not yet implemented. """  # Placeholder; see the module-level FIXME list.
class Circle(_ShapeWithCenter, _ShapeWithOutline):
    """
    A circle, specified by its center and radius:
        rg.Circle(rg.Point(100, 75), 30)
    is the circle centered at (100, 75) with radius 30.

    Instance variables:
      center -- an rg.Point at the center of the Circle.
      radius -- the radius of the Circle.
      fill_color -- the Circle is filled with this color.
      outline_color -- color of the Circle's outline.
      outline_thickness -- outline thickness, in pixels.

    Examples:
      circle = rg.Circle(rg.Point(100, 75), 30)
      circle.fill_color = 'blue'
      circle.outline_color = 'black'
      circle.outline_thickness = 5

      window = rg.RoseWindow()
      circle.attach_to(window)

      circle.move_center_to(300, 200)
      circle.move_by(-50, 60)
    """

    def __init__(self, center, radius):
        """
        :type center: rg.Point
        :type radius: int
        """
        # Stores a CLONE of the given center in self.center:
        super().__init__(center, tkinter.Canvas.create_oval)
        # Sets the default fill_color, outline_color, outline_thickness:
        super()._initialize_options()
        self.radius = radius

    def __repr__(self):
        """ Returns a string representation of this Circle. """
        template = ('Circle: center=({}, {}), radius={}, fill_color={}, '
                    'outline_color={}, outline_thickness={}.')
        return template.format(self.center.x, self.center.y, self.radius,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)

    def clone(self):
        """ Returns a copy of this Circle. """
        return Circle(self.center, self.radius)

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses this Circle.
        """
        r = self.radius
        return Rectangle(Point(self.center.x - r, self.center.y - r),
                         Point(self.center.x + r, self.center.y + r))

    def _get_coordinates_for_drawing(self):
        # tkinter draws an oval from its bounding box.
        return self.get_bounding_box()._get_coordinates_for_drawing()
class Ellipse(_RectangularShape, _ShapeWithOutline):
    """
    An ellipse (aka oval), specified by two opposite corners of the
    imaginary rectangle that encloses it:
        rg.Ellipse(rg.Point(100, 50), rg.Point(300, 200))
    is the ellipse whose enclosing rectangle has upper-left corner
    (100, 50) and lower-right corner (300, 200).  Any two opposite
    corners may be given, in either order.

    Instance variables:
      corner_1, corner_2 -- rg.Points at opposite corners of the
          imaginary enclosing rectangle.
      fill_color -- the Ellipse is filled with this color.
      outline_color -- color of the Ellipse's outline.
      outline_thickness -- outline thickness, in pixels.

    Examples:
      ellipse = rg.Ellipse(rg.Point(100, 50), rg.Point(300, 200))
      ellipse.fill_color = 'blue'
      ellipse.outline_color = 'black'
      ellipse.outline_thickness = 5

      window = rg.RoseWindow()
      ellipse.attach_to(window)
      ellipse.move_by(-50, 60)

      # Inherited accessors (each corner method returns an rg.Point):
      ul = ellipse.get_upper_left_corner()
      ur = ellipse.get_upper_right_corner()
      ll = ellipse.get_lower_left_corner()
      lr = ellipse.get_lower_right_corner()
      center = ellipse.get_center()
      h = ellipse.get_height()   # always positive
      w = ellipse.get_width()    # always positive
    """

    def __init__(self, corner_1, corner_2):
        """
        :type corner_1: rg.Point
        :type corner_2: rg.Point
        """
        # Stores CLONES of the given corner Points in
        # self.corner_1 and self.corner_2:
        super().__init__(corner_1, corner_2,
                         tkinter.Canvas.create_oval)
        # Sets the default fill_color, outline_color, outline_thickness:
        super()._initialize_options()
class Line(_Shape, _ShapeWithThickness):
    """
    A line segment from one rg.Point to another:
        rg.Line(rg.Point(100, 50), rg.Point(200, 30))
    starts at (100, 50) and ends at (200, 30).  The order of the
    endpoints matters only when the Line is drawn as an arrow.

    Instance variables:
      start, end -- the two endpoint rg.Points.
      color -- the Line is drawn with this color.
      thickness -- thickness of the Line, in pixels.
      arrow -- how arrow-heads are drawn:
          None     -- plain line, no arrow-heads
          'first'  -- arrow-head at the start point
          'last'   -- arrow-head at the end point
          'both'   -- arrow-heads at both ends

    Examples:
      line = rg.Line(rg.Point(100, 50), rg.Point(200, 30))
      line.color = 'blue'
      line.thickness = 3
      line.arrow = 'last'   # arrow from start to end

      window = rg.RoseWindow()
      line.attach_to(window)
      line.move_by(-50, 60)
    """

    def __init__(self, start, end):
        """
        :type start: rg.Point
        :type end: rg.Point
        """
        super().__init__(tkinter.Canvas.create_line)

        # Sets the default color, thickness and arrow options:
        super()._initialize_options()

        # Store CLONES of the endpoints so the caller's Points
        # are not shared with this Line:
        self.start = start.clone()
        self.end = end.clone()

    def __repr__(self):
        """ Returns a string representation of this Line. """
        template = ('Line: start=({}, {}), end=({}, {}), color={}, '
                    'thickness={}, arrow={}.')
        return template.format(self.start.x, self.start.y,
                               self.end.x, self.end.y,
                               self.color, self.thickness, self.arrow)

    def clone(self):
        """ Returns a copy of this Line. """
        return Line(self.start, self.end)

    def move_by(self, dx, dy):
        """
        Mutates this Line, translating both endpoints (and hence the
        whole Line) right by dx and down by dy (negative = left/up).

        :type dx: float
        :type dy: float
        """
        for endpoint in (self.start, self.end):
            endpoint.move_by(dx, dy)

    def get_midpoint(self):
        """
        Returns an rg.Point at the midpoint (center) of this Line.
        """
        return Point((self.start.x + self.end.x) / 2,
                     (self.start.y + self.end.y) / 2)

    def _get_coordinates_for_drawing(self):
        # tkinter wants (x1, y1, x2, y2).
        return [self.start.x, self.start.y, self.end.x, self.end.y]
# Placeholder class reserved for a future shape type.
class Path(_Shape, _ShapeWithThickness):
    """ Not yet implemented. """
class Point(_Shape, _ShapeWithOutline):
    """
    A Shape that is a point in two-dimensional space.
    It is drawn as a small circle (dot).
    To construct a Point, use:
    - rg.Point(x, y)
    where x and y are the Point's coordinates.
    For example:
    - rg.Point(100, 50)
    specifies the point whose x value is 100
    and whose y value is 50.
    Instance variables include the following:
      x: The x-coordinate of the Point.
      y: The y-coordinate of the Point.
      fill_color:
        The Point is filled with this color.
        Note that a Point is drawn as a small, filled
        circle, which is why it has a fill_color, etc.
        Example: p.fill_color = 'green'
      outline_color:
        The outline of the Point is this color.
        Example: p.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
        of the outline of the Point.
    Examples:
      p = rg.Point(100, 50)
      print(p.x, p.y)
      window = rg.RoseWindow()
      p.attach_to(window)
      p.move_to(300, 200)
      p.move_by(-50, 60)
      # Another way to move the Point:
      p.x = p.x - 50
      p.y = p.y + 60
      p.fill_color = 'blue'
      p.outline_color = 'black'
      p.outline_thickness = 1
    """
    # Class-wide defaults, including the on-screen size of the dot.
    defaults = {'width_for_drawing': 5,
                'height_for_drawing': 5,
                'fill_color': 'black',
                'outline_color': 'black',
                'outline_thickness': 1}

    def __init__(self, x, y):
        """
        :type x: float
        :type y: float
        """
        super().__init__(tkinter.Canvas.create_oval)
        self.fill_color = Point.defaults['fill_color']
        self.outline_color = Point.defaults['outline_color']
        self.outline_thickness = Point.defaults['outline_thickness']
        self.x = x
        self.y = y
        # Size of the dot used to draw this Point on a canvas.
        self.width_for_drawing = Point.defaults['width_for_drawing']
        self.height_for_drawing = Point.defaults['height_for_drawing']

    def __repr__(self):
        """ Returns a string representation of this Point. """
        return 'Point({:.1f}, {:.1f})'.format(self.x, self.y)

    def clone(self):
        """ Returns a copy of this Point. """
        return Point(self.x, self.y)

    def move_by(self, dx, dy):
        """
        Moves this Point to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this Point.

        :type dx: float
        :type dy: float
        """
        self.x = self.x + dx
        self.y = self.y + dy

    def move_to(self, x, y):
        """
        Moves this Point to (x, y).
        Does NOT return a value; instead, it mutates this Point.

        :type x: float
        :type y: float
        """
        self.x = x
        self.y = y

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses
        this Point (viewing it as a dot).
        """
        # BUG FIX: the x-offsets must use the drawing WIDTH and the
        # y-offsets the drawing HEIGHT.  The original used width for both
        # offsets of one corner and height for both offsets of the other,
        # which produced a wrong box whenever the two defaults differ.
        c1 = Point(self.x - self.width_for_drawing / 2,
                   self.y - self.height_for_drawing / 2)
        c2 = Point(self.x + self.width_for_drawing / 2,
                   self.y + self.height_for_drawing / 2)
        return Rectangle(c1, c2)

    def _get_coordinates_for_drawing(self):
        return self.get_bounding_box()._get_coordinates_for_drawing()
# Placeholder class reserved for a future shape type.
class Polygon(_Shape, _ShapeWithOutline):
    """ Not yet implemented. """
class Rectangle(_RectangularShape, _ShapeWithOutline):
    """
    A Shape that is a rectangle.
    To construct a Rectangle, use:
    - rg.Rectangle(corner1, corner2)
    where corner1 and corner2 are
    rg.Point objects that specify opposite
    corners of the rectangle.
    For example:
    - rg.Rectangle(rg.Point(100, 50),
    -              rg.Point(300, 200))
    specifies the rectangle:
    - whose upper-left corner is (100, 50) and
    - whose lower-right corner is (300, 200).
    Another example:
    - rg.Rectangle(rg.Point(300, 50),
    -              rg.Point(100, 200))
    specifies the same rectangle.
    Any two opposite corners can be used.
    Instance variables include:
      corner_1: An rg.Point that specifies
        one corner of the Rectangle.
      corner_2: An rg.Point that specifies
        an opposite corner of the Rectangle.
      fill_color:
        The Rectangle is filled with this color.
        Example: rect.fill_color = 'green'
      outline_color:
        The outline of the Rectangle is this color.
        Example: rect.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
        of the outline of the Rectangle.
    Examples:
      p1 = rg.Point(100, 50)
      p2 = rg.Point(300, 200)
      rect = rg.Rectangle(p1, p2)
      print(rect.corner_1, rect.corner_2)
      rect.fill_color = 'blue'
      rect.outline_color = 'black'
      rect.outline_thickness = 5
      window = rg.RoseWindow()
      rect.attach_to(window)
      rect.move_to(300, 200)
      rect.move_by(-50, 60)
      # Another way to move the Rectangle:
      rect.corner_1 = rect.corner_1 - 50
      rect.corner_2 = rect.corner_2 + 60
      # To get rg.Points for the corners/center:
      ul = rect.get_upper_left_corner()
      ur = rect.get_upper_right_corner()
      ll = rect.get_lower_left_corner()
      lr = rect.get_lower_right_corner()
      center = rect.get_center()
      # To get the width/height (always positive):
      h = rect.get_height()
      w = rect.get_width()
    """
    def __init__(self, corner_1, corner_2):
        """
        :type corner_1: rg.Point
        :type corner_2: rg.Point
        """
        # The following sets instance variables
        #   self.corner_1
        #   self.corner_2
        # to clones (copies) of the given rg.Points.
        super().__init__(corner_1, corner_2,
                         tkinter.Canvas.create_rectangle)
        # The following sets default values for:
        #   self.fill_color
        #   self.outline_color
        #   self.outline_thickness
        super()._initialize_options()

    def get_bounding_box(self):
        """
        Returns a new rg.Rectangle with the same corners as this one.
        """
        # A Rectangle is its own bounding box, so a copy suffices.
        return self.clone()
# Placeholder class reserved for a future shape type.
class RoundedRectangle(_RectangularShape, _ShapeWithOutline):
    """ Not yet implemented. """
class Square(_ShapeWithCenter, _ShapeWithOutline):
    """
    A Shape that is a square.
    To construct a Square, use:
    - rg.Square(center, length_of_each_side)
    where center is an rg.Point object
    and length_of_each_side is a positive integer.
    For example:
    - rg.Square(rg.Point(100, 75), 60)
    specifies the square whose center
    is at (100, 75) and whose length of
    each side is 60.  Its corners are at:
    (70, 35), (70, 105), (130, 35), (130, 105).
    Instance variables include:
      center: An rg.Point that specifies
        the center of the Square.
      length_of_each_side: The length of each side of the Square.
      fill_color:
        The Square is filled with this color.
        Example: square.fill_color = 'green'
      outline_color:
        The outline of the Square is this color.
        Example: square.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
        of the outline of the Square.
    Examples:
      square = rg.Square(rg.Point(100, 75), 60)
      print(square.center, square.length_of_each_side)
      square.fill_color = 'blue'
      square.outline_color = 'black'
      square.outline_thickness = 5
      window = rg.RoseWindow()
      square.attach_to(window)
      square.move_center_to(300, 200)
      square.move_by(-50, 60)
      # Another way to move the Square:
      x = square.center.x
      y = square.center.y
      square.center = rg.Point(x - 50, y + 60)
    """
    def __init__(self, center, length_of_each_side):
        """
        :type center: rg.Point
        :type length_of_each_side: int
        """
        # The following sets instance variable
        #   self.center
        # to a clone (copy) of the given rg.Point.
        super().__init__(center, tkinter.Canvas.create_rectangle)
        # The following sets default values for:
        #   self.fill_color
        #   self.outline_color
        #   self.outline_thickness
        super()._initialize_options()
        # The length of each side is also stored in an instance variable
        self.length_of_each_side = length_of_each_side

    def __repr__(self):
        """ Returns a string representation of this Square. """
        f_string = ''
        f_string += 'Square: center=({}, {}), side-lengths={}, '
        f_string += 'fill_color={}, outline_color={}, outline_thickness={}.'
        return f_string.format(self.center.x, self.center.y,
                               self.length_of_each_side,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)

    def clone(self):
        """ Returns a copy of this Square. """
        return Square(self.center, self.length_of_each_side)

    def get_bounding_box(self):
        """
        Returns a rg.Rectangle with the same corners as this Square.
        """
        # The corners lie half a side-length from the center in each axis.
        c1 = Point(self.center.x - self.length_of_each_side / 2,
                   self.center.y - self.length_of_each_side / 2)
        c2 = Point(self.center.x + self.length_of_each_side / 2,
                   self.center.y + self.length_of_each_side / 2)
        return Rectangle(c1, c2)

    def _get_coordinates_for_drawing(self):
        return self.get_bounding_box()._get_coordinates_for_drawing()
class Text(_ShapeWithCenter, _ShapeWithText):
    """
    A Shape that has a string of text on it, displayed horizontally.
    Its constructor specifies the rg.Point at which the text
    is centered and the string that is to be displayed.
    Public data attributes: center (an rg.Point),
    font_size (an integer, 5 to 80 or so are reasonable values),
    is_bold (True if the text is to be displayed in BOLD, else False),
    is_italic (True or False),
    is_underline (True or False),
    is_overstrike (True or False),
    text_color (color used to display the text, default is 'black')
    text (the string to be displayed).
    Public methods: attach_to, move_by, move_center_to.
    """
    def __init__(self, center, text):
        """
        The first argument must be a rg.Point.
        The second argument must be a string.
        When this Text object is rendered on a window,
        the string (2nd argument) is drawn horizontally on the window,
        centered at the rg.Point that is the 1st argument.
        Preconditions:
        :type center: rg.Point
        :type text: str
        """
        super().__init__(center, tkinter.Canvas.create_text)
        super()._initialize_options()
        self.text = text
        # FIXME: Allow __init__ to set the options.

    def __repr__(self):
        """ Returns a string representation of this Text. """
        return "Text displaying '{}' at position {}".format(self.text,
                                                            self.center)

    # FIXME: Have repr include characteristics??
    # FIXME: Do a clone?
    # def clone(self):
    #     return Square(self.center, self.length_of_each_side)
    # def get_bounding_box(self):
    #     return Rectangle(self.center,
    #                      2 * self.length_of_each_side,
    #                      2 * self.length_of_each_side)
    # FIXME: Implement bounding_box using the tkinter function for it.

    def _get_coordinates_for_drawing(self):
        # create_text takes only the center; the canvas sizes the text itself.
        return [self.center.x, self.center.y]
# Mark: Window/RoseWindow naming collision is causing mass confusion.
# class Window(_Shape):
# """ Not yet implemented. """
# default_options = {}
# CONSIDER: Are these right for here?
# Placeholder class reserved for a future widget type.
class Button(_Shape):
    """ Not yet implemented. """
    # Placeholder for per-class default option values.
    default_options = {}
# Placeholder class reserved for a future widget type.
class Entry(_Shape):
    """ Not yet implemented. """
    # Placeholder for per-class default option values.
    default_options = {}
class Color(object):
    """
    A Color represents a fill or outline color created from custom
    amounts of red, green, and blue light.  The arguments are:
    - The RED component (0-255),
    - the GREEN component (0-255),
    - the BLUE component (0-255).
    If only the red component is given, it is used for all three
    components, producing a shade of gray.
    This Color can be passed to RoseGraphics colors
    such as fill_color and outline_color.
    """
    def __init__(self, red, green=None, blue=None):
        """
        :type red: int
        :type green: int
        :type blue: int
        """
        self.red = red
        # BUG FIX: previously, omitting green/blue stored None and made
        # __repr__ crash with a TypeError when formatting with {:02x}.
        # A single argument now means a gray level (all channels equal);
        # calls that pass all three components behave exactly as before.
        self.green = green if green is not None else red
        self.blue = blue if blue is not None else red

    def __repr__(self):
        """ Returns this Color as a tkinter-style '#rrggbb' hex string. """
        return "#{:02x}{:02x}{:02x}".format(self.red, self.green, self.blue)
# begin STUB code for testing
class _RoseWindowStub(RoseWindow):
    """
    A stand-in for RoseWindow used in testing: it never opens a real
    tkinter window, and its interactive methods return immediately.
    """
    def __init__(self, width=400, height=300, title='Rose Graphics',
                 color='black', canvas_color=None,
                 make_initial_canvas=True):
        canvas_color = "white"  # FIXME
        self._is_closed = False
        self.width = width
        self.height = height
        # Record shapes on a stub canvas instead of drawing them.
        self.initial_canvas = _RoseCanvasStub(
            self, width, height, canvas_color)

    def render(self, seconds_to_pause=None):
        # Nothing to draw and no reason to pause in the stub.
        pass

    def get_next_mouse_click(self):
        # Simulates an immediate click at the origin.
        return Point(0, 0)

    def close_on_mouse_click(self):
        return None

    def continue_on_mouse_click(self,
                                message='To continue, click anywhere in this window',
                                x_position=None,
                                y_position=None,
                                close_it=False,
                                erase_it=True):
        return None

    def _serialize_shapes(self):
        """Returns a list of strings representing the shapes in sorted order."""
        return _serialize_shapes(self)
class _RoseCanvasStub(RoseCanvas):
    """
    A stand-in for RoseCanvas used in testing: instead of drawing,
    it simply records the shapes that would have been drawn.
    """
    def __init__(self, window, width, height, canvas_color):
        # super().__init__(window, width, height, canvas_color)
        # canvases.append(self)
        self.shapes = []

    def _draw(self, shape):
        # super()._draw(shape)
        self.shapes.append(shape)

    def render(self, seconds_to_pause=None):
        # super().render()  # don't pause
        pass
class TurtleWindow(object):
    """
    A window for turtle graphics, backed by the standard-library
    turtle.Screen.
    """
    def __init__(self):
        self._screen = turtle.Screen()
        # Make new Turtles appear on this screen by default.
        turtle.Turtle._screen = self._screen

    def close_on_mouse_click(self):
        """ Closes this TurtleWindow when the user clicks anywhere in it. """
        self._screen.exitonclick()
        # We may need the statement:
        #   turtle.TurtleScreen._RUNNING = True
        # in case we open a subsequent TurtleWindow during this run.
        # The turtle library seems not to allow for that possibility
        # (it uses a CLASS variable _RUNNING where I would have expected
        # an INSTANCE variable).
        # The next statement appeared to have a visible effect
        # (something flashed) but nothing worse.  At this time
        # it is commented-out, since we need only a single TurtleWindow.
        # turtle.TurtleScreen._RUNNING = True

    def delay(self, milliseconds=None):
        """ Sets/gets the screen's redraw delay (see turtle.Screen.delay). """
        self._screen.delay(milliseconds)

    def tracer(self, n=None, delay=None):
        """ Controls animation batching (see turtle.Screen.tracer). """
        self._screen.tracer(n, delay)
class ShapesWindow(RoseWindow):
    """ A RoseWindow; this subclass currently adds no behavior of its own. """
    pass
class SimpleTurtle(object):
    """
    A SimpleTurtle is a Turtle with restricted (simpler) functionality.
    It can move forward/backward (units are pixels), turn (spin)
    left/right (units are degrees), and more.
    To construct a SimpleTurtle, use:
      rg.SimpleTurtle(shape)
    where shape is OPTIONAL and can be any of: 'turtle'
      'arrow' 'classic' 'square' 'circle' 'triangle' 'blank'
    Instance variables include:
      speed: An integer from 1 (slowest) to 10 (fastest) that
        determines how fast the SimpleTurtle moves.
      pen: an rg.Pen object (see example below) that determines
        the color and thickness of the line
        that the SimpleTurtle draws when moving
      paint_bucket: an rg.PaintBucket object (see example below)
        that determines the color with which the SimpleTurtle
        "fills" shapes indicated by using the begin_fill and
        end_fill methods.
    Examples:
      natacha = rg.SimpleTurtle()
      natacha.forward(100)
      boris = rg.SimpleTurtle('turtle')
      boris.speed = 8
      boris.pen = rg.Pen('blue', 5)  # blue line 5 pixels thick
      boris.paint_bucket = rg.PaintBucket('red')
      # Moves with pen down, then with pen up, then with pen down again:
      boris.left(90)
      boris.forward(-300)
      boris.pen_up()
      boris.go_to(rg.Point(100, -50))
      boris.pen_down()
      boris.backward(75)
      # Moves with the enclosed space "filled" with the paint_bucket
      boris.begin_fill()
      ... movements ...
      boris.end_fill()
    """
    def __init__(self, shape='classic'):
        """
        What comes in:
        A turtle.Shape that determines how the Turtle looks.  Defaults to
        a Bitmap of the "classic" Turtle (an arrowhead) from early Turtle
        Graphics.
        Side effects: Constructs and stores in self._turtle the "real" Turtle
        to do all the work on behalf of this SimpleTurtle.  This (purposely)
        restricts what this SimpleTurtle knows and can do.
        :type shape: str
        """
        self.speed = 1
        self.pen = Pen('black', 1)
        self.paint_bucket = PaintBucket('black')
        # The real turtle that does all the work for this SimpleTurtle.
        self._turtle = turtle.Turtle(shape)
        self._update_real_turtle()

    def forward(self, distance):
        """
        Makes this SimpleTurtle go forward the given distance
        (in pixels).  Example (assuming sally is an rg.SimpleTurtle):
          sally.forward(200)
        """
        self._update_real_turtle()
        self._turtle.forward(distance)

    def backward(self, distance):
        """
        Makes this SimpleTurtle go backward the given distance
        (in pixels).  Example (assuming sally is an rg.SimpleTurtle):
          sally.backward(200)
        """
        self._update_real_turtle()
        self._turtle.backward(distance)

    def left(self, angle):
        """
        Makes this SimpleTurtle turn (i.e. spin) left the given angle
        (in degrees).  Example (assuming sally is an rg.SimpleTurtle):
          sally.left(45)
        """
        self._update_real_turtle()
        self._turtle.left(angle)

    def right(self, angle):
        """
        Makes this SimpleTurtle turn (i.e. spin) right the given angle
        (in degrees).  Example (assuming sally is an rg.SimpleTurtle):
          sally.right(45)
        """
        self._update_real_turtle()
        self._turtle.right(angle)

    def go_to(self, point):
        """
        Makes this SimpleTurtle go to the given rg.Point.
        (0, 0) is at the center of the window.
        Example (assuming sally is an rg.SimpleTurtle):
          sally.go_to(rg.Point(100, -50))
        """
        self._update_real_turtle()
        self._turtle.goto(point.x, point.y)

    def draw_circle(self, radius):
        """
        Makes this SimpleTurtle draw a circle with the given radius.
        Example (assuming sally is an rg.SimpleTurtle):
          sally.draw_circle(40)
        """
        self._update_real_turtle()
        self._turtle.circle(radius)

    def draw_square(self, length_of_sides):
        """
        Makes this SimpleTurtle draw a square with the given value
        for the length of each of its sides.
        Example (assuming sally is an rg.SimpleTurtle):
          sally.draw_square(100)
        """
        for _ in range(4):
            self.forward(length_of_sides)
            self.left(90)

    def draw_regular_polygon(self, number_of_sides, length_of_sides):
        """
        Makes this SimpleTurtle draw a regular polygon with the given
        number of sides and the given length for each of its sides.
        Example (assuming sally is an rg.SimpleTurtle):
          sally.draw_regular_polygon(8, 75)  # octagon
          sally.draw_regular_polygon(3, 75)  # triangle
        """
        for _ in range(number_of_sides):
            self.forward(length_of_sides)
            # Exterior angle of a regular polygon.
            self.left(360 / number_of_sides)

    def pen_up(self):
        """
        Lifts up this SimpleTurtle's pen.  Subsequent movements
        will NOT draw a line (until pen_down is called).
        Example (assuming sally is an rg.SimpleTurtle):
          sally.pen_up()
        """
        self._update_real_turtle()
        self._turtle.penup()

    def pen_down(self):
        """
        Puts down this SimpleTurtle's pen.  Subsequent movements
        WILL draw a line using this SimpleTurtle's pen (until pen_up
        is called).  Example (assuming sally is an rg.SimpleTurtle):
          sally.pen_down()
        """
        self._update_real_turtle()
        self._turtle.pendown()

    def x_cor(self):
        """
        Returns the x-coordinate of this SimpleTurtle's current position.
        Example (assuming sally is an rg.SimpleTurtle):
          x = sally.x_cor()
        """
        return self._turtle.xcor()

    def y_cor(self):
        """
        Returns the y-coordinate of this SimpleTurtle's current position.
        Example (assuming sally is an rg.SimpleTurtle):
          y = sally.y_cor()
        """
        return self._turtle.ycor()

    def begin_fill(self):
        """
        Begins "filling" the shape that this SimpleTurtle draws,
        using this SimpleTurtle's paint_bucket as the fill.
        Example (assuming sally is an rg.SimpleTurtle) that fills
        a triangle with green:
          sally.paint_bucket = rg.PaintBucket('green')
          sally.begin_fill()
          sally.forward(100)
          sally.left(120)
          sally.forward(100)
          sally.left(120)
          sally.forward(100)
          sally.end_fill()
        """
        self._update_real_turtle()
        self._turtle.begin_fill()

    def end_fill(self):
        """
        Completes "filling" the shape that this SimpleTurtle draws,
        using this SimpleTurtle's paint_bucket as the fill.
        Example (assuming sally is an rg.SimpleTurtle) that fills
        a triangle with green:
          sally.paint_bucket = rg.PaintBucket('green')
          sally.begin_fill()
          sally.forward(100)
          sally.left(120)
          sally.forward(100)
          sally.left(120)
          sally.forward(100)
          sally.end_fill()
        """
        self._update_real_turtle()
        self._turtle.end_fill()

    def clear(self):
        """ Not yet implemented. """

    def clone(self):
        """ Not yet implemented. """
        pass

    def write_text(self):
        """ Not yet implemented. """
        pass

    def _update_real_turtle(self):
        # Pushes this SimpleTurtle's current pen / paint_bucket / speed
        # settings down into the underlying turtle.Turtle before acting.
        self._turtle.pencolor(self.pen.color)
        self._turtle.pensize(self.pen.thickness)
        self._turtle.fillcolor(self.paint_bucket.color)
        self._turtle.speed(self.speed)
class Pen(object):
    """
    The drawing tool that a SimpleTurtle uses for lines: it bundles
    the line color and the line thickness.

    Construct one with:
      rg.Pen(color, thickness)
    where color is a color name (e.g. 'red') and thickness is a small
    positive integer (pixels).

    Instance variables:
      color:      the color of the Pen
      thickness:  the thickness of the Pen

    Examples:
      thick_blue = rg.Pen('blue', 14)
      thin_red = rg.Pen('red', 1)
    """
    def __init__(self, color, thickness):
        self.color = color
        self.thickness = thickness
class PaintBucket(object):
    """
    Holds the color that a SimpleTurtle uses when filling shapes
    (via begin_fill / end_fill).

    Construct one with:
      rg.PaintBucket(color)
    where color is a color name (e.g. 'red').

    Instance variables:
      color: the color of the PaintBucket

    Example:
      paint = rg.PaintBucket('green')
    """
    def __init__(self, color):
        self.color = color
| 31.967805 | 161 | 0.598117 |
a88369a8d840ca11750e0301a052d4a26be04ae7 | 960 | py | Python | google-cloud-sdk/lib/surface/compute/https_health_checks/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/lib/surface/compute/https_health_checks/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/https_health_checks/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating HTTPS health checks."""
from googlecloudsdk.calliope import base
class HttpsHealthChecks(base.Group):
    """Read and manipulate HTTPS health checks for load balanced instances."""


# Help metadata for this command group ('brief' is the one-line summary).
HttpsHealthChecks.detailed_help = {
    'brief': ('Read and manipulate HTTPS health checks for load balanced '
              'instances')
}
| 35.555556 | 76 | 0.753125 |
a74ba4c37582131b3b13a8bcf9ded9e8f7b53969 | 1,898 | py | Python | extensions/value_generators/models/generators_test.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | extensions/value_generators/models/generators_test.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | extensions/value_generators/models/generators_test.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for value generators."""
from core.tests import test_utils
from extensions.value_generators.models import generators
class ValueGeneratorUnitTests(test_utils.GenericTestBase):
    """Unit tests for the value generators."""

    def test_copier(self):
        """Copier echoes its value, optionally parsing it with Jinja."""
        copier = generators.Copier()
        self.assertEqual(copier.generate_value({}, value='a'), 'a')
        self.assertEqual(
            copier.generate_value({}, value='a', parse_with_jinja=False),
            'a')
        self.assertEqual(
            copier.generate_value({}, value='{{a}}', parse_with_jinja=False),
            '{{a}}')
        self.assertEqual(
            copier.generate_value(
                {'a': 'b'}, value='{{a}}', parse_with_jinja=True),
            'b')
        self.assertIn(
            'init-args="initArgs" value="customizationArgs.value"',
            copier.get_html_template())

    def test_random_selector(self):
        """RandomSelector picks one of the supplied values."""
        selector = generators.RandomSelector()
        self.assertIn(
            selector.generate_value({}, list_of_values=['a', 'b', 'c']),
            ['a', 'b', 'c'])
        self.assertIn(
            'schema="SCHEMA" '
            'local-value="$parent.$parent.customizationArgs.list_of_values"',
            selector.get_html_template())
4963cac16c379d85f8698890416235eabb384ad3 | 71 | py | Python | app/library/algorithm/pathwalk.py | imamsolikhin/Python | f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5 | [
"MIT"
] | null | null | null | app/library/algorithm/pathwalk.py | imamsolikhin/Python | f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5 | [
"MIT"
] | null | null | null | app/library/algorithm/pathwalk.py | imamsolikhin/Python | f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# TODO: move PathFind PAthWlak to own modules
| 17.75 | 45 | 0.647887 |
4aa47ac250e0557e553614986f28a3aa764ffc1a | 9,270 | py | Python | trello/trelloclient.py | nureineide/py-trello | b9a7366e103316cedfe925ab6150b763e998d37b | [
"BSD-3-Clause"
] | null | null | null | trello/trelloclient.py | nureineide/py-trello | b9a7366e103316cedfe925ab6150b763e998d37b | [
"BSD-3-Clause"
] | null | null | null | trello/trelloclient.py | nureineide/py-trello | b9a7366e103316cedfe925ab6150b763e998d37b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import json
import requests
from requests_oauthlib import OAuth1
from trello.board import Board
from trello.card import Card
from trello.trellolist import List
from trello.organization import Organization
from trello.member import Member
from trello.webhook import WebHook
from trello.exceptions import *
from trello.label import Label
try:
    # PyOpenSSL works around some issues in python ssl modules
    # In particular in python < 2.7.9 and python < 3.2
    # It is not a hard requirement, so it's not listed in requirements.txt
    # More info https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
    import urllib3.contrib.pyopenssl
    urllib3.contrib.pyopenssl.inject_into_urllib3()
except Exception:
    # Best-effort only: a missing PyOpenSSL (ImportError) or an injection
    # failure must not prevent this library from being imported.  A bare
    # `except:` here would also swallow SystemExit/KeyboardInterrupt,
    # so catch Exception instead.
    pass
class TrelloClient(object):
    """ Base class for Trello API access """

    def __init__(self, api_key, api_secret=None, token=None, token_secret=None):
        """
        Constructor

        :api_key: API key generated at https://trello.com/1/appKey/generate
        :api_secret: the secret component of api_key
        :token: OAuth token generated by the user in
                trello.util.create_oauth_token
        :token_secret: the OAuth client secret for the given OAuth token
        """
        # client key and secret for oauth1 session
        if api_key or token:
            self.oauth = OAuth1(client_key=api_key, client_secret=api_secret,
                                resource_owner_key=token, resource_owner_secret=token_secret)
        else:
            self.oauth = None

        # Without a user token, only publicly visible data is reachable.
        self.public_only = token is None
        self.api_key = api_key
        self.api_secret = api_secret
        self.resource_owner_key = token
        self.resource_owner_secret = token_secret

    def info_for_all_boards(self, actions):
        """
        Use this if you want to retrieve info for all your boards in one swoop.
        Stores the raw JSON in self.all_info; returns None when the client
        is public-only (no user token).
        """
        if self.public_only:
            return None
        else:
            json_obj = self.fetch_json(
                '/members/me/boards/all',
                query_params={'actions': actions})
            self.all_info = json_obj

    def logout(self):
        """Log out of Trello."""
        # TODO: This function.
        raise NotImplementedError()

    def list_boards(self, board_filter="all"):
        """
        Returns all boards for your Trello user

        :return: a list of Python objects representing the Trello boards.
        :rtype: Board

        Each board has the following noteworthy attributes:
        - id: the board's identifier
        - name: Name of the board
        - desc: Description of the board (optional - may be missing from the
                returned JSON)
        - closed: Boolean representing whether this board is closed or not
        - url: URL to the board
        """
        json_obj = self.fetch_json('/members/me/boards/?filter=%s' % board_filter)
        return [Board.from_json(self, json_obj=obj) for obj in json_obj]

    def list_organizations(self):
        """
        Returns all organizations for your Trello user

        :return: a list of Python objects representing the Trello
                 organizations.
        :rtype: Organization

        Each organization has the following noteworthy attributes:
        - id: the organization's identifier
        - name: Name of the organization
        - desc: Description of the organization (optional - may be missing
                from the returned JSON)
        - closed: Boolean representing whether this organization is closed
        - url: URL to the organization
        """
        json_obj = self.fetch_json('members/me/organizations')
        return [Organization.from_json(self, obj) for obj in json_obj]

    def get_organization(self, organization_id):
        """Get organization

        :rtype: Organization
        """
        obj = self.fetch_json('/organizations/' + organization_id)
        return Organization.from_json(self, obj)

    def get_board(self, board_id):
        """Get board

        :rtype: Board
        """
        obj = self.fetch_json('/boards/' + board_id)
        return Board.from_json(self, json_obj=obj)

    def add_board(self, board_name, source_board=None, organization_id=None):
        """Create board

        :param board_name: Name of the board to create
        :param source_board: Optional Board to copy
        :param organization_id: Optional id of the owning organization
        :rtype: Board
        """
        post_args = {'name': board_name}
        if source_board is not None:
            post_args['idBoardSource'] = source_board.id
        if organization_id is not None:
            post_args['idOrganization'] = organization_id
        obj = self.fetch_json('/boards', http_method='POST',
                              post_args=post_args)
        return Board.from_json(self, json_obj=obj)

    def get_member(self, member_id):
        """Get member

        :rtype: Member
        """
        return Member(self, member_id).fetch()

    def get_card(self, card_id):
        """Get card

        :rtype: Card
        """
        # A Card needs its parent List (and that List its parent Board),
        # so three fetches are required to rebuild the object graph.
        card_json = self.fetch_json('/cards/' + card_id)
        list_json = self.fetch_json('/lists/' + card_json['idList'])
        board = self.get_board(card_json['idBoard'])
        return Card.from_json(List.from_json(board, list_json), card_json)

    def get_label(self, label_id, board_id):
        """Get Label

        Requires the parent board id the label is on.

        :rtype: Label
        """
        board = self.get_board(board_id)
        label_json = self.fetch_json('/labels/' + label_id)
        return Label.from_json(board, label_json)

    def fetch_json(
            self,
            uri_path,
            http_method='GET',
            headers=None,
            query_params=None,
            post_args=None,
            files=None):
        """ Fetch some JSON from Trello.

        :param uri_path: path below https://api.trello.com/1/
        :param http_method: 'GET', 'POST', 'PUT' or 'DELETE'
        :param headers: extra HTTP headers (dict)
        :param query_params: URL query parameters (dict)
        :param post_args: JSON-encoded request body (dict)
        :param files: multipart file uploads; suppresses the JSON body
        :raises Unauthorized: on HTTP 401
        :raises ResourceUnavailable: on any other non-200 status
        """
        # explicit values here to avoid mutable default values
        if headers is None:
            headers = {}
        if query_params is None:
            query_params = {}
        if post_args is None:
            post_args = {}

        # if files specified, we don't want any data
        data = None
        if files is None:
            data = json.dumps(post_args)

        # set content type and accept headers to handle JSON
        if http_method in ("POST", "PUT", "DELETE") and not files:
            headers['Content-Type'] = 'application/json; charset=utf-8'
        headers['Accept'] = 'application/json'

        # construct the full URL without query parameters
        if uri_path[0] == '/':
            uri_path = uri_path[1:]
        url = 'https://api.trello.com/1/%s' % uri_path

        # perform the HTTP requests, if possible uses OAuth authentication
        response = requests.request(http_method, url, params=query_params,
                                    headers=headers, data=data,
                                    auth=self.oauth, files=files)

        if response.status_code == 401:
            raise Unauthorized("%s at %s" % (response.text, url), response)
        if response.status_code != 200:
            raise ResourceUnavailable("%s at %s" % (response.text, url), response)

        return response.json()

    def list_hooks(self, token=None):
        """
        Returns a list of all hooks associated with a specific token.
        If you don't pass in a token, it tries to use the token
        associated with the TrelloClient object (if it exists).

        :raises TokenError: when no token is available at all
        """
        token = token or self.resource_owner_key

        if token is None:
            raise TokenError("You need to pass an auth token in to list hooks.")
        else:
            url = "/tokens/%s/webhooks" % token
            return self._existing_hook_objs(self.fetch_json(url), token)

    def _existing_hook_objs(self, hooks, token):
        """
        Given a list of hook dicts passed from list_hooks, creates
        the hook objects.
        """
        all_hooks = []
        for hook in hooks:
            new_hook = WebHook(self, token, hook['id'], hook['description'],
                               hook['idModel'],
                               hook['callbackURL'], hook['active'])
            all_hooks.append(new_hook)
        return all_hooks

    def create_hook(self, callback_url, id_model, desc=None, token=None):
        """
        Creates a new webhook via a direct POST to trello.com.
        Returns the WebHook object created, or False if the API call
        did not return HTTP 200.

        :raises TokenError: when no token is available at all
        """
        token = token or self.resource_owner_key

        if token is None:
            raise TokenError("You need to pass an auth token in to create a hook.")

        url = "https://trello.com/1/tokens/%s/webhooks/" % token
        data = {'callbackURL': callback_url, 'idModel': id_model,
                'description': desc}
        response = requests.post(url, data=data, auth=self.oauth)

        if response.status_code == 200:
            hook_id = response.json()['id']
            return WebHook(self, token, hook_id, desc, id_model, callback_url, True)
        else:
            return False
946add99d10b787bccfec9b524513c8b3a2414c0 | 21,239 | py | Python | stable_baselines/common/base_class.py | ashigirl96/stable-baselines | 0c3478eb9917c0357131913215df7abca6c8d566 | [
"MIT"
] | null | null | null | stable_baselines/common/base_class.py | ashigirl96/stable-baselines | 0c3478eb9917c0357131913215df7abca6c8d566 | [
"MIT"
] | null | null | null | stable_baselines/common/base_class.py | ashigirl96/stable-baselines | 0c3478eb9917c0357131913215df7abca6c8d566 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import os
import glob
import cloudpickle
import numpy as np
import gym
import tensorflow as tf
from stable_baselines.common import set_global_seeds
from stable_baselines.common.policies import LstmPolicy, get_policy_from_name, ActorCriticPolicy
from stable_baselines.common.vec_env import VecEnvWrapper, VecEnv, DummyVecEnv
from stable_baselines import logger
class BaseRLModel(ABC):
"""
The base RL model
:param policy: (BasePolicy) Policy object
:param env: (Gym environment) The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param requires_vec_env: (bool) Does this model require a vectorized environment
:param policy_base: (BasePolicy) the base policy used by this method
"""
    def __init__(self, policy, env, verbose=0, *, requires_vec_env, policy_base):
        """Resolve the policy, attach the environment and normalize its vectorization."""
        # A string policy name is looked up against the method's policy base class.
        if isinstance(policy, str):
            self.policy = get_policy_from_name(policy_base, policy)
        else:
            self.policy = policy
        self.env = env
        self.verbose = verbose
        self._requires_vec_env = requires_vec_env
        # Filled in below once an environment is available.
        self.observation_space = None
        self.action_space = None
        self.n_envs = None
        # True when a single-env VecEnv was unwrapped and actions must be re-batched.
        self._vectorize_action = False
        if env is not None:
            if isinstance(env, str):
                if self.verbose >= 1:
                    print("Creating environment from the given name, wrapped in a DummyVecEnv.")
                # NOTE(review): the lambda closes over `env`, which is rebound on this
                # line; this only works because DummyVecEnv calls the factory eagerly
                # in its constructor — confirm before reusing the factory later.
                self.env = env = DummyVecEnv([lambda: gym.make(env)])
            self.observation_space = env.observation_space
            self.action_space = env.action_space
            if requires_vec_env:
                if isinstance(env, VecEnv):
                    self.n_envs = env.num_envs
                else:
                    raise ValueError("Error: the model requires a vectorized environment, please use a VecEnv wrapper.")
            else:
                # Non-vectorized models can still accept a single-env VecEnv by
                # unwrapping it; anything larger is rejected.
                if isinstance(env, VecEnv):
                    if env.num_envs == 1:
                        self.env = _UnvecWrapper(env)
                        self._vectorize_action = True
                    else:
                        raise ValueError("Error: the model requires a non vectorized environment or a single vectorized"
                                         " environment.")
                self.n_envs = 1
def get_env(self):
"""
returns the current environment (can be None if not defined)
:return: (Gym Environment) The current environment
"""
return self.env
    def set_env(self, env):
        """
        Checks the validity of the environment, and if it is coherent, set it as the current environment.
        :param env: (Gym Environment) The environment for learning a policy
        :raises ValueError: when env is None while a model env exists, or the wrong vectorization is passed
        """
        # No-op (with a warning) when neither a new nor a current environment exists.
        if env is None and self.env is None:
            if self.verbose >= 1:
                print("Loading a model without an environment, "
                      "this model cannot be trained until it has a valid environment.")
            return
        elif env is None:
            raise ValueError("Error: trying to replace the current environment with None")
        # sanity checking the environment
        assert self.observation_space == env.observation_space, \
            "Error: the environment passed must have at least the same observation space as the model was trained on."
        assert self.action_space == env.action_space, \
            "Error: the environment passed must have at least the same action space as the model was trained on."
        if self._requires_vec_env:
            assert isinstance(env, VecEnv), \
                "Error: the environment passed is not a vectorized environment, however {} requires it".format(
                    self.__class__.__name__)
            # LSTM policies bake the batch size into the graph, so n_envs cannot change.
            assert not issubclass(self.policy, LstmPolicy) or self.n_envs == env.num_envs, \
                "Error: the environment passed must have the same number of environments as the model was trained on." \
                "This is due to the Lstm policy not being capable of changing the number of environments."
            self.n_envs = env.num_envs
        else:
            # for models that dont want vectorized environment, check if they make sense and adapt them.
            # Otherwise tell the user about this issue
            if isinstance(env, VecEnv):
                if env.num_envs == 1:
                    env = _UnvecWrapper(env)
                    self._vectorize_action = True
                else:
                    raise ValueError("Error: the model requires a non vectorized environment or a single vectorized "
                                     "environment.")
            else:
                self._vectorize_action = False
            self.n_envs = 1
        self.env = env
    @abstractmethod
    def setup_model(self):
        """
        Create all the functions and tensorflow graphs necessary to train the model

        Called by concrete algorithms after the environment spaces are known
        (e.g. after ``set_env``, as done in ``load``).
        """
        pass
def _setup_learn(self, seed):
"""
check the environment, set the seed, and set the logger
:param seed: (int) the seed value
"""
if self.env is None:
raise ValueError("Error: cannot train the model without a valid environment, please set an environment with"
"set_env(self, env) method.")
if seed is not None:
set_global_seeds(seed)
    @abstractmethod
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="run"):
        """
        Return a trained model.

        Concrete algorithms are expected to call ``_setup_learn`` first.
        :param total_timesteps: (int) The total number of samples to train on
        :param seed: (int) The initial seed for training, if None: keep current seed
        :param callback: (function (dict, dict)) -> boolean function called at every steps with state of the algorithm.
            It takes the local and global variables. If it returns False, training is aborted.
        :param log_interval: (int) The number of timesteps before logging.
        :param tb_log_name: (str) the name of the run for tensorboard log
        :return: (BaseRLModel) the trained model
        """
        pass
    @abstractmethod
    def predict(self, observation, state=None, mask=None, deterministic=False):
        """
        Get the model's action from an observation

        The observation may be a single observation or a batch (see
        ``_is_vectorized_observation``).
        :param observation: (np.ndarray) the input observation
        :param state: (np.ndarray) The last states (can be None, used in recurrent policies)
        :param mask: (np.ndarray) The last masks (can be None, used in recurrent policies)
        :param deterministic: (bool) Whether or not to return deterministic actions.
        :return: (np.ndarray, np.ndarray) the model's action and the next state (used in recurrent policies)
        """
        pass
    @abstractmethod
    def action_probability(self, observation, state=None, mask=None):
        """
        Get the model's action probability distribution from an observation

        Accepts single or batched observations, mirroring ``predict``.
        :param observation: (np.ndarray) the input observation
        :param state: (np.ndarray) The last states (can be None, used in recurrent policies)
        :param mask: (np.ndarray) The last masks (can be None, used in recurrent policies)
        :return: (np.ndarray) the model's action probability distribution
        """
        pass
    @abstractmethod
    def save(self, save_path):
        """
        Save the current parameters to file

        Implementations typically delegate to ``_save_to_file``.
        :param save_path: (str or file-like object) the save location
        """
        # self._save_to_file(save_path, data={}, params=None)
        raise NotImplementedError()
    @classmethod
    @abstractmethod
    def load(cls, load_path, env=None, **kwargs):
        """
        Load the model from file

        Implementations typically delegate to ``_load_from_file``.
        :param load_path: (str or file-like) the saved parameter location
        :param env: (Gym Envrionment) the new environment to run the loaded model on
            (can be None if you only need prediction from a trained model)
        :param kwargs: extra arguments to change the model when loading
        """
        # data, param = cls._load_from_file(load_path)
        raise NotImplementedError()
@staticmethod
def _save_to_file(save_path, data=None, params=None):
if isinstance(save_path, str):
_, ext = os.path.splitext(save_path)
if ext == "":
save_path += ".pkl"
with open(save_path, "wb") as file_:
cloudpickle.dump((data, params), file_)
else:
# Here save_path is a file-like object, not a path
cloudpickle.dump((data, params), save_path)
@staticmethod
def _load_from_file(load_path):
if isinstance(load_path, str):
if not os.path.exists(load_path):
if os.path.exists(load_path + ".pkl"):
load_path += ".pkl"
else:
raise ValueError("Error: the file {} could not be found".format(load_path))
with open(load_path, "rb") as file:
data, params = cloudpickle.load(file)
else:
# Here load_path is a file-like object, not a path
data, params = cloudpickle.load(load_path)
return data, params
@staticmethod
def _softmax(x_input):
"""
An implementation of softmax.
:param x_input: (numpy float) input vector
:return: (numpy float) output vector
"""
x_exp = np.exp(x_input.T - np.max(x_input.T, axis=0))
return (x_exp / x_exp.sum(axis=0)).T
    @staticmethod
    def _is_vectorized_observation(observation, observation_space):
        """
        For every observation type, detects and validates the shape,
        then returns whether or not the observation is vectorized.
        :param observation: (np.ndarray) the input observation to validate
        :param observation_space: (gym.spaces) the observation space
        :return: (bool) whether the given observation is vectorized or not
        :raises ValueError: when the shape matches neither a single nor a batched observation
        """
        # Box: a batch adds one leading dimension in front of the space shape.
        if isinstance(observation_space, gym.spaces.Box):
            if observation.shape == observation_space.shape:
                return False
            elif observation.shape[1:] == observation_space.shape:
                return True
            else:
                raise ValueError("Error: Unexpected observation shape {} for ".format(observation.shape) +
                                 "Box environment, please use {} ".format(observation_space.shape) +
                                 "or (n_env, {}) for the observation shape."
                                 .format(", ".join(map(str, observation_space.shape))))
        elif isinstance(observation_space, gym.spaces.Discrete):
            # A single discrete observation is a 0-d numpy scalar.
            if observation.shape == ():  # A numpy array of a number, has shape empty tuple '()'
                return False
            elif len(observation.shape) == 1:
                return True
            else:
                raise ValueError("Error: Unexpected observation shape {} for ".format(observation.shape) +
                                 "Discrete environment, please use (1,) or (n_env, 1) for the observation shape.")
        elif isinstance(observation_space, gym.spaces.MultiDiscrete):
            if observation.shape == (len(observation_space.nvec),):
                return False
            elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec):
                return True
            else:
                raise ValueError("Error: Unexpected observation shape {} for MultiDiscrete ".format(observation.shape) +
                                 "environment, please use ({},) or ".format(len(observation_space.nvec)) +
                                 "(n_env, {}) for the observation shape.".format(len(observation_space.nvec)))
        elif isinstance(observation_space, gym.spaces.MultiBinary):
            if observation.shape == (observation_space.n,):
                return False
            elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:
                return True
            else:
                raise ValueError("Error: Unexpected observation shape {} for MultiBinary ".format(observation.shape) +
                                 "environment, please use ({},) or ".format(observation_space.n) +
                                 "(n_env, {}) for the observation shape.".format(observation_space.n))
        else:
            raise ValueError("Error: Cannot determine if the observation is vectorized with the space type {}."
                             .format(observation_space))
class ActorCriticRLModel(BaseRLModel):
"""
The base class for Actor critic model
:param policy: (BasePolicy) Policy object
:param env: (Gym environment) The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param policy_base: (BasePolicy) the base policy used by this method (default=ActorCriticPolicy)
:param requires_vec_env: (bool) Does this model require a vectorized environment
"""
    def __init__(self, policy, env, _init_setup_model, verbose=0, policy_base=ActorCriticPolicy,
                 requires_vec_env=False):
        """Initialize shared actor-critic state; graph members are filled in by setup_model()."""
        super(ActorCriticRLModel, self).__init__(policy, env, verbose=verbose, requires_vec_env=requires_vec_env,
                                                 policy_base=policy_base)
        # Placeholders populated once the tensorflow graph is built.
        self.sess = None
        self.initial_state = None
        self.step = None
        self.proba_step = None
        self.params = None
    @abstractmethod
    def setup_model(self):
        # Build the policy/value tensorflow graph; see BaseRLModel.setup_model.
        pass
    @abstractmethod
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="run"):
        # Train the model; see BaseRLModel.learn for the parameter contract.
        pass
    def predict(self, observation, state=None, mask=None, deterministic=False):
        """Return the policy action(s) (and next recurrent state) for an observation or batch."""
        if state is None:
            state = self.initial_state
        if mask is None:
            mask = [False for _ in range(self.n_envs)]
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
        # Flatten to a batch of shape (n, *obs_shape) for the step function.
        observation = observation.reshape((-1,) + self.observation_space.shape)
        actions, _, states, _ = self.step(observation, state, mask, deterministic=deterministic)
        if not vectorized_env:
            # For recurrent policies initial_state is not None, so a single
            # (non-vectorized) observation is rejected here.
            if state is not None:
                raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
            actions = actions[0]
        return actions, states
    def action_probability(self, observation, state=None, mask=None):
        """Return the policy's action probability distribution for an observation or batch."""
        if state is None:
            state = self.initial_state
        if mask is None:
            mask = [False for _ in range(self.n_envs)]
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
        # Flatten to a batch of shape (n, *obs_shape) for the probability op.
        observation = observation.reshape((-1,) + self.observation_space.shape)
        actions_proba = self.proba_step(observation, state, mask)
        if not vectorized_env:
            # Same recurrent-policy restriction as in predict().
            if state is not None:
                raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
            actions_proba = actions_proba[0]
        return actions_proba
    @abstractmethod
    def save(self, save_path):
        # Concrete algorithms serialize their hyper-parameters and tf weights here.
        pass
    @classmethod
    def load(cls, load_path, env=None, **kwargs):
        """Rebuild a saved model: restore attributes, rebuild the graph, then assign weights."""
        data, params = cls._load_from_file(load_path)
        # Instantiate without building the graph; attributes come from the pickle.
        model = cls(policy=data["policy"], env=None, _init_setup_model=False)
        model.__dict__.update(data)
        # kwargs let the caller override saved attributes (e.g. verbose).
        model.__dict__.update(kwargs)
        model.set_env(env)
        model.setup_model()
        # Copy the saved parameter values into the freshly built graph.
        restores = []
        for param, loaded_p in zip(model.params, params):
            restores.append(param.assign(loaded_p))
        model.sess.run(restores)
        return model
class OffPolicyRLModel(BaseRLModel):
    """
    The base class for off policy RL model
    :param policy: (BasePolicy) Policy object
    :param env: (Gym environment) The environment to learn from
        (if registered in Gym, can be str. Can be None for loading trained models)
    :param replay_buffer: (ReplayBuffer) the type of replay buffer
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param requires_vec_env: (bool) Does this model require a vectorized environment
    :param policy_base: (BasePolicy) the base policy used by this method
    """
    def __init__(self, policy, env, replay_buffer, verbose=0, *, requires_vec_env, policy_base):
        super(OffPolicyRLModel, self).__init__(policy, env, verbose=verbose, requires_vec_env=requires_vec_env,
                                               policy_base=policy_base)
        # Buffer of past transitions used for off-policy updates.
        self.replay_buffer = replay_buffer
    # The rest of the interface is left fully abstract for concrete
    # off-policy algorithms to implement; see BaseRLModel for the contracts.
    @abstractmethod
    def setup_model(self):
        pass
    @abstractmethod
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="run"):
        pass
    @abstractmethod
    def predict(self, observation, state=None, mask=None, deterministic=False):
        pass
    @abstractmethod
    def action_probability(self, observation, state=None, mask=None):
        pass
    @abstractmethod
    def save(self, save_path):
        pass
    @classmethod
    @abstractmethod
    def load(cls, load_path, env=None, **kwargs):
        pass
class _UnvecWrapper(VecEnvWrapper):
    """Expose a single-environment VecEnv through the plain (non-batched) gym interface."""

    def __init__(self, venv):
        """
        Unvectorize a vectorized environment, for vectorized environment that only have one environment
        :param venv: (VecEnv) the vectorized environment to wrap
        """
        super().__init__(venv)
        assert venv.num_envs == 1, "Error: cannot unwrap a environment wrapper that has more than one environment."

    def reset(self):
        # Drop the batch dimension from the vectorized reset.
        return self.venv.reset()[0]

    def step_async(self, actions):
        # Re-add the batch dimension expected by the underlying VecEnv.
        self.venv.step_async([actions])

    def step_wait(self):
        # Unbatch the (obs, reward, done, info) tuple returned by the VecEnv.
        observations, rewards, dones, infos = self.venv.step_wait()
        return observations[0], float(rewards[0]), dones[0], infos[0]

    def render(self, mode='human'):
        return self.venv.render(mode=mode)
class SetVerbosity:
    def __init__(self, verbose=0):
        """
        define a region of code for certain level of verbosity
        :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
        """
        self.verbose = verbose
    def __enter__(self):
        # Save the current tensorflow / project-logger / gym logging levels so
        # they can be restored on exit.
        self.tf_level = os.environ.get('TF_CPP_MIN_LOG_LEVEL', '0')
        self.log_level = logger.get_level()
        self.gym_level = gym.logger.MIN_LEVEL
        if self.verbose <= 1:
            # Silence most of tensorflow's C++ log output.
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        if self.verbose <= 0:
            logger.set_level(logger.DISABLED)
            gym.logger.set_level(gym.logger.DISABLED)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the logging configuration captured in __enter__.
        if self.verbose <= 1:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = self.tf_level
        if self.verbose <= 0:
            logger.set_level(self.log_level)
            gym.logger.set_level(self.gym_level)
class TensorboardWriter:
    def __init__(self, graph, tensorboard_log_path, tb_log_name):
        """
        Create a Tensorboard writer for a code segment, and saves it to the log directory as its own run
        :param graph: (Tensorflow Graph) the model graph
        :param tensorboard_log_path: (str) the save path for the log (can be None for no logging)
        :param tb_log_name: (str) the name of the run for tensorboard log
        """
        self.graph = graph
        self.tensorboard_log_path = tensorboard_log_path
        self.tb_log_name = tb_log_name
        self.writer = None

    def __enter__(self):
        if self.tensorboard_log_path is not None:
            # Each run gets its own sub-directory, numbered one past the latest.
            save_path = os.path.join(self.tensorboard_log_path,
                                     "{}_{}".format(self.tb_log_name, self._get_latest_run_id() + 1))
            self.writer = tf.summary.FileWriter(save_path, graph=self.graph)
            logger.info('TF Logging to {} ...'.format(save_path))
        return self.writer

    def _get_latest_run_id(self):
        """
        returns the latest run number for the given log name and log path,
        by finding the greatest number in the directories.
        :return: (int) latest run number (0 when no matching run directory exists)
        """
        max_run_id = 0
        # Use os.path.join/os.path.basename (instead of '/'-splitting) so this
        # also works on Windows; the unused `from pathlib import Path` was removed.
        pattern = os.path.join(self.tensorboard_log_path,
                               "{}_[0-9]*".format(self.tb_log_name))
        for path in glob.glob(pattern):
            file_name = os.path.basename(path)
            ext = file_name.split("_")[-1]
            # Only accept exact "<name>_<digits>" directories with a larger id.
            if self.tb_log_name == "_".join(file_name.split("_")[:-1]) and ext.isdigit() and int(ext) > max_run_id:
                max_run_id = int(ext)
        return max_run_id

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.writer is not None:
            self.writer.add_graph(self.graph)
            self.writer.flush()
| 40.844231 | 120 | 0.622487 |
2f04b8b23a8c2b67d069f97878e79afdea3a68c5 | 3,635 | py | Python | examples/greetings/receiver.py | holytshirt/Brightside | ef78a46a5d1ee0aaf45594b1b353f6a82e6acf0d | [
"MIT"
] | null | null | null | examples/greetings/receiver.py | holytshirt/Brightside | ef78a46a5d1ee0aaf45594b1b353f6a82e6acf0d | [
"MIT"
] | null | null | null | examples/greetings/receiver.py | holytshirt/Brightside | ef78a46a5d1ee0aaf45594b1b353f6a82e6acf0d | [
"MIT"
] | null | null | null | """
File : sender.py
Author : ian
Created : 08-04-2017
Last Modified By : ian
Last Modified On : 08-05-2017
***********************************************************************
The MIT License (MIT)
Copyright 2017 Ian Cooper <ian_hammond_cooper@yahoo.co.uk>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
***********************************************************************
"""
import logging
import sys
import time
from multiprocessing import Queue
from arame.gateway import ArameConsumer
from brightside.connection import Connection
from brightside.command_processor import CommandProcessor, Request
from brightside.dispatch import ConsumerConfiguration, Dispatcher
from brightside.messaging import BrightsideConsumerConfiguration, BrightsideMessage
from brightside.registry import Registry
from arame.messaging import JsonRequestSerializer
from src.core import HelloWorldCommand, HelloWorldCommandHandler
KEYBOARD_INTERRUPT_SLEEP = 3 # How long before checking for a keyhoard interrupt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def command_processor_factory(channel_name: str):
    """Build a CommandProcessor whose registry routes HelloWorldCommand to one handler."""
    greeting_handler = HelloWorldCommandHandler()
    registry = Registry()
    # Every HelloWorldCommand is dispatched to the same handler instance.
    registry.register(HelloWorldCommand, lambda: greeting_handler)
    return CommandProcessor(registry=registry)
def consumer_factory(connection: Connection, consumer_configuration: BrightsideConsumerConfiguration, logger: logging.Logger):
    # Factory hook handed to the dispatcher so it can build an AMQP consumer per channel.
    return ArameConsumer(connection=connection, configuration=consumer_configuration, logger=logger)
def map_my_command_to_request(message: BrightsideMessage) -> Request:
    # Deserialize the JSON message body into a fresh HelloWorldCommand instance.
    return JsonRequestSerializer(request=HelloWorldCommand(), serialized_request=message.body.value).deserialize_from_json()
def run():
    """Wire up the dispatcher for the hello-world channel and block until Ctrl+C."""
    pipeline = Queue()
    # Local RabbitMQ broker with the Brightside exchange; durable so messages survive restarts.
    connection = Connection("amqp://guest:guest@localhost:5672//", "paramore.brightside.exchange", is_durable=True)
    configuration = BrightsideConsumerConfiguration(pipeline, "examples_greetings_queue", "hello_world")
    consumer = ConsumerConfiguration(connection, configuration, consumer_factory, command_processor_factory, map_my_command_to_request)
    dispatcher = Dispatcher({"HelloWorldCommand": consumer})
    dispatcher.receive()
    # poll for keyboard input to allow the user to quit monitoring
    while True:
        try:
            # just sleep unless we receive an interrupt i.e. CTRL+C
            time.sleep(KEYBOARD_INTERRUPT_SLEEP)
        except KeyboardInterrupt:
            dispatcher.end()
            sys.exit(1)
# Script entry point: start the dispatcher and block until interrupted.
if __name__ == "__main__":
    run()
| 39.086022 | 135 | 0.751857 |
67222687ef5a1026aca4ea412417e603a1cfd9d9 | 6,412 | py | Python | tests/test_contrib_signaling.py | emcee-ai/aiortc | 9678334784be5872af6ede786d3a4404b265f0ae | [
"BSD-3-Clause"
] | null | null | null | tests/test_contrib_signaling.py | emcee-ai/aiortc | 9678334784be5872af6ede786d3a4404b265f0ae | [
"BSD-3-Clause"
] | 2 | 2018-12-24T19:18:58.000Z | 2018-12-24T20:09:32.000Z | tests/test_contrib_signaling.py | koenvo/aiortc | ac834849b58f852722079436e616562b7d9f787b | [
"BSD-3-Clause"
] | null | null | null | import argparse
import asyncio
from unittest import TestCase
from aiortc import RTCIceCandidate, RTCSessionDescription
from aiortc.contrib.signaling import (add_signaling_arguments,
create_signaling, object_from_string,
object_to_string)
from .utils import run
async def delay(coro):
    """Await the awaitable produced by *coro* after a short pause, letting the peer act first."""
    pause = 0.1
    await asyncio.sleep(pause)
    result = await coro()
    return result
# Fixed SDP fixtures shared by the signaling round-trip tests below.
offer = RTCSessionDescription(sdp='some-offer', type='offer')
answer = RTCSessionDescription(sdp='some-answer', type='answer')
class SignalingTest(TestCase):
    """Round-trip offer/answer exchange over each signaling transport, plus abrupt disconnects."""
    def test_copy_and_paste(self):
        def mock_print(v=''):
            pass
        # hijack builtins
        original_print = __builtins__['print']
        __builtins__['print'] = mock_print
        parser = argparse.ArgumentParser()
        add_signaling_arguments(parser)
        args = parser.parse_args(['-s', 'copy-and-paste'])
        sig_server = create_signaling(args)
        sig_client = create_signaling(args)
        # In-memory stand-ins for stdin/stdout: one shared queue per direction.
        class MockReader:
            def __init__(self, queue):
                self.queue = queue
            async def readline(self):
                return await self.queue.get()
        class MockWritePipe:
            def __init__(self, queue, encoding):
                self.encoding = encoding
                self.queue = queue
            def write(self, msg):
                asyncio.ensure_future(self.queue.put(msg.encode(self.encoding)))
        def dummy_stdio(encoding):
            queue = asyncio.Queue()
            return MockReader(queue), MockWritePipe(queue, encoding=encoding)
        # mock out reader / write pipe
        run(sig_server._connect())
        run(sig_client._connect())
        # Cross-wire the pipes so each side reads what the other writes.
        sig_server._reader, sig_client._write_pipe = dummy_stdio(sig_server._read_pipe.encoding)
        sig_client._reader, sig_server._write_pipe = dummy_stdio(sig_client._read_pipe.encoding)
        res = run(asyncio.gather(sig_server.send(offer), delay(sig_client.receive)))
        self.assertEqual(res[1], offer)
        res = run(asyncio.gather(sig_client.send(answer), delay(sig_server.receive)))
        self.assertEqual(res[1], answer)
        run(asyncio.gather(sig_server.close(), sig_client.close()))
        # restore builtins
        __builtins__['print'] = original_print
    def test_tcp_socket(self):
        parser = argparse.ArgumentParser()
        add_signaling_arguments(parser)
        args = parser.parse_args(['-s', 'tcp-socket'])
        sig_server = create_signaling(args)
        sig_client = create_signaling(args)
        # Offer travels server -> client, answer travels back.
        res = run(asyncio.gather(sig_server.send(offer), delay(sig_client.receive)))
        self.assertEqual(res[1], offer)
        res = run(asyncio.gather(sig_client.send(answer), delay(sig_server.receive)))
        self.assertEqual(res[1], answer)
        run(asyncio.gather(sig_server.close(), sig_client.close()))
    def test_tcp_socket_abrupt_disconnect(self):
        parser = argparse.ArgumentParser()
        add_signaling_arguments(parser)
        args = parser.parse_args(['-s', 'tcp-socket'])
        sig_server = create_signaling(args)
        sig_client = create_signaling(args)
        res = run(asyncio.gather(sig_server.send(offer), delay(sig_client.receive)))
        self.assertEqual(res[1], offer)
        # break connection
        sig_client._writer.close()
        sig_server._writer.close()
        # Both sides should observe the disconnect as a None receive.
        res = run(sig_server.receive())
        self.assertIsNone(res)
        res = run(sig_client.receive())
        self.assertIsNone(res)
        run(asyncio.gather(sig_server.close(), sig_client.close()))
    def test_unix_socket(self):
        parser = argparse.ArgumentParser()
        add_signaling_arguments(parser)
        args = parser.parse_args(['-s', 'unix-socket'])
        sig_server = create_signaling(args)
        sig_client = create_signaling(args)
        res = run(asyncio.gather(sig_server.send(offer), delay(sig_client.receive)))
        self.assertEqual(res[1], offer)
        res = run(asyncio.gather(sig_client.send(answer), delay(sig_server.receive)))
        self.assertEqual(res[1], answer)
        run(asyncio.gather(sig_server.close(), sig_client.close()))
    def test_unix_socket_abrupt_disconnect(self):
        parser = argparse.ArgumentParser()
        add_signaling_arguments(parser)
        args = parser.parse_args(['-s', 'unix-socket'])
        sig_server = create_signaling(args)
        sig_client = create_signaling(args)
        res = run(asyncio.gather(sig_server.send(offer), delay(sig_client.receive)))
        self.assertEqual(res[1], offer)
        # break connection
        sig_client._writer.close()
        sig_server._writer.close()
        # Both sides should observe the disconnect as a None receive.
        res = run(sig_server.receive())
        self.assertIsNone(res)
        res = run(sig_client.receive())
        self.assertIsNone(res)
        run(asyncio.gather(sig_server.close(), sig_client.close()))
class SignalingUtilsTest(TestCase):
    """Serialization round-trips for the signaling helpers (BYE and ICE candidates)."""
    def test_bye_from_string(self):
        self.assertEqual(object_from_string('{"type": "bye"}'), None)
    def test_bye_to_string(self):
        self.assertEqual(object_to_string(None), '{"type": "bye"}')
    def test_candidate_from_string(self):
        candidate = object_from_string(
            '{"candidate": "candidate:0 1 UDP 2122252543 192.168.99.7 33543 typ host", "id": "audio", "label": 0, "type": "candidate"}')  # noqa
        # Every field of the SDP candidate line must be parsed out.
        self.assertEqual(candidate.component, 1)
        self.assertEqual(candidate.foundation, '0')
        self.assertEqual(candidate.ip, '192.168.99.7')
        self.assertEqual(candidate.port, 33543)
        self.assertEqual(candidate.priority, 2122252543)
        self.assertEqual(candidate.protocol, 'UDP')
        self.assertEqual(candidate.sdpMid, 'audio')
        self.assertEqual(candidate.sdpMLineIndex, 0)
        self.assertEqual(candidate.type, 'host')
    def test_candidate_to_string(self):
        candidate = RTCIceCandidate(
            component=1,
            foundation='0',
            ip='192.168.99.7',
            port=33543,
            priority=2122252543,
            protocol='UDP',
            type='host')
        candidate.sdpMid = 'audio'
        candidate.sdpMLineIndex = 0
        self.assertEqual(
            object_to_string(candidate),
            '{"candidate": "candidate:0 1 UDP 2122252543 192.168.99.7 33543 typ host", "id": "audio", "label": 0, "type": "candidate"}')  # noqa
89b0300a33607210c803753615e5d44100acc8b0 | 3,487 | py | Python | pygments/lexers/usd.py | KMilhan/pygments | 5120e9943d137f7aa1d33499b79d5ebd5c9f775d | [
"BSD-2-Clause"
] | 1,198 | 2015-01-02T12:08:49.000Z | 2021-10-07T02:46:59.000Z | pygments/lexers/usd.py | KMilhan/pygments | 5120e9943d137f7aa1d33499b79d5ebd5c9f775d | [
"BSD-2-Clause"
] | 249 | 2015-01-22T13:31:12.000Z | 2021-05-01T08:01:22.000Z | pygments/lexers/usd.py | KMilhan/pygments | 5120e9943d137f7aa1d33499b79d5ebd5c9f775d | [
"BSD-2-Clause"
] | 118 | 2015-01-16T19:13:15.000Z | 2021-07-21T15:09:15.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.usd
~~~~~~~~~~~~~~~~~~~
The module that parses Pixar's Universal Scene Description file format.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.lexer import words as words_
from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \
OPERATORS, SPECIAL_NAMES, TYPES
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text, Whitespace
__all__ = ["UsdLexer"]
def _keywords(words, type_):
    # Wrap a word list in a single whole-word regex rule that yields `type_` tokens.
    return [(words_(words, prefix=r"\b", suffix=r"\b"), type_)]
# Regex fragments shared by the attribute-declaration rules in UsdLexer.
_TYPE = r"(\w+(?:\[\])?)"  # a type name, optionally an array type (e.g. float[])
_BASE_ATTRIBUTE = r"([\w_]+(?:\:[\w_]+)*)(?:(\.)(timeSamples))?"  # namespaced attribute, optional .timeSamples
_WHITESPACE = r"([ \t]+)"
class UsdLexer(RegexLexer):
    """
    A lexer that parses Pixar's Universal Scene Description file format.
    .. versionadded:: 2.6
    """
    name = "USD"
    aliases = ["usd", "usda"]
    filenames = ["*.usd", "*.usda"]
    tokens = {
        "root": [
            # Attribute declarations, from most to least qualified:
            # "custom uniform", "custom", "uniform", and bare "type attr =".
            (r"(custom){_WHITESPACE}(uniform)(\s+){}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace,
                      Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
            (r"(custom){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"(uniform){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"{}{_WHITESPACE}{}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
        ] +
        # Word-list rules built from the shared USD builtins tables.
        _keywords(KEYWORDS, Keyword.Tokens) +
        _keywords(SPECIAL_NAMES, Name.Builtins) +
        _keywords(COMMON_ATTRIBUTES, Name.Attribute) +
        [(r"\b\w+:[\w:]+\b", Name.Attribute)] +
        _keywords(OPERATORS, Operator) +  # more attributes
        [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] +
        _keywords(TYPES, Keyword.Type) +
        [
            # Punctuation, literals, references and catch-alls.
            (r"[\(\)\[\]{}]", Punctuation),
            ("#.*?$", Comment.Single),
            (",", Punctuation),
            (";", Punctuation),  # ";"s are allowed to combine separate metadata lines
            ("=", Operator),
            (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number),
            (r"'''(?:.|\n)*?'''", String),
            (r'"""(?:.|\n)*?"""', String),
            (r"'.*?'", String),
            (r'".*?"', String),
            (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace),
            (r"@.*?@", String.Interpol),
            (r'\(.*"[.\\n]*".*\)', String.Doc),
            (r"\A#usda .+$", Comment.Hashbang),
            (r"\s+", Whitespace),
            (r"[\w_]+", Text),
            (r"[_:\.]+", Punctuation),
        ],
    }
8a2a9011224d2b79539235ba87355c8c544e2f3e | 16,256 | py | Python | lib/model/train_val.py | robtu328/Player_Number | 800d73374fff0dc4546930a795ca043c62f121ce | [
"MIT"
] | null | null | null | lib/model/train_val.py | robtu328/Player_Number | 800d73374fff0dc4546930a795ca043c62f121ce | [
"MIT"
] | null | null | null | lib/model/train_val.py | robtu328/Player_Number | 800d73374fff0dc4546930a795ca043c62f121ce | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen and Zheqi He
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorboardX as tb
from model.config import cfg
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
import utils.timer
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
import torch.optim as optim
import numpy as np
import os
import sys
import glob
import time
def scale_lr(optimizer, scale):
    """Multiply the learning rate of every param group in *optimizer* by *scale*."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * scale
class SolverWrapper(object):
    """
    A wrapper class for the training process.

    Owns the detection network, the train/validation RoI databases, the
    optimizer, the tensorboardX writers, and the snapshot (checkpoint)
    lifecycle so training can be resumed deterministically.
    """

    def __init__(self,
                 network,
                 imdb,
                 roidb,
                 valroidb,
                 output_dir,
                 tbdir,
                 pretrained_model=None):
        """Store training inputs and prepare the summary directories.

        :param network: detection network (provides train_step, snapshot I/O)
        :param imdb: image database (used here for num_classes)
        :param roidb: training regions-of-interest database
        :param valroidb: validation regions-of-interest database
        :param output_dir: directory where snapshots (.pth/.pkl) are written
        :param tbdir: tensorboard summary directory for training
        :param pretrained_model: path to initial (e.g. ImageNet) weights
        """
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.valroidb = valroidb
        self.output_dir = output_dir
        self.tbdir = tbdir
        # Simply put '_val' at the end to save the summaries from the validation set
        self.tbvaldir = tbdir + '_val'
        if not os.path.exists(self.tbvaldir):
            os.makedirs(self.tbvaldir)
        self.pretrained_model = pretrained_model

    def snapshot(self, iter):
        """Write a model snapshot (.pth) plus training meta state (.pkl).

        The .pkl stores the numpy RNG state, the data-layer cursors and
        shuffle permutations (train and validation), and the iteration
        number, so a restored run continues with the same data order.

        Returns (model_filename, meta_filename).
        """
        net = self.net
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # Store the model snapshot
        filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(
            iter) + '.pth'
        filename = os.path.join(self.output_dir, filename)
        torch.save(self.net.state_dict(), filename)
        print('Wrote snapshot to: {:s}'.format(filename))
        # Also store some meta information, random state, etc.
        nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(
            iter) + '.pkl'
        nfilename = os.path.join(self.output_dir, nfilename)
        # current state of numpy random
        st0 = np.random.get_state()
        # current position in the database
        cur = self.data_layer._cur
        # current shuffled indexes of the database
        perm = self.data_layer._perm
        # current position in the validation database
        cur_val = self.data_layer_val._cur
        # current shuffled indexes of the validation database
        perm_val = self.data_layer_val._perm
        # Dump the meta info; load order in from_snapshot must match this
        # dump order exactly.
        with open(nfilename, 'wb') as fid:
            pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
        return filename, nfilename

    def from_snapshot(self, sfile, nfile):
        """Restore weights from *sfile* and meta training state from *nfile*.

        Returns the iteration number the snapshot was taken at.
        """
        print('Restoring model snapshots from {:s}'.format(sfile))
        self.net.load_state_dict(torch.load(str(sfile)))
        print('Restored.')
        # Needs to restore the other hyper-parameters/states for training, (TODO xinlei) I have
        # tried my best to find the random states so that it can be recovered exactly
        # However the Tensorflow state is currently not available
        with open(nfile, 'rb') as fid:
            st0 = pickle.load(fid)
            cur = pickle.load(fid)
            perm = pickle.load(fid)
            cur_val = pickle.load(fid)
            perm_val = pickle.load(fid)
            last_snapshot_iter = pickle.load(fid)
            # (original note: "REMETTRE ça apres premier checkpoint" — i.e.
            # re-enable this restore after the first checkpoint)
            np.random.set_state(st0)
            self.data_layer._cur = cur
            self.data_layer._perm = perm
            self.data_layer_val._cur = cur_val
            self.data_layer_val._perm = perm_val
        return last_snapshot_iter

    def construct_graph(self):
        """Build the network, the per-parameter-group SGD optimizer, and the
        tensorboard writers.

        Returns (initial_learning_rate, optimizer).
        """
        # Set the random seed
        torch.manual_seed(cfg.RNG_SEED)
        # Build the main computation graph
        self.net.create_architecture(
            self.imdb.num_classes,
            tag='default',
            anchor_scales=cfg.ANCHOR_SCALES,
            anchor_ratios=cfg.ANCHOR_RATIOS)
        # Set learning rate and momentum
        lr = cfg.TRAIN.LEARNING_RATE
        params = []
        for key, value in dict(self.net.named_parameters()).items():
            if value.requires_grad:
                if 'bias' in key:
                    # Biases optionally get a doubled lr and (optionally) no
                    # weight decay, per the standard Faster R-CNN recipe.
                    params += [{
                        'params': [value],
                        'lr':
                        lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                        'weight_decay':
                        cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0
                    }]
                ## -------------------------------------- remove condition if possible
                else:
                    if 'cls_score_net' in key:
                        # NOTE(review): debug print left in; this branch uses
                        # lr*1.0, i.e. currently identical to the else branch.
                        print('bibi')
                        params += [{
                            'params': [value],
                            'lr':
                            lr * 1.0,
                            'weight_decay':
                            getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)
                        }]
                    else:
                        params += [{
                            'params': [value],
                            'lr':
                            lr,
                            'weight_decay':
                            getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)
                        }]
        self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
        # self.optimizer = torch.optim.Adam(params)#, momentum=cfg.TRAIN.MOMENTUM)
        # Write the train and validation information to tensorboard
        self.writer = tb.writer.FileWriter(self.tbdir)
        self.valwriter = tb.writer.FileWriter(self.tbvaldir)
        return lr, self.optimizer

    def find_previous(self):
        """Enumerate snapshots already present in the output directory.

        Snapshots written right after a learning-rate step boundary
        (iteration stepsize + 1) are excluded from resume candidates.
        Returns (count, meta_pkl_files, model_pth_files), each list sorted
        by modification time.
        """
        sfiles = os.path.join(self.output_dir,
                              cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth')
        sfiles = glob.glob(sfiles)
        sfiles.sort(key=os.path.getmtime)
        # Get the snapshot name in pytorch
        redfiles = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            redfiles.append(
                os.path.join(
                    self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX +
                    '_iter_{:d}.pth'.format(stepsize + 1)))
        sfiles = [ss for ss in sfiles if ss not in redfiles]
        nfiles = os.path.join(self.output_dir,
                              cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
        nfiles = glob.glob(nfiles)
        nfiles.sort(key=os.path.getmtime)
        redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
        nfiles = [nn for nn in nfiles if nn not in redfiles]
        lsf = len(sfiles)
        assert len(nfiles) == lsf
        return lsf, nfiles, sfiles

    def initialize(self):
        """Fresh start: load pretrained (e.g. ImageNet) weights.

        Returns (lr, last_snapshot_iter=0, stepsizes, np_paths, ss_paths).
        """
        # Initial file lists are empty
        np_paths = []
        ss_paths = []
        # Fresh train directly from ImageNet weights
        print('Loading initial model weights from {:s}'.format(
            self.pretrained_model))
        self.net.load_pretrained_cnn(torch.load(self.pretrained_model))
        print('Loaded.')
        # Need to fix the variables before loading, so that the RGB weights are changed to BGR
        # For VGG16 it also changes the convolutional weights fc6 and fc7 to
        # fully connected weights
        last_snapshot_iter = 0
        lr = cfg.TRAIN.LEARNING_RATE
        stepsizes = list(cfg.TRAIN.STEPSIZE)
        return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths

    def restore(self, sfile, nfile):
        """Resume from the given snapshot pair and re-derive the learning
        rate / remaining step schedule for the restored iteration.

        Returns (lr, last_snapshot_iter, remaining_stepsizes, np_paths, ss_paths).
        """
        # Get the most recent snapshot and restore
        np_paths = [nfile]
        ss_paths = [sfile]
        # Restore model from snapshots
        last_snapshot_iter = self.from_snapshot(sfile, nfile)
        # Set the learning rate: apply GAMMA once per step boundary already
        # passed; keep the boundaries still ahead of us.
        lr_scale = 1
        stepsizes = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            if last_snapshot_iter > stepsize:
                lr_scale *= cfg.TRAIN.GAMMA
            else:
                stepsizes.append(stepsize)
        scale_lr(self.optimizer, lr_scale)
        lr = cfg.TRAIN.LEARNING_RATE * lr_scale
        return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths

    def remove_snapshot(self, np_paths, ss_paths):
        """Delete the oldest snapshots beyond cfg.TRAIN.SNAPSHOT_KEPT,
        mutating both path lists in place."""
        to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
        for c in range(to_remove):
            nfile = np_paths[0]
            os.remove(str(nfile))
            np_paths.remove(nfile)
        to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
        for c in range(to_remove):
            sfile = ss_paths[0]
            # To make the code compatible to earlier versions of Tensorflow,
            # where the naming tradition for checkpoints are different
            os.remove(str(sfile))
            ss_paths.remove(sfile)

    def train_model(self, max_iters):
        """Run the main training loop for up to *max_iters* iterations.

        Handles: resuming from the latest snapshot, stepwise lr decay,
        periodic tensorboard summaries (train + validation), console
        progress display, and periodic snapshotting with rotation.
        """
        # Build data layers for both training and validation set
        self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
        self.data_layer_val = RoIDataLayer(
            self.valroidb, self.imdb.num_classes, random=True)
        # Construct the computation graph
        lr, train_op = self.construct_graph()
        # Find previous snapshots if there is any to restore from
        lsf, nfiles, sfiles = self.find_previous()
        # Initialize the variables or restore them from the last snapshot
        if lsf == 0:
            lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize(
            )
        else:
            lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(
                str(sfiles[-1]), str(nfiles[-1]))
        iter = last_snapshot_iter + 1
        last_summary_time = time.time()
        # Make sure the lists are not empty; pop() yields boundaries in
        # ascending order after the reverse.
        stepsizes.append(max_iters)
        stepsizes.reverse()
        next_stepsize = stepsizes.pop()
        self.net.train()
        self.net.to(self.net._device)
        while iter < max_iters + 1:
            # Learning rate decay at each step boundary
            if iter == next_stepsize + 1:
                # Add snapshot here before reducing the learning rate
                self.snapshot(iter)
                lr *= cfg.TRAIN.GAMMA
                scale_lr(self.optimizer, cfg.TRAIN.GAMMA)
                next_stepsize = stepsizes.pop()
            utils.timer.timer.tic()
            # Get training data, one batch at a time
            blobs = self.data_layer.forward()
            now = time.time()
            if iter == 1 or now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
                # Compute the graph with summary (train step w/ summary)
                rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary = \
                  self.net.train_step_with_summary(blobs, self.optimizer)
                for _sum in summary:
                    self.writer.add_summary(_sum, float(iter))
                # Also check the summary on the validation set
                blobs_val = self.data_layer_val.forward()
                summary_val = self.net.get_summary(blobs_val)
                for _sum in summary_val:
                    self.valwriter.add_summary(_sum, float(iter))
                last_summary_time = now
            else:
                # Compute the graph without summary (train step w/o summary)
                rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss = \
                  self.net.train_step(blobs, self.optimizer)
            # (commented-out debug code that printed per-layer gradients was
            # removed here for clarity)
            utils.timer.timer.toc()
            # Display training information
            if iter % (cfg.TRAIN.DISPLAY) == 0:
                print('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n '
                      '>>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % \
                      (iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr))
                print('speed: {:.3f}s / iter'.format(
                    utils.timer.timer.average_time()))
            # Snapshotting
            if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                ss_path, np_path = self.snapshot(iter)
                np_paths.append(np_path)
                ss_paths.append(ss_path)
                # Remove the old snapshots if there are too many
                if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
                    self.remove_snapshot(np_paths, ss_paths)
            iter += 1
        # Final snapshot if the last iteration wasn't already snapshotted
        if last_snapshot_iter != iter - 1:
            self.snapshot(iter - 1)
        self.writer.close()
        self.valwriter.close()
def get_training_roidb(imdb):
    """Return a roidb (Region of Interest database) ready for training.

    Optionally augments the imdb with horizontally-flipped copies, then
    precomputes per-entry metadata via the roidb preparation helper.
    """
    if cfg.TRAIN.USE_FLIPPED:
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')

    print('Preparing training data...')
    rdl_roidb.prepare_roidb(imdb)
    print('done')

    return imdb.roidb
def filter_roidb(roidb):
    """Remove roidb entries that have no usable RoIs."""

    def _has_usable_rois(entry):
        # An image is usable when it yields at least one foreground RoI
        # (overlap >= FG_THRESH) or at least one background RoI (overlap
        # within [BG_THRESH_LO, BG_THRESH_HI)).
        overlaps = entry['max_overlaps']
        has_fg = bool((overlaps >= cfg.TRAIN.FG_THRESH).any())
        bg_mask = (overlaps < cfg.TRAIN.BG_THRESH_HI) & \
                  (overlaps >= cfg.TRAIN.BG_THRESH_LO)
        return has_fg or bool(bg_mask.any())

    num_before = len(roidb)
    kept = [entry for entry in roidb if _has_usable_rois(entry)]
    num_after = len(kept)
    print('Filtered {} roidb entries: {} -> {}'.format(num_before - num_after,
                                                       num_before, num_after))
    return kept
def train_net(network,
              imdb,
              roidb,
              valroidb,
              output_dir,
              tb_dir,
              pretrained_model=None,
              max_iters=300000):
    """Train a Faster R-CNN network.

    Filters unusable entries from both roidbs, then delegates the whole
    training loop to a SolverWrapper.
    """
    roidb = filter_roidb(roidb)
    valroidb = filter_roidb(valroidb)

    solver = SolverWrapper(network,
                           imdb,
                           roidb,
                           valroidb,
                           output_dir,
                           tb_dir,
                           pretrained_model=pretrained_model)
    print('Solving...')
    solver.train_model(max_iters)
    print('done solving')
| 37.284404 | 130 | 0.55967 |
8b2eb90bbb8c6dc76d93d3e603894085377f482a | 1,012 | py | Python | exploit/hash_leaking.py | fausecteam/faustctf-2019-responsivesecurity | 65b4e02bdc9de278166c38697ab992638977d511 | [
"0BSD"
] | null | null | null | exploit/hash_leaking.py | fausecteam/faustctf-2019-responsivesecurity | 65b4e02bdc9de278166c38697ab992638977d511 | [
"0BSD"
] | null | null | null | exploit/hash_leaking.py | fausecteam/faustctf-2019-responsivesecurity | 65b4e02bdc9de278166c38697ab992638977d511 | [
"0BSD"
] | null | null | null | import subprocess
import hashlib
import sys
from collections import defaultdict, deque
import re
# Remote host whose nginx access log we tail (first CLI argument).
host = sys.argv[1]

# Stream the web server's access log over SSH; matched lines arrive on
# tailf.stdout as bytes.
tailf = subprocess.Popen(["ssh", host, "tail", "-f", "/var/log/nginx/access.log"], stdout=subprocess.PIPE)

# Maps a 5-hex-char uppercase SHA-1 prefix -> set of candidate byte strings
# whose SHA-1 digest starts with that prefix.
tree = defaultdict(set)


def expand(prefix):
    # Extend *prefix* by every 7-bit byte and index each candidate under the
    # first 5 hex chars (uppercased) of its SHA-1 digest.
    for nextchar in range(128):
        x = prefix + bytes([nextchar])
        h = hashlib.sha1(x).hexdigest()[:5].upper().encode()
        tree[h].add(x)


# Seed the tree with all single-byte candidates.
expand(b"")

# Flag format for this CTF service: FAUST_ + 32 base64-ish characters.
flagre = re.compile(b"FAUST_[a-zA-Z0-9./+=]{32}")


def handle(h):
    # For every candidate indexed under hash prefix *h*: print it to stderr
    # for debugging; emit it on stdout if it already matches the flag format,
    # otherwise expand it by one more byte.
    if h in tree:
        for val in tree[h]:
            print("?", val.decode(), file=sys.stderr)
            if flagre.match(val):
                print(val.decode())
            else:
                expand(val)


# Debug dump of the full candidate tree.
print(tree)

# Keep the 20 most recent observed hash prefixes.
q = deque([], 20)
# Extract the hash-prefix path segment from access-log GET requests.
search = re.compile(b"GET /responsivesecurity/pwned/range/([A-F0-9]*)")
for line in tailf.stdout:
    m = search.search(line)
    if m:
        # assumes the handle() pass runs for every matched line (original
        # indentation was lost in extraction) -- TODO confirm
        if len(m.group(1)) == 5:
            q.append(m.group(1))
        for e in q:
            handle(e)
67903dd9c4346f7596a684e239be9ce3a219f27d | 1,195 | py | Python | tests/test_cast_op.py | shjwudp/paddle-onnx | cc80141bc7d1f29da3eef186f6cc0b63da266af2 | [
"Apache-2.0"
] | 2 | 2021-07-14T00:43:54.000Z | 2021-11-17T11:06:56.000Z | tests/test_cast_op.py | wawltor/paddle-onnx | 037b9eab4d3f817aa838f104571d4a72aea24e2d | [
"Apache-2.0"
] | null | null | null | tests/test_cast_op.py | wawltor/paddle-onnx | 037b9eab4d3f817aa838f104571d4a72aea24e2d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
class TestCastOp(OpTest):
    """Checks the `cast` op: a float32 input is cast to a float64 output."""

    def setUp(self):
        data = np.random.random((10, 10))
        self.op_type = 'cast'
        self.inputs = {'X': data.astype('float32')}
        self.outputs = {'Out': data.astype('float64')}
        self.attrs = {
            'in_dtype': int(core.VarDesc.VarType.FP32),
            'out_dtype': int(core.VarDesc.VarType.FP64),
        }

    def test_check_output(self):
        self.check_output()
if __name__ == '__main__':
unittest.main()
| 31.447368 | 74 | 0.688703 |
5cd47f6a248ede3c5ba73ba4018ee689e08fd3d2 | 2,385 | py | Python | backend/dbutils.py | grzegorzcode/dash-analytics-framework | 572fa8d06afd850c5dd1edf11f7c8864cdad7568 | [
"MIT"
] | null | null | null | backend/dbutils.py | grzegorzcode/dash-analytics-framework | 572fa8d06afd850c5dd1edf11f7c8864cdad7568 | [
"MIT"
] | null | null | null | backend/dbutils.py | grzegorzcode/dash-analytics-framework | 572fa8d06afd850c5dd1edf11f7c8864cdad7568 | [
"MIT"
] | null | null | null | from werkzeug.security import check_password_hash
from backend.dbengine import *
from datetime import datetime
from werkzeug.security import generate_password_hash
def check_password(user, password):
    """Return True if *password* matches the stored hash for *user*.

    Returns False when the password is wrong OR when the user does not
    exist. (The original fell through and returned None implicitly for an
    unknown user; an explicit False keeps the boolean contract while
    staying falsy-compatible for existing callers.)
    """
    with session_scope() as s:
        row = s.query(Users).filter_by(username=user).first()
        if row is None:
            # Unknown username: treat as a failed check.
            return False
        return check_password_hash(row.password, password)
def check_admin(user):
    """Return True if *user* exists and its admin flag is 'Y'.

    The original dereferenced `user.admin` unconditionally and raised
    AttributeError for an unknown username; an unknown user is now simply
    reported as not an admin.
    """
    with session_scope() as s:
        row = s.query(Users).filter_by(username=user).first()
        return row is not None and row.admin == 'Y'
def add_user(username, password, admin):
    """Insert a new user row with a sha256-hashed password; always returns True."""
    hashed = generate_password_hash(password, method='sha256')
    with session_scope() as s:
        s.add(Users(username=username, password=hashed, admin=admin,
                    created=datetime.now()))
    return True
def show_users():
    """Return (username, admin) tuples for every registered user."""
    with session_scope() as s:
        return s.query(Users.username, Users.admin).all()
def del_user(username):
    """Delete the named user; True if at least one row was removed."""
    with session_scope() as s:
        deleted_count = s.query(Users).filter_by(username=username).delete()
        return bool(deleted_count)
def user_exists(user):
    """Return True if at least one row matches the given username."""
    with session_scope() as s:
        match_count = s.query(Users).filter_by(username=user).count()
        return match_count > 0
def change_password(username, newpassword):
    """Store a new sha256-hashed password; False if the user is missing."""
    if not user_exists(username):
        return False
    hashed = generate_password_hash(newpassword, method='sha256')
    with session_scope() as s:
        updated = s.query(Users).filter_by(username=username) \
                   .update({"password": hashed})
        return bool(updated)
def add_user_session_info(username, event, sessionid, level=1):
    """Record a session event if tracing is enabled and a session id exists.

    Returns True when a row was written, False when the event's *level*
    exceeds the trace level or no session id was supplied.
    """
    TRACELEVEL = 2  # TODO: move this to config file
    if level > TRACELEVEL:
        return False
    with session_scope() as s:
        if not sessionid:
            return False
        s.add(UsersLogins(username=username, event=event,
                          eventtime=datetime.now(), sessionid=sessionid))
        return True
def change_user(first, last, email, engine):
    """Update a user's profile details. Placeholder -- not implemented."""
    pass


def send_password_key(email, firstname, engine):
    """Send a password-reset key by email. Placeholder -- not implemented."""
    pass


def validate_password_key(email, key, engine):
    """Validate a password-reset key. Placeholder -- not implemented."""
    pass
| 26.797753 | 139 | 0.651992 |
eef64b7b01a7fda56b7c56adfe78675847b05b4d | 838 | py | Python | res/versioning.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 3 | 2022-02-15T03:07:51.000Z | 2022-03-09T13:14:52.000Z | res/versioning.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 57 | 2022-01-31T22:33:17.000Z | 2022-03-28T22:05:53.000Z | res/versioning.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 2 | 2022-02-23T23:05:35.000Z | 2022-03-03T21:32:53.000Z | # generates a version data file that will be read by pyinstaller
import pyinstaller_versionfile
import sys
import os
import re
# TODO move this to the do-it scripts
# this file contains the actual version, generated by setuptools_scm

# Make the repository root importable so tabcmd's generated version module
# can be loaded.
mymodule_dir = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(mymodule_dir)

from tabcmd.execution._version import version
print(version)

# Strip letters and '+' (dev/local-version markers) so only the numeric
# dotted version remains, as required by the Windows version resource.
numeric_version = re.sub(r'[a-z+]', '', version.lower())
print(numeric_version)

output_file = os.path.join(mymodule_dir, "versionfile.txt")
print(output_file)
input_file = os.path.join(mymodule_dir, 'res', "metadata.yml")

# Generate the pyinstaller version-resource file from the metadata template.
pyinstaller_versionfile.create_versionfile_from_input_file(
    output_file, input_file,
    # optional, can be set to overwrite version information (equivalent to --version when using the CLI)
    version=numeric_version
)
| 32.230769 | 104 | 0.78043 |
1660e53e9326fb11ce4ffc472a9da97bcf230a4d | 278 | py | Python | tests/asp/cautious/count.example3.cautious.asp.test.py | bernardocuteri/wasp | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | [
"Apache-2.0"
] | 19 | 2015-12-03T08:53:45.000Z | 2022-03-31T02:09:43.000Z | tests/asp/cautious/count.example3.cautious.asp.test.py | bernardocuteri/wasp | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | [
"Apache-2.0"
] | 80 | 2017-11-25T07:57:32.000Z | 2018-06-10T19:03:30.000Z | tests/asp/cautious/count.example3.cautious.asp.test.py | bernardocuteri/wasp | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | [
"Apache-2.0"
] | 6 | 2015-01-15T07:51:48.000Z | 2020-06-18T14:47:48.000Z | input = """
1 2 0 0
1 3 0 0
1 4 2 1 5 6
1 5 2 1 4 6
1 6 0 0
1 7 2 1 8 9
1 8 2 1 7 9
1 9 0 0
2 11 2 0 1 7 4
2 12 2 0 2 7 4
1 10 2 1 12 11
1 1 1 1 10
1 1 2 0 7 8
1 1 2 0 4 5
0
10 ok
5 -a(1)
8 -a(0)
4 a(1)
7 a(0)
2 i(0)
3 i(1)
0
B+
0
B-
1
0
1
"""
# Expected cautious consequences for this ASP test fixture.
output = """
{i(0), i(1), ok}
"""
| 7.942857 | 16 | 0.467626 |
6921c14f43035f706fceb773e9b84c5193e18bed | 653 | py | Python | dags/gs_list_buckets.py | elifesciences/sciencebeam-airflow | 21f6b747115e11f3de80c3a1e501e1e7c07b6253 | [
"MIT"
] | 4 | 2020-01-03T14:51:36.000Z | 2021-04-06T06:29:30.000Z | dags/gs_list_buckets.py | elifesciences/sciencebeam-airflow | 21f6b747115e11f3de80c3a1e501e1e7c07b6253 | [
"MIT"
] | 62 | 2019-07-02T11:50:48.000Z | 2021-07-15T05:33:12.000Z | dags/gs_list_buckets.py | elifesciences/sciencebeam-airflow | 21f6b747115e11f3de80c3a1e501e1e7c07b6253 | [
"MIT"
] | null | null | null | import airflow
from airflow.operators.python import PythonOperator
from airflow.models import DAG
from gcloud import storage
DEFAULT_ARGS = {
'start_date': airflow.utils.dates.days_ago(2)
}
def gs_list_buckets(*_, **__):
    """Airflow task callable: list all GCS buckets and return a summary string.

    Positional/keyword arguments passed by Airflow are accepted and ignored.
    """
    client = storage.Client()
    items = list(client.list_buckets())
    print('buckets:', items)
    summary = 'buckets: %s' % (items)
    return summary
def create_dag():
    """Build the `gs_list_buckets` DAG containing a single PythonOperator task."""
    dag = DAG(dag_id='gs_list_buckets',
              default_args=DEFAULT_ARGS,
              schedule_interval=None)
    PythonOperator(task_id='gs_list_buckets',
                   python_callable=gs_list_buckets,
                   dag=dag)
    return dag
MAIN_DAG = create_dag()
| 18.657143 | 60 | 0.678407 |
d2c14e8de41fefcee0c22c212b77fc2793d09c59 | 3,445 | py | Python | src/meltano/core/m5o/reports_service.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 122 | 2021-06-21T17:30:29.000Z | 2022-03-25T06:21:38.000Z | src/meltano/core/m5o/reports_service.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | null | null | null | src/meltano/core/m5o/reports_service.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 21 | 2021-06-22T10:08:15.000Z | 2022-03-18T08:57:02.000Z | import json
import os
from meltano.core.utils import slugify
from .dashboards_service import DashboardsService
from .m5o_collection_parser import M5oCollectionParser, M5oCollectionParserTypes
from .m5o_file_parser import MeltanoAnalysisFileParser
class ReportAlreadyExistsError(Exception):
    """Raised when saving a report whose id or slug is already taken."""

    def __init__(self, report, field):
        # `field` names the conflicting attribute ("id" or "slug").
        self.report, self.field = report, field

    @property
    def record(self):
        """The pre-existing report that caused the conflict."""
        return self.report
class ReportDoesNotExistError(Exception):
    """Raised when a requested report cannot be found on disk."""

    def __init__(self, report):
        # The report payload (or lookup data) that failed to resolve.
        self.report = report
class ReportsService:
    """CRUD operations over `*.report.m5o` files in the project's
    analyze/reports directory."""

    # Schema version stamped into newly saved reports.
    VERSION = "1.0.0"

    def __init__(self, project):
        self.project = project

    def get_reports(self):
        """Parse and return every report found under analyze/reports."""
        reportsParser = M5oCollectionParser(
            self.project.analyze_dir("reports"), M5oCollectionParserTypes.Report
        )
        return reportsParser.parse()

    def get_report(self, report_id):
        """Return the report dict with the given id, or None if absent."""
        reports = self.get_reports()
        report = next(filter(lambda r: r["id"] == report_id, reports), None)
        return report

    def save_report(self, data):
        """Persist a new report; raises ReportAlreadyExistsError on an id
        or slug collision. Returns the enriched report dict."""
        if "id" in data:
            existing_report = self.get_report(data["id"])
            if existing_report:
                raise ReportAlreadyExistsError(existing_report, "id")

        name = data["name"]
        slug = slugify(name)
        file_path = self.project.analyze_dir("reports", f"{slug}.report.m5o")

        # A different report may already own this slug's filename.
        if os.path.exists(file_path):
            with file_path.open() as f:
                existing_report = json.load(f)
                raise ReportAlreadyExistsError(existing_report, "slug")

        # Fill in the base m5o bookkeeping fields (path, slug, ids, ...).
        data = MeltanoAnalysisFileParser.fill_base_m5o_dict(
            file_path.relative_to(self.project.root), slug, data
        )
        data["version"] = ReportsService.VERSION

        with self.project.file_update():
            with file_path.open("w") as f:
                json.dump(data, f)

        return data

    def delete_report(self, data):
        """Delete the report identified by data["id"], removing it from any
        dashboards; raises ReportDoesNotExistError if the file is gone."""
        # NOTE(review): get_report may return None for an unknown id, which
        # would raise TypeError on the subscript below -- confirm callers
        # always pass a valid id.
        report = self.get_report(data["id"])
        slug = report["slug"]
        file_path = self.project.analyze_dir("reports", f"{slug}.report.m5o")
        if not os.path.exists(file_path):
            raise ReportDoesNotExistError(data)

        DashboardsService(self.project).remove_report_from_dashboards(report["id"])

        with self.project.file_update():
            os.remove(file_path)

        return data

    def update_report(self, data):
        """Rewrite an existing report, renaming its file when the name (and
        hence slug) changed; raises ReportAlreadyExistsError if the new slug
        is taken by another file."""
        id = data["id"]
        existing_report = self.get_report(id)
        slug = existing_report["slug"]
        file_path = self.project.analyze_dir("reports", f"{slug}.report.m5o")

        new_name = data["name"]
        new_slug = slugify(new_name)
        new_file_path = self.project.analyze_dir("reports", f"{new_slug}.report.m5o")
        is_same_file = new_slug == slug
        if not is_same_file and os.path.exists(new_file_path):
            with new_file_path.open() as f:
                existing_report = json.load(f)
                raise ReportAlreadyExistsError(existing_report, "slug")

        # Remove the old file first, then write the updated payload.
        with self.project.file_update():
            os.remove(file_path)

        data["slug"] = new_slug
        data["path"] = str(new_file_path.relative_to(self.project.root))
        # NOTE(review): unlike save_report, the "version" field is not
        # refreshed here -- confirm that is intentional.
        with self.project.file_update():
            with new_file_path.open("w") as f:
                json.dump(data, f)

        return data
| 29.956522 | 85 | 0.634253 |
7363540292f1c9a1f279b2301637e3b9ea985773 | 151,006 | py | Python | red_canary/komand_red_canary/triggers/new_events/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | red_canary/komand_red_canary/triggers/new_events/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | red_canary/komand_red_canary/triggers/new_events/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable description of the trigger (generated by the Komand SDK).
    DESCRIPTION = "Check for new events"


class Input:
    # Input-parameter key constants for the trigger schema.
    DATE_OFFSET = "date_offset"
    FORCE_OFFSET = "force_offset"
    FREQUENCY = "frequency"


class Output:
    # Output key constant for the trigger schema.
    EVENT = "event"
class NewEventsInput(komand.Input):
    """Input schema for the New Events trigger (generated by the Komand SDK).

    Declares `frequency` (required poll interval in seconds, default 5), an
    optional `date_offset` start date, and a `force_offset` flag.
    """

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "date_offset": {
      "type": "string",
      "title": "Date Offset",
      "displayType": "date",
      "description": "Set past date to pull events from that time forward",
      "format": "date-time",
      "order": 2
    },
    "force_offset": {
      "type": "boolean",
      "title": "Force Offset",
      "description": "Forces offset no matter whats in the cache",
      "order": 3
    },
    "frequency": {
      "type": "integer",
      "title": "Frequency",
      "description": "How often the trigger should check for new events in seconds",
      "default": 5,
      "order": 1
    }
  },
  "required": [
    "frequency"
  ]
}
  """)

    def __init__(self):
        # Delegate to komand.Input with the parsed JSON schema.
        super(self.__class__, self).__init__(self.schema)
class NewEventsOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"event": {
"$ref": "#/definitions/event",
"title": "Event",
"description": "New event",
"order": 1
}
},
"definitions": {
"detector": {
"type": "object",
"title": "detector",
"properties": {
"attributes": {
"$ref": "#/definitions/detector_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"description": "Unique identifier of the detector",
"order": 2
},
"relationships": {
"$ref": "#/definitions/detector_relationships",
"title": "Relationships",
"description": "Resources related to this object",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT\\u0026CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT\\u0026CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"event": {
"type": "object",
"title": "event",
"properties": {
"attributes": {
"$ref": "#/definitions/event_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"description": "Unique identifier of the event",
"order": 2
},
"links": {
"$ref": "#/definitions/event_links",
"title": "Links",
"description": "Resources associated with this object",
"order": 5
},
"relationships": {
"$ref": "#/definitions/event_relationships",
"title": "Relationships",
"description": "Resources related to this object",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"detector": {
"type": "object",
"title": "detector",
"properties": {
"attributes": {
"$ref": "#/definitions/detector_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"description": "Unique identifier of the detector",
"order": 2
},
"relationships": {
"$ref": "#/definitions/detector_relationships",
"title": "Relationships",
"description": "Resources related to this object",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT&CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT&CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"event_attributes": {
"type": "object",
"title": "event_attributes",
"properties": {
"detectors": {
"type": "array",
"title": "Detectors",
"description": "Detectors and MITRE ATT&CK tactics associated with the event",
"items": {
"$ref": "#/definitions/detector"
},
"order": 2
},
"process": {
"$ref": "#/definitions/operating_system_process",
"title": "Process",
"description": "The process associated with the event",
"order": 1
}
},
"definitions": {
"detector": {
"type": "object",
"title": "detector",
"properties": {
"attributes": {
"$ref": "#/definitions/detector_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"description": "Unique identifier of the detector",
"order": 2
},
"relationships": {
"$ref": "#/definitions/detector_relationships",
"title": "Relationships",
"description": "Resources related to this object",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT&CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT&CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process": {
"type": "object",
"title": "operating_system_process",
"properties": {
"attributes": {
"$ref": "#/definitions/operating_system_process_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identified Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"event_links": {
"type": "object",
"title": "event_links",
"properties": {
"self": {
"$ref": "#/definitions/resource_link",
"title": "Self",
"description": "Self",
"order": 1
}
},
"definitions": {
"resource_link": {
"type": "object",
"title": "resource_link",
"properties": {
"href": {
"type": "string",
"title": "Href",
"description": "Link to a resource",
"order": 1
}
}
}
}
},
"event_relationships": {
"type": "object",
"title": "event_relationships",
"properties": {
"endpoint": {
"$ref": "#/definitions/resource_relationship",
"title": "Endpoint",
"description": "Endpoint",
"order": 1
},
"endpoint_user": {
"$ref": "#/definitions/resource_relationship",
"title": "Endpoint User",
"description": "Endpoint user",
"order": 2
}
},
"definitions": {
"resource_relationship": {
"type": "object",
"title": "resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process": {
"type": "object",
"title": "operating_system_process",
"properties": {
"attributes": {
"$ref": "#/definitions/operating_system_process_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
},
"resource_link": {
"type": "object",
"title": "resource_link",
"properties": {
"href": {
"type": "string",
"title": "Href",
"description": "Link to a resource",
"order": 1
}
}
},
"resource_relationship": {
"type": "object",
"title": "resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"event_attributes": {
"type": "object",
"title": "event_attributes",
"properties": {
"detectors": {
"type": "array",
"title": "Detectors",
"description": "Detectors and MITRE ATT\\u0026CK tactics associated with the event",
"items": {
"$ref": "#/definitions/detector"
},
"order": 2
},
"process": {
"$ref": "#/definitions/operating_system_process",
"title": "Process",
"description": "The process associated with the event",
"order": 1
}
},
"definitions": {
"detector": {
"type": "object",
"title": "detector",
"properties": {
"attributes": {
"$ref": "#/definitions/detector_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"description": "Unique identifier of the detector",
"order": 2
},
"relationships": {
"$ref": "#/definitions/detector_relationships",
"title": "Relationships",
"description": "Resources related to this object",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT\\u0026CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_attributes": {
"type": "object",
"title": "detector_attributes",
"properties": {
"attack_technique_identifiers": {
"type": "array",
"title": "Attack Technique Identifiers",
"description": "The specific ATT\\u0026CK Techniques the detector maps to",
"items": {
"type": "string"
},
"order": 4
},
"contributing_intelligence": {
"type": "string",
"title": "Contributing Intelligence",
"description": "The type of adversary intelligence supporting this detector",
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"description": "Description of the activity the detector identifies in Markdown format",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the Red Canary detector",
"order": 1
}
}
},
"detector_relationships": {
"type": "object",
"title": "detector_relationships",
"properties": {
"attack_techniques": {
"type": "array",
"title": "Attack Techniques",
"description": "Attack techniques",
"items": {
"$ref": "#/definitions/detector_resource_relationship"
},
"order": 1
}
},
"definitions": {
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship": {
"type": "object",
"title": "detector_resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/detector_resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"detector_resource_relationship_data": {
"type": "object",
"title": "detector_resource_relationship_data",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process": {
"type": "object",
"title": "operating_system_process",
"properties": {
"attributes": {
"$ref": "#/definitions/operating_system_process_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"event_links": {
"type": "object",
"title": "event_links",
"properties": {
"self": {
"$ref": "#/definitions/resource_link",
"title": "Self",
"description": "Self",
"order": 1
}
},
"definitions": {
"resource_link": {
"type": "object",
"title": "resource_link",
"properties": {
"href": {
"type": "string",
"title": "Href",
"description": "Link to a resource",
"order": 1
}
}
}
}
},
"event_relationships": {
"type": "object",
"title": "event_relationships",
"properties": {
"endpoint": {
"$ref": "#/definitions/resource_relationship",
"title": "Endpoint",
"description": "Endpoint",
"order": 1
},
"endpoint_user": {
"$ref": "#/definitions/resource_relationship",
"title": "Endpoint User",
"description": "Endpoint user",
"order": 2
}
},
"definitions": {
"resource_relationship": {
"type": "object",
"title": "resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process": {
"type": "object",
"title": "operating_system_process",
"properties": {
"attributes": {
"$ref": "#/definitions/operating_system_process_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"operating_system_process_attributes": {
"type": "object",
"title": "operating_system_process_attributes",
"properties": {
"command_line": {
"$ref": "#/definitions/process_command_line",
"title": "Command Line",
"description": "The command line associated with the process",
"order": 4
},
"image": {
"$ref": "#/definitions/file",
"title": "Image",
"description": "The process image associated with the activity, if applicable and known",
"order": 3
},
"operating_system_pid": {
"type": "integer",
"title": "Operating System PID",
"description": "The PID of the process as reported by the operating system",
"order": 2
},
"started_at": {
"type": "string",
"title": "Started At",
"displayType": "date",
"description": "The time the process started",
"format": "date-time",
"order": 1
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line": {
"type": "object",
"title": "process_command_line",
"properties": {
"attributes": {
"$ref": "#/definitions/process_command_line_attributes",
"title": "Attributes",
"description": "Attributes of the resource",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
},
"definitions": {
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
},
"process_command_line_attributes": {
"type": "object",
"title": "process_command_line_attributes",
"properties": {
"command_line": {
"type": "string",
"title": "Command Line",
"description": "The command line associated with the activity, if applicable and known",
"order": 1
},
"command_line_decoded": {
"type": "string",
"title": "Command Line Decoded",
"description": "The command line associated with the activity, if applicable and known, after passing through several decoding attempts",
"order": 2
},
"identified_encodings": {
"type": "array",
"title": "Identifier Encodings",
"description": "Possible encodings that may have been used throughout the command line",
"items": {
"type": "string"
},
"order": 3
}
}
},
"resource_link": {
"type": "object",
"title": "resource_link",
"properties": {
"href": {
"type": "string",
"title": "Href",
"description": "Link to a resource",
"order": 1
}
}
},
"resource_relationship": {
"type": "object",
"title": "resource_relationship",
"properties": {
"data": {
"$ref": "#/definitions/resource_relationship_data",
"title": "Data",
"description": "Data",
"order": 2
},
"links": {
"$ref": "#/definitions/resource_relationship_links",
"title": "Links",
"description": "Links",
"order": 1
}
},
"definitions": {
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
},
"resource_relationship_data": {
"type": "object",
"title": "resource_relationship_data",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type of object",
"order": 1
}
}
},
"resource_relationship_links": {
"type": "object",
"title": "resource_relationship_links",
"properties": {
"related": {
"type": "string",
"title": "Related",
"description": "Link to resource",
"order": 1
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 35.749527 | 167 | 0.375257 |
b079950c486b6cc2602034b5fb168c792c98c250 | 2,851 | py | Python | flaskapp/forms.py | dushyantkhosla/flask-4-ds | af7b1543c78a730a45020d1a64e4eb0005f4c11e | [
"MIT"
] | null | null | null | flaskapp/forms.py | dushyantkhosla/flask-4-ds | af7b1543c78a730a45020d1a64e4eb0005f4c11e | [
"MIT"
] | 7 | 2020-03-24T17:35:31.000Z | 2022-01-13T01:39:56.000Z | flaskapp/forms.py | dushyantkhosla/flask-4-ds | af7b1543c78a730a45020d1a64e4eb0005f4c11e | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flaskapp.models import User, Post
class RegistrationForm(FlaskForm):
"""
"""
username = \
StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = \
StringField('Email',
validators=[DataRequired(), Email()])
password = \
PasswordField('Password',
validators=[DataRequired()])
confirm_password = \
PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = \
SubmitField('Sign Up')
# def validate_field(self, field):
# if True:
# raise ValidationError('Validation Message')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is already taken, please choose another.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is already taken, please choose another.')
class LoginForm(FlaskForm):
"""
"""
email = \
StringField('Email',
validators=[DataRequired(), Email()])
password = \
PasswordField('Password',
validators=[DataRequired()])
remember = \
BooleanField('Remember Me')
submit = \
SubmitField('Log In')
class UpdateAccountForm(FlaskForm):
username = \
StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = \
StringField('Email', validators=[DataRequired(), Email()])
picture = \
FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = \
SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class PostForm(FlaskForm):
"""
"""
title = \
StringField('Title', validators=[DataRequired()])
content = \
TextAreaField('Content', validators=[DataRequired()])
submit = \
SubmitField('Post')
| 31.32967 | 95 | 0.638022 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.