hexsha : string
size : int64
ext : string
lang : string
max_stars_repo_path : string
max_stars_repo_name : string
max_stars_repo_head_hexsha : string
max_stars_repo_licenses : list
max_stars_count : int64
max_stars_repo_stars_event_min_datetime : string
max_stars_repo_stars_event_max_datetime : string
max_issues_repo_path : string
max_issues_repo_name : string
max_issues_repo_head_hexsha : string
max_issues_repo_licenses : list
max_issues_count : int64
max_issues_repo_issues_event_min_datetime : string
max_issues_repo_issues_event_max_datetime : string
max_forks_repo_path : string
max_forks_repo_name : string
max_forks_repo_head_hexsha : string
max_forks_repo_licenses : list
max_forks_count : int64
max_forks_repo_forks_event_min_datetime : string
max_forks_repo_forks_event_max_datetime : string
content : string
avg_line_length : float64
max_line_length : int64
alphanum_fraction : float64
qsc_code_num_words_quality_signal : int64
qsc_code_num_chars_quality_signal : float64
qsc_code_mean_word_length_quality_signal : float64
qsc_code_frac_words_unique_quality_signal : float64
qsc_code_frac_chars_top_2grams_quality_signal : float64
qsc_code_frac_chars_top_3grams_quality_signal : float64
qsc_code_frac_chars_top_4grams_quality_signal : float64
qsc_code_frac_chars_dupe_5grams_quality_signal : float64
qsc_code_frac_chars_dupe_6grams_quality_signal : float64
qsc_code_frac_chars_dupe_7grams_quality_signal : float64
qsc_code_frac_chars_dupe_8grams_quality_signal : float64
qsc_code_frac_chars_dupe_9grams_quality_signal : float64
qsc_code_frac_chars_dupe_10grams_quality_signal : float64
qsc_code_frac_chars_replacement_symbols_quality_signal : float64
qsc_code_frac_chars_digital_quality_signal : float64
qsc_code_frac_chars_whitespace_quality_signal : float64
qsc_code_size_file_byte_quality_signal : float64
qsc_code_num_lines_quality_signal : float64
qsc_code_num_chars_line_max_quality_signal : float64
qsc_code_num_chars_line_mean_quality_signal : float64
qsc_code_frac_chars_alphabet_quality_signal : float64
qsc_code_frac_chars_comments_quality_signal : float64
qsc_code_cate_xml_start_quality_signal : float64
qsc_code_frac_lines_dupe_lines_quality_signal : float64
qsc_code_cate_autogen_quality_signal : float64
qsc_code_frac_lines_long_string_quality_signal : float64
qsc_code_frac_chars_string_length_quality_signal : float64
qsc_code_frac_chars_long_word_length_quality_signal : float64
qsc_code_frac_lines_string_concat_quality_signal : float64
qsc_code_cate_encoded_data_quality_signal : float64
qsc_code_frac_chars_hex_words_quality_signal : float64
qsc_code_frac_lines_prompt_comments_quality_signal : float64
qsc_code_frac_lines_assert_quality_signal : float64
qsc_codepython_cate_ast_quality_signal : float64
qsc_codepython_frac_lines_func_ratio_quality_signal : float64
qsc_codepython_cate_var_zero_quality_signal : bool
qsc_codepython_frac_lines_pass_quality_signal : float64
qsc_codepython_frac_lines_import_quality_signal : float64
qsc_codepython_frac_lines_simplefunc_quality_signal : float64
qsc_codepython_score_lines_no_logic_quality_signal : float64
qsc_codepython_frac_lines_print_quality_signal : float64
qsc_code_num_words : int64
qsc_code_num_chars : int64
qsc_code_mean_word_length : int64
qsc_code_frac_words_unique : null
qsc_code_frac_chars_top_2grams : int64
qsc_code_frac_chars_top_3grams : int64
qsc_code_frac_chars_top_4grams : int64
qsc_code_frac_chars_dupe_5grams : int64
qsc_code_frac_chars_dupe_6grams : int64
qsc_code_frac_chars_dupe_7grams : int64
qsc_code_frac_chars_dupe_8grams : int64
qsc_code_frac_chars_dupe_9grams : int64
qsc_code_frac_chars_dupe_10grams : int64
qsc_code_frac_chars_replacement_symbols : int64
qsc_code_frac_chars_digital : int64
qsc_code_frac_chars_whitespace : int64
qsc_code_size_file_byte : int64
qsc_code_num_lines : int64
qsc_code_num_chars_line_max : int64
qsc_code_num_chars_line_mean : int64
qsc_code_frac_chars_alphabet : int64
qsc_code_frac_chars_comments : int64
qsc_code_cate_xml_start : int64
qsc_code_frac_lines_dupe_lines : int64
qsc_code_cate_autogen : int64
qsc_code_frac_lines_long_string : int64
qsc_code_frac_chars_string_length : int64
qsc_code_frac_chars_long_word_length : int64
qsc_code_frac_lines_string_concat : null
qsc_code_cate_encoded_data : int64
qsc_code_frac_chars_hex_words : int64
qsc_code_frac_lines_prompt_comments : int64
qsc_code_frac_lines_assert : int64
qsc_codepython_cate_ast : int64
qsc_codepython_frac_lines_func_ratio : int64
qsc_codepython_cate_var_zero : int64
qsc_codepython_frac_lines_pass : int64
qsc_codepython_frac_lines_import : int64
qsc_codepython_frac_lines_simplefunc : int64
qsc_codepython_score_lines_no_logic : int64
qsc_codepython_frac_lines_print : int64
effective : string
hits : int64
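The records below follow the schema above, one value per field in column order. As a minimal sketch (not the dataset's official loader), the rows could be read and filtered on a few of these columns with pandas, assuming they are exported as a Parquet file; "sample.parquet" is a hypothetical file name used only for illustration.

# Minimal sketch: assumes the rows below are stored in a Parquet file
# ("sample.parquet" is a hypothetical name) with the column names listed above.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# Keep Python files with moderate line lengths and mostly alphanumeric text.
mask = (
    (df["lang"] == "Python")
    & (df["max_line_length"] <= 200)
    & (df["alphanum_fraction"] >= 0.5)
)

for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])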
165e5478bb41b24d4a9ab5bce186c085b7367f24
4,937
py
Python
app/api/admin_sales/discounted.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
3
2019-09-05T05:28:49.000Z
2020-06-10T09:03:37.000Z
app/api/admin_sales/discounted.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
null
null
null
app/api/admin_sales/discounted.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
null
null
null
from sqlalchemy import func from flask_rest_jsonapi import ResourceList from marshmallow_jsonapi import fields from marshmallow_jsonapi.flask import Schema from app.api.helpers.utilities import dasherize from app.api.bootstrap import api from app.models import db from app.models.discount_code import DiscountCode from app.models.event import Event from app.models.order import Order, OrderTicket from app.models.user import User def sales_per_marketer_and_discount_by_status(status): return db.session.query(Event.id.label('event_id'), DiscountCode.id.label('discount_code_id'), User.id.label('marketer_id'), func.sum(Order.amount).label(status + '_sales'), func.sum(OrderTicket.quantity).label(status + '_tickets')) \ .filter(Event.id == Order.event_id) \ .filter(Order.marketer_id == User.id) \ .filter(Order.discount_code_id == DiscountCode.id) \ .filter(Order.status == status) \ .group_by(Event) \ .group_by(DiscountCode) \ .group_by(User) \ .group_by(Order.status) \ .cte() class AdminSalesDiscountedSchema(Schema): """ Discounted sales by event Provides Event name, discount code, marketer mail, count of tickets and total sales for orders grouped by status """ class Meta: type_ = 'admin-sales-discounted' self_view = 'v1.admin_sales_discounted' inflect = dasherize id = fields.String() code = fields.String() email = fields.String() event_name = fields.String() payment_currency = fields.String() sales = fields.Method('calc_sales') @staticmethod def calc_sales(obj): """ Returns sales (dictionary with total sales and ticket count) for placed, completed and pending orders """ res = {'placed': {}, 'completed': {}, 'pending': {}} res['placed']['sales_total'] = obj.placed_sales or 0 res['placed']['ticket_count'] = obj.placed_tickets or 0 res['completed']['sales_total'] = obj.completed_sales or 0 res['completed']['ticket_count'] = obj.completed_tickets or 0 res['pending']['sales_total'] = obj.pending_sales or 0 res['pending']['ticket_count'] = obj.pending_tickets or 0 return res class AdminSalesDiscountedList(ResourceList): """ Resource for sales by marketer. Joins event marketer and orders and subsequently accumulates sales by status """ def query(self, _): pending = sales_per_marketer_and_discount_by_status('pending') completed = sales_per_marketer_and_discount_by_status('completed') placed = sales_per_marketer_and_discount_by_status('placed') discounts = self.session.query(Event.id.label('event_id'), Event.name.label('event_name'), DiscountCode.id.label('discount_code_id'), DiscountCode.code.label('code'), User.id.label('marketer_id'), User.email.label('email')) \ .filter(Event.id == Order.event_id) \ .filter(Order.marketer_id == User.id) \ .filter(Order.discount_code_id == DiscountCode.id) \ .cte() return self.session.query(discounts, pending, completed, placed) \ .outerjoin(pending, (pending.c.event_id == discounts.c.event_id) & (pending.c.discount_code_id == discounts.c.discount_code_id) & (pending.c.marketer_id == discounts.c.marketer_id)) \ .outerjoin(completed, (completed.c.event_id == discounts.c.event_id) & (completed.c.discount_code_id == discounts.c.discount_code_id) & (completed.c.marketer_id == discounts.c.marketer_id)) \ .outerjoin(placed, (placed.c.event_id == discounts.c.event_id) & (placed.c.discount_code_id == discounts.c.discount_code_id) & (placed.c.marketer_id == discounts.c.marketer_id)) methods = ['GET'] decorators = (api.has_permission('is_admin'), ) schema = AdminSalesDiscountedSchema data_layer = { 'model': Event, 'session': db.session, 'methods': { 'query': query } }
41.838983
102
0.552157
500
4,937
5.252
0.198
0.037319
0.053313
0.034273
0.302742
0.286748
0.261615
0.144326
0.113861
0.069307
0
0.002172
0.347174
4,937
117
103
42.196581
0.812597
0.074134
0
0.141176
0
0
0.078484
0.010479
0
0
0
0
0
1
0.035294
false
0
0.129412
0.011765
0.352941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
165f2a4da2ed50464bfa13f0495fc689063e0199
1,189
py
Python
api/skill/serializer.py
zaubermaerchen/imas_cg_api
45ebdde8c47ff4fabbf58b75721721f142afb46b
[ "MIT" ]
2
2016-02-01T21:03:53.000Z
2018-10-20T09:15:12.000Z
api/skill/serializer.py
zaubermaerchen/imas_cg_api
45ebdde8c47ff4fabbf58b75721721f142afb46b
[ "MIT" ]
1
2020-01-05T12:50:35.000Z
2020-01-05T12:50:35.000Z
api/skill/serializer.py
zaubermaerchen/imas_cg_api
45ebdde8c47ff4fabbf58b75721721f142afb46b
[ "MIT" ]
null
null
null
# coding: utf-8
from rest_framework import serializers

from data.models import Skill, SkillValue


class ListSerializer(serializers.ModelSerializer):
    skill_value_list = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Skill
        fields = [
            'skill_id',
            'target_unit',
            'target_member',
            'target_type',
            'target_num',
            'target_param',
            'skill_value_id',
            'skill_value_list',
            'comment'
        ]

    @staticmethod
    def get_skill_value_list(obj):
        return SkillValue.get_value_list(obj.skill_value_id)


class Costar(object):
    def __init__(self, name, count):
        self.name = name
        self.count = count


class CostarSerializer(serializers.Serializer):
    name = serializers.CharField(max_length=255)
    count = serializers.IntegerField()

    def create(self, validated_data):
        return Costar(**validated_data)

    def update(self, instance, validated_data):
        instance.name = validated_data.get('name', instance.name)
        instance.count = validated_data.get('count', instance.count)
        return instance
26.422222
72
0.64508
126
1,189
5.84127
0.428571
0.067935
0.057065
0
0
0
0
0
0
0
0
0.004566
0.263246
1,189
44
73
27.022727
0.835616
0.010934
0
0
0
0
0.094549
0
0
0
0
0
0
1
0.121212
false
0
0.060606
0.060606
0.484848
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1660d7a15a18998c6c8ae4f9e573b184061a0341
5,061
py
Python
Codes/Converting_RGB_to_GreyScale.py
sichkar-valentyn/Image_processing_in_Python
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
[ "MIT" ]
3
2018-12-02T03:59:51.000Z
2019-11-20T18:37:41.000Z
Codes/Converting_RGB_to_GreyScale.py
sichkar-valentyn/Image_processing_in_Python
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
[ "MIT" ]
null
null
null
Codes/Converting_RGB_to_GreyScale.py
sichkar-valentyn/Image_processing_in_Python
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
[ "MIT" ]
2
2018-10-18T07:01:26.000Z
2022-03-22T08:22:33.000Z
# File: Converting_RGB_to_GreyScale.py # Description: Opening RGB image as array, converting to GreyScale and saving result into new file # Environment: PyCharm and Anaconda environment # # MIT License # Copyright (c) 2018 Valentyn N Sichkar # github.com/sichkar-valentyn # # Reference to: # Valentyn N Sichkar. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603 # Opening RGB image as array, converting to GreyScale and saving result into new file # Importing needed libraries import numpy as np from PIL import Image import matplotlib.pyplot as plt from skimage import color from skimage import io import scipy.misc # Creating an array from image data image_RGB = Image.open("images/eagle.jpg") image_np = np.array(image_RGB) # Checking the type of the array print(type(image_np)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_np.shape) # Showing image with every channel separately channel_R = image_np[:, :, 0] channel_G = image_np[:, :, 1] channel_B = image_np[:, :, 2] # Creating a figure with subplots f, ax = plt.subplots(nrows=2, ncols=2) # ax is (2, 2) np array and to make it easier to read we use 'flatten' function # Or we can call each time ax[0, 0] ax0, ax1, ax2, ax3 = ax.flatten() # Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image') # Function to make distance between figures plt.tight_layout() # Giving the name to the window with figure f.canvas.set_window_title('Eagle image in three channels R, G and B') # Showing the plots plt.show() # Converting RGB image into GrayScale image # Using formula: # Y' = 0.299 R + 0.587 G + 0.114 B image_RGB = Image.open("images/eagle.jpg") image_np = np.array(image_RGB) image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114 # Checking the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array for saving - creating three channels with the same data in each # Firstly, creating array with zero elements # And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple # Now the shape will be (1080, 1920, 3) - which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping GreyScale image from 2D to 3D x = image_GreyScale.reshape((1080, 1920, 1)) # Finally, writing all data in three channels image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0] # Saving image into a file from obtained 3D array scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels) # Checking that image was written with three channels and they are identical result_1 = Image.open("images/result_1.jpg") result_1_np = np.array(result_1) print(result_1_np.shape) 
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2])) # Showing saved resulted image # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Here we don't need to specify the map like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to convert RGB image into GreyScale image image_RGB = io.imread("images/eagle.jpg") image_GreyScale = color.rgb2gray(image_RGB) # Checking the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file from processed array scipy.misc.imsave("images/result_2.jpg", image_GreyScale) # One more way for converting image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True) # Checking the type of the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_RGB_as_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file from processed array scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
33.966443
99
0.752223
823
5,061
4.490887
0.246659
0.064394
0.021645
0.032468
0.45671
0.404491
0.395833
0.366071
0.356872
0.34632
0
0.028813
0.135942
5,061
148
100
34.195946
0.816373
0.473227
0
0.267606
0
0
0.138196
0
0
0
0
0
0
1
0
false
0
0.084507
0
0.084507
0.15493
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1661f7c0c438355d7d875aa2c983973094881c84
3,193
py
Python
template_renderer.py
hamza-gheggad/gcp-iam-collector
02b46453b9ec23af07a0d81f7250f1de61e0ee23
[ "Apache-2.0" ]
null
null
null
template_renderer.py
hamza-gheggad/gcp-iam-collector
02b46453b9ec23af07a0d81f7250f1de61e0ee23
[ "Apache-2.0" ]
null
null
null
template_renderer.py
hamza-gheggad/gcp-iam-collector
02b46453b9ec23af07a0d81f7250f1de61e0ee23
[ "Apache-2.0" ]
null
null
null
import colorsys
import json

from jinja2 import Environment, PackageLoader

import graph


def create_html(formatted_nodes, formatted_edges, role_color_map, output_name):
    env = Environment(loader=PackageLoader('visualisation', '.'))
    template = env.get_template('visualisation.template')
    default_filters = list(graph.type_properties.keys())
    all_roles = list(role_color_map.keys())
    print(all_roles)
    html = template.render(formatted_nodes=formatted_nodes,
                           formatted_edges=formatted_edges,
                           type_properties=graph.type_properties,
                           default_filters=default_filters,
                           all_roles=all_roles)
    with open(output_name, "w+") as resource_file:
        resource_file.write(html)


def get_description(node):
    desc = node.get_type_name() + "</br>"
    if node.title:
        desc = desc + node.title + "</br>"
    if node.properties:
        for k, v in node.properties.items():
            desc = desc + k + ": " + str(v) + "</br>"
    return desc


def render(nodes, edges, output_name):
    color_map = roles_to_color_map(edges=edges)
    formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map)
    create_html(formatted_nodes, formatted_edges, color_map, output_name)


def color_for_role(role, all_roles):
    hue = float(all_roles.index(role)) / len(all_roles)
    return '#%02x%02x%02x' % tuple(int(c) * 255 for c in colorsys.hsv_to_rgb(hue, 1, 0.85))


def sanitise_role(role):
    return str(role).replace('roles/', '') \
        .lower() \
        .replace('writer', 'editor') \
        .replace('reader', 'viewer')


def roles_to_color_map(edges):
    all_roles = list({sanitise_role(e.role) for e in edges if e.role})
    role_map = {}
    for role in all_roles:
        role_map[role] = color_for_role(role, all_roles)
    role_map['other'] = '#00c0ff'
    return role_map


def format_graph(nodes, edges, role_color_map):
    nodes_list = []
    node_ids = {}
    for counter, node in enumerate(nodes):
        node_ids[node.id] = counter
        value = {
            'id': counter,
            'shape': 'icon',
            'label': node.name,
            'type': node.node_type,
            'icon': {
                'face': 'Font Awesome 5 Free',
                'code': node.get_font_code(),
                'size': node.get_size(),
                'color': node.get_color(),
                'weight': 'bold'
            }
        }
        description = get_description(node)
        if description:
            value['title'] = description
        nodes_list.append(json.dumps(value).replace("\\\\", "\\"))

    edges_list = []
    for edge in edges:
        value = {
            'from': node_ids[edge.node_from.id],
            'to': node_ids[edge.node_to.id],
            'arrows': 'to',
        }
        if edge.label:
            value['label'] = edge.label
        if edge.title:
            value['title'] = edge.title
        value['role'] = sanitise_role(edge.role) if edge.role else 'other'
        value['color'] = role_color_map[value['role']]
        edges_list.append(json.dumps(value))

    return nodes_list, edges_list
31.303922
91
0.593173
389
3,193
4.642674
0.262211
0.044297
0.063677
0.062016
0.117386
0.06866
0
0
0
0
0
0.007809
0.278108
3,193
102
92
31.303922
0.775705
0
0
0.024691
0
0
0.072949
0.006888
0
0
0
0
0
1
0.08642
false
0
0.049383
0.012346
0.197531
0.012346
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166293ba707b563d24827825716e3e79a6848c40
13,007
py
Python
powerapi/cli/tools.py
danglotb/powerapi
67b2508588bfe1e20d90f9fe6bccda34d3455262
[ "BSD-3-Clause" ]
null
null
null
powerapi/cli/tools.py
danglotb/powerapi
67b2508588bfe1e20d90f9fe6bccda34d3455262
[ "BSD-3-Clause" ]
null
null
null
powerapi/cli/tools.py
danglotb/powerapi
67b2508588bfe1e20d90f9fe6bccda34d3455262
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2018, INRIA # Copyright (c) 2018, University of Lille # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys import logging from functools import reduce from powerapi.exception import PowerAPIException from powerapi.cli.parser import MainParser, ComponentSubParser from powerapi.cli.parser import store_true from powerapi.cli.parser import BadValueException, MissingValueException from powerapi.cli.parser import BadTypeException, BadContextException from powerapi.cli.parser import UnknowArgException from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB from powerapi.puller import PullerActor from powerapi.pusher import PusherActor def enable_log(arg, val, args, acc): acc[arg] = logging.DEBUG return args, acc def check_csv_files(files): return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True) def extract_file_names(arg, val, args, acc): acc[arg] = val.split(',') return args, acc class CommonCLIParser(MainParser): def __init__(self): MainParser.__init__(self) self.add_argument('v', 'verbose', flag=True, action=enable_log, default=logging.NOTSET, help='enable verbose mode') self.add_argument('s', 'stream', flag=True, action=store_true, default=False, help='enable stream mode') subparser_mongo_input = ComponentSubParser('mongodb') subparser_mongo_input.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name', ) subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb') subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be storen in the database', default='HWPCReport') self.add_component_subparser('input', subparser_mongo_input, help_str='specify a database input : --db_output database_name ARG1 ARG2 ... 
') subparser_csv_input = ComponentSubParser('csv') subparser_csv_input.add_argument('f', 'files', help='specify input csv files with this format : file1,file2,file3', action=extract_file_names, default=[], check=check_csv_files, check_msg='one or more csv files couldn\'t be read') subparser_csv_input.add_argument('m', 'model', help='specify data type that will be storen in the database', default='HWPCReport') subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv') self.add_component_subparser('input', subparser_csv_input, help_str='specify a database input : --db_output database_name ARG1 ARG2 ... ') subparser_mongo_output = ComponentSubParser('mongodb') subparser_mongo_output.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name') subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be storen in the database', default='PowerReport') subparser_mongo_output.add_argument('n', 'name', help='specify puller name', default='pusher_mongodb') self.add_component_subparser('output', subparser_mongo_output, help_str='specify a database output : --db_output database_name ARG1 ARG2 ...') subparser_csv_output = ComponentSubParser('csv') subparser_csv_output.add_argument('d', 'directory', help='specify directory where where output csv files will be writen') subparser_csv_output.add_argument('m', 'model', help='specify data type that will be storen in the database', default='PowerReport') subparser_csv_output.add_argument('n', 'name', help='specify puller name', default='pusher_csv') self.add_component_subparser('output', subparser_csv_output, help_str='specify a database input : --db_output database_name ARG1 ARG2 ... ') subparser_influx_output = ComponentSubParser('influxdb') subparser_influx_output.add_argument('u', 'uri', help='sepcify InfluxDB uri') subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name') subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int) subparser_influx_output.add_argument('m', 'model', help='specify data type that will be storen in the database', default='PowerReport') subparser_influx_output.add_argument('n', 'name', help='specify puller name', default='pusher_influxdb') self.add_component_subparser('output', subparser_influx_output, help_str='specify a database input : --db_output database_name ARG1 ARG2 ... ') subparser_opentsdb_output = ComponentSubParser('opentsdb') subparser_opentsdb_output.add_argument('u', 'uri', help='sepcify openTSDB host') subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int) subparser_opentsdb_output.add_argument('metric_name', help='specify metric name') subparser_opentsdb_output.add_argument('m', 'model', help='specify data type that will be storen in the database', default='PowerReport') subparser_opentsdb_output.add_argument('n', 'name', help='specify puller name', default='pusher_opentsdb') self.add_component_subparser('output', subparser_opentsdb_output, help_str='specify a database input : --db_output database_name ARG1 ARG2 ... 
') def parse_argv(self): try: return self.parse(sys.argv[1:]) except BadValueException as exn: msg = 'CLI error : argument ' + exn.argument_name + ' : ' + exn.msg print(msg, file=sys.stderr) except MissingValueException as exn: msg = 'CLI error : argument ' + exn.argument_name + ' : expect a value' print(msg, file=sys.stderr) except BadTypeException as exn: msg = 'CLI error : argument ' + exn.argument_name + ' : expect ' msg += exn.article + ' ' + exn.type_name print(msg, file=sys.stderr) except UnknowArgException as exn: msg = 'CLI error : unknow argument ' + exn.argument_name print(msg, file=sys.stderr) except BadContextException as exn: msg = 'CLI error : argument ' + exn.argument_name msg += ' not used in the correct context\nUse it with the following arguments :' for main_arg_name, context_name in exn.context_list: msg += '\n --' + main_arg_name + ' ' + context_name print(msg, file=sys.stderr) sys.exit() class Generator: def __init__(self, component_group_name): self.component_group_name = component_group_name def generate(self, config): if self.component_group_name not in config: print('CLI error : no ' + self.component_group_name + ' specified', file=sys.stderr) sys.exit() actors = {} for component_type, components_list in config[self.component_group_name].items(): for component_name, component_config in components_list.items(): try: actors[component_name] = self._gen_actor(component_type, component_config, config) except KeyError as exn: msg = 'CLI error : argument ' + exn.args[0] msg += ' needed with --output ' + component_type print(msg, file=sys.stderr) sys.exit() return actors def _gen_actor(self, component_name, component_config, main_config): raise NotImplementedError() class ModelNameAlreadyUsed(PowerAPIException): """ Exception raised when attempting to add to a DBActorGenerator a model factory with a name already bound to another model factory in the DBActorGenerator """ class ModelNameAlreadyUsed(PowerAPIException): """ Exception raised when attempting to add to a DBActorGenerator a database factory with a name already bound to another database factory in the DBActorGenerator """ class DBActorGenerator(Generator): def __init__(self, component_group_name): Generator.__init__(self, component_group_name) self.model_factory = { 'HWPCReport': HWPCModel(), 'PowerReport': PowerModel(), 'FormulaReport': FormulaModel(), 'ControlReport': ControlModel(), } self.db_factory = { 'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']), 'csv': lambda db_config: CsvDB(current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'], files=[] if 'files' not in db_config else db_config['files']), 'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']), 'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']), } def add_model_factory(self, model_name, model_factory): if model_name in self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[model_name] = model_factory def add_db_factory(self, db_name, db_factory): if db_name in self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[db_name] = db_factory def _generate_db(self, db_name, db_config, main_config): return self.db_factory[db_name](db_config) def _gen_actor(self, db_name, db_config, main_config): db = self._generate_db(db_name, db_config, main_config) model = self.model_factory[db_config['model']] name = db_config['name'] return self._actor_factory(name, db, model, 
main_config['stream'], main_config['verbose']) def _actor_factory(self, name, db, model, stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter): DBActorGenerator.__init__(self, 'input') self.report_filter = report_filter def _actor_factory(self, name, db, model, stream_mode, level_logger): return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output') def _actor_factory(self, name, db, model, stream_mode, level_logger): return PusherActor(name, model, db, level_logger)
49.268939
128
0.667948
1,546
13,007
5.412031
0.189521
0.036811
0.036572
0.018406
0.506872
0.422852
0.366918
0.307159
0.285766
0.276443
0
0.002531
0.240486
13,007
263
129
49.456274
0.844417
0.140693
0
0.216374
0
0
0.196724
0
0
0
0
0
0
1
0.105263
false
0
0.081871
0.023392
0.280702
0.040936
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16635cf724808862aeb33d75c907fed77d96d1fc
857
py
Python
1 plainProgrammingBug/start 1 plainProgrammingBug.py
vishalbelsare/SLAPP3
da187b771831aaaabaee16a26ad341db2e968104
[ "CC0-1.0" ]
8
2017-10-18T05:19:17.000Z
2020-03-24T21:23:52.000Z
1 plainProgrammingBug/start 1 plainProgrammingBug.py
vishalbelsare/SLAPP3
da187b771831aaaabaee16a26ad341db2e968104
[ "CC0-1.0" ]
null
null
null
1 plainProgrammingBug/start 1 plainProgrammingBug.py
vishalbelsare/SLAPP3
da187b771831aaaabaee16a26ad341db2e968104
[ "CC0-1.0" ]
4
2017-10-25T09:07:49.000Z
2019-08-18T09:17:58.000Z
# start 1 plainProgrammingBug.py
import random


def SimpleBug():
    # the environment
    worldXSize = 80
    worldYSize = 80

    # the bug
    xPos = 40
    yPos = 40

    # the action
    for i in range(100):
        xPos += randomMove()
        yPos += randomMove()
        xPos = (xPos + worldXSize) % worldXSize
        yPos = (yPos + worldYSize) % worldYSize
        print("I moved to X = ", xPos, " Y = ", yPos)


# returns -1, 0, 1 with equal probability
def randomMove():
    return random.randint(-1, 1)


SimpleBug()

"""
you can eliminate the randomMove() function substituting

    xPos += randomMove()
    yPos += randomMove()

with

    xPos += random.randint(-1, 1)
    yPos += random.randint(-1, 1)

but the use of the function allows us to use here a self-explanatory name
"""
19.930233
69
0.568261
99
857
4.919192
0.515152
0.080082
0.086242
0.092402
0
0
0
0
0
0
0
0.036458
0.327888
857
42
70
20.404762
0.809028
0.123687
0
0
0
0
0.047059
0
0
0
0
0
0
1
0.133333
false
0
0.066667
0.066667
0.266667
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1665f41d1c03f32167e2cea236d3cf7a022b6b61
3,202
py
Python
google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py
bopopescu/Social-Lite
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
[ "Apache-2.0" ]
null
null
null
google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py
bopopescu/Social-Lite
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
[ "Apache-2.0" ]
4
2020-07-21T12:51:46.000Z
2022-01-22T10:29:25.000Z
google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py
bopopescu/Social-Lite
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
[ "Apache-2.0" ]
1
2020-07-25T18:17:57.000Z
2020-07-25T18:17:57.000Z
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resource policy command."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils as compute_api
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util


def _CommonArgs(parser, api_version):
  """A helper function to build args based on different API version."""
  messages = apis.GetMessagesModule('compute', api_version)
  flags.MakeResourcePolicyArg().AddArgument(parser)
  flags.AddCommonArgs(parser)
  flags.AddGroupPlacementArgs(parser, messages)
  parser.display_info.AddCacheUpdater(None)


@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateGroupPlacement(base.CreateCommand):
  """Create a Google Compute Engine Group Placement Resource Policy."""

  @staticmethod
  def Args(parser):
    _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION)

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client

    policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))

    messages = holder.client.messages
    resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages)
    create_request = messages.ComputeResourcePoliciesInsertRequest(
        resourcePolicy=resource_policy,
        project=policy_ref.project,
        region=policy_ref.region)

    service = holder.client.apitools_client.resourcePolicies
    return client.MakeRequests([(service, 'Insert', create_request)])[0]


@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateGroupPlacementBeta(CreateGroupPlacement):
  """Create a Google Compute Engine Group Placement Resource Policy."""

  @staticmethod
  def Args(parser):
    _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION)


CreateGroupPlacement.detailed_help = {
    'DESCRIPTION': """\
Create a Google Compute Engine Group Placement Resource Policy.
""",
    'EXAMPLES': """\
To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run:

  $ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2
"""
}
37.232558
109
0.777327
387
3,202
6.29199
0.405685
0.045996
0.021355
0.032854
0.240246
0.225873
0.195483
0.195483
0.195483
0.100205
0
0.00508
0.139288
3,202
85
110
37.670588
0.878447
0.254841
0
0.086957
0
0
0.015466
0
0
0
0
0
0
1
0.086957
false
0
0.217391
0
0.369565
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16661518293e1bbad26be3766a9addb9bc564758
629
py
Python
paperoni/io.py
notoraptor/paperoni
acdf2d3d790b98d6a171177ffd9d6342f86bc7ea
[ "MIT" ]
88
2020-08-27T17:58:58.000Z
2021-12-01T19:29:56.000Z
paperoni/io.py
notoraptor/paperoni
acdf2d3d790b98d6a171177ffd9d6342f86bc7ea
[ "MIT" ]
8
2020-08-27T02:54:11.000Z
2022-02-01T13:35:41.000Z
paperoni/io.py
notoraptor/paperoni
acdf2d3d790b98d6a171177ffd9d6342f86bc7ea
[ "MIT" ]
6
2020-08-25T16:43:28.000Z
2021-12-08T16:41:02.000Z
import json

from .papers import Papers
from .researchers import Researchers


def ResearchersFile(filename):
    """Parse a file containing researchers."""
    try:
        with open(filename, "r") as file:
            data = json.load(file)
    except FileNotFoundError:
        data = {}
    return Researchers(data, filename=filename)


def PapersFile(filename, researchers=None):
    """Parse a file containing papers."""
    try:
        with open(filename, "r") as file:
            data = json.load(file)
    except FileNotFoundError:
        data = {}
    return Papers(data, filename=filename, researchers=researchers)
25.16
67
0.655008
68
629
6.058824
0.352941
0.029126
0.048544
0.097087
0.364078
0.364078
0.364078
0.364078
0.364078
0.364078
0
0
0.243243
629
24
68
26.208333
0.865546
0.108108
0
0.588235
0
0
0.003636
0
0
0
0
0
0
1
0.117647
false
0
0.176471
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166739b28ed7ffa22c5f71499709f1fd302bd933
1,914
py
Python
config_model.py
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
17
2020-01-11T15:15:21.000Z
2021-12-08T10:03:36.000Z
config_model.py
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
6
2020-03-01T17:14:58.000Z
2021-05-21T16:05:03.000Z
config_model.py
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
8
2020-05-11T21:24:51.000Z
2021-07-23T09:18:46.000Z
import texar.tf as tx

beam_width = 5
hidden_dim = 768

bert = {
    'pretrained_model_name': 'bert-base-uncased'
}

# See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams
bert_encoder = {}

# From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45
# with adjustments for BERT
decoder = {
    'dim': hidden_dim,
    'num_blocks': 6,
    'multihead_attention': {
        'num_heads': 8,
        'output_dim': hidden_dim
    },
    'initializer': {
        'type': 'variance_scaling_initializer',
        'kwargs': {
            'scale': 1.0,
            'mode': 'fan_avg',
            'distribution': 'uniform',
        },
    },
    'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim)
}

loss_label_confidence = 0.9

opt = {
    'optimizer': {
        'type': 'AdamOptimizer',
        'kwargs': {
            'beta1': 0.9,
            'beta2': 0.997,
            'epsilon': 1e-9
        }
    }
}

lr = {
    # The 'learning_rate_schedule' can have the following 3 values:
    # - 'static' -> A simple static learning rate, specified by 'static_lr'
    # - 'aiayn' -> The learning rate used in the "Attention is all you need" paper.
    # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example
    'learning_rate_schedule': 'aiayn',
    # The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
    'lr_constant': 2 * (hidden_dim ** -0.5),
    # The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
    'warmup_steps': 4000,
    # The static learning rate, when 'static' is used.
    'static_lr': 1e-3,
    # A multiplier that can be applied to the 'aiayn' learning rate.
    'aiayn_multiplier': 0.2
}
31.377049
128
0.653083
239
1,914
5.046025
0.514644
0.099502
0.049751
0.062189
0.119403
0.119403
0.119403
0.086235
0.086235
0
0
0.040995
0.222571
1,914
60
129
31.9
0.769489
0.462382
0
0.047619
0
0
0.31133
0.069951
0
0
0
0
0
1
0
false
0
0.02381
0
0.02381
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166903b8515452d27e1a1b1b4a84d3d174d4f220
708
py
Python
scrap_instagram.py
genaforvena/nn_scrapper
897766a52202aa056afd657995ed39b2b91e1fe2
[ "Apache-2.0" ]
null
null
null
scrap_instagram.py
genaforvena/nn_scrapper
897766a52202aa056afd657995ed39b2b91e1fe2
[ "Apache-2.0" ]
null
null
null
scrap_instagram.py
genaforvena/nn_scrapper
897766a52202aa056afd657995ed39b2b91e1fe2
[ "Apache-2.0" ]
null
null
null
import urllib.request
import json

access_token = "265791501.a4af066.f45a9f44719a4b2cb2d137118524e32b"
api_url = "https://api.instagram.com/v1"

nn_lat = 56.296504
nn_lng = 43.936059


def request(endpoint, req_params=""):
    req = api_url + endpoint + "?access_token=" + access_token + "&" + req_params
    print(req)
    raw_response = urllib.request.urlopen(req).read()
    return json.loads(raw_response.decode('utf8'))


locations = request("/locations/search", "lat=" + str(nn_lat) + "&lng=" + str(nn_lng))["data"]
print(locations)

for location in locations:
    location_id = location["id"]
    location_media = request("/locations/" + str(location_id) + "/media/recent")
    print(location_media)
29.5
94
0.706215
91
708
5.307692
0.494505
0.068323
0.074534
0
0
0
0
0
0
0
0
0.087171
0.141243
708
23
95
30.782609
0.707237
0
0
0
0
0
0.216407
0.070721
0
0
0
0
0
1
0.058824
false
0
0.117647
0
0.235294
0.176471
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166ccaa355ece2f923c461999fa3eb16171b7163
350
py
Python
mechroutines/models/_flux.py
keceli/mechdriver
978994ba5c77b6df00078b639c4482dacf269440
[ "Apache-2.0" ]
1
2022-03-22T20:47:04.000Z
2022-03-22T20:47:04.000Z
mechroutines/models/_flux.py
keceli/mechdriver
978994ba5c77b6df00078b639c4482dacf269440
[ "Apache-2.0" ]
1
2021-02-12T21:11:16.000Z
2021-12-07T21:32:14.000Z
mechroutines/models/_flux.py
keceli/mechdriver
978994ba5c77b6df00078b639c4482dacf269440
[ "Apache-2.0" ]
8
2019-12-18T20:09:46.000Z
2020-11-14T16:37:28.000Z
""" NEW: Handle flux files """ import autofile def read_flux(ts_save_path, vrc_locs=(0,)): """ Read the geometry from the filesys """ vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str = None return flux_str
18.421053
54
0.64
55
350
3.818182
0.509091
0.1
0.095238
0.095238
0.133333
0
0
0
0
0
0
0.011111
0.228571
350
18
55
19.444444
0.766667
0.177143
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.125
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166ddfdb964d4dc41f4f840af0cda8cfbfe5a687
4,990
py
Python
RandomForest/RandomForest.py
nachiket273/ML_Algo_Implemented
74ae47fdf620545fdf8c934c5997784faadaebb7
[ "MIT" ]
7
2020-08-03T13:43:53.000Z
2022-02-18T20:38:51.000Z
RandomForest/RandomForest.py
nachiket273/ML_Algo_Implemented
74ae47fdf620545fdf8c934c5997784faadaebb7
[ "MIT" ]
null
null
null
RandomForest/RandomForest.py
nachiket273/ML_Algo_Implemented
74ae47fdf620545fdf8c934c5997784faadaebb7
[ "MIT" ]
2
2020-09-06T21:54:16.000Z
2022-01-22T19:59:33.000Z
import math import numpy as np import pandas as pd from sklearn.base import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator): """ Simple implementation of Random Forest. This class has implementation for Random Forest classifier and regressor. Dataset bagging is done by simple numpy random choice with replacement. For classification the prediction is by majority vote. For regression tree the prediction is averge of all estimator predictions. Args: n_estimators Number of base estimators (Decision Trees here) max_features Maximum features to be used to construct tree. Default: - If classifier, default is square root of total features. - If regressor, default is total number of features. max_depth The maximum depth to which estimators needs to be constructed. Default: np.inf min_samples_split Minimum number of samples need to present for split at the node. Default: 2 criterion criterion to be used for split. For classification tree following criterion are supported: - gini - entropy For regression tree following criterion are supported: - mse (mean squared error) - mae (mean absolute error) Default: gini random_seed random seed value for numpy operations. Default: 0 """ def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features = max_features self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed = random_seed self.idxs = [] self.trees = [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False if self.criterion == 'gini' or self.criterion == 'entropy': self.is_classification_forest = True elif self.criterion == 'mse' or self.criterion == 'mae': self.is_classification_forest = False else: raise Exception("Invalid criterion: {}".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets = [] if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _ in range(num): idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return subsets def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features == 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging - choose random features for each estimator # if max_features is provided, else use square root of # total number of features. for i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] = preds y_preds = [] for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else: y_preds.append(np.average(preds)) return y_preds
40.901639
93
0.546293
560
4,990
4.725
0.269643
0.049887
0.034014
0.049131
0.123205
0
0
0
0
0
0
0.004519
0.379158
4,990
122
94
40.901639
0.84958
0.327856
0
0.094595
0
0
0.018559
0
0
0
0
0
0
1
0.054054
false
0
0.094595
0
0.189189
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166e1671aebcb4e327d8e4f8b8b62dc58ec16062
556
py
Python
tests/basics/generator_pend_throw.py
iotctl/pycopy
eeb841afea61b19800d054b3b289729665fc9aa4
[ "MIT" ]
663
2018-12-30T00:17:59.000Z
2022-03-14T05:03:41.000Z
tests/basics/generator_pend_throw.py
iotctl/pycopy
eeb841afea61b19800d054b3b289729665fc9aa4
[ "MIT" ]
41
2019-06-06T08:31:19.000Z
2022-02-13T16:53:41.000Z
tests/basics/generator_pend_throw.py
iotctl/pycopy
eeb841afea61b19800d054b3b289729665fc9aa4
[ "MIT" ]
60
2019-06-01T04:25:00.000Z
2022-02-25T01:47:31.000Z
def gen():
    i = 0
    while 1:
        yield i
        i += 1


g = gen()

try:
    g.pend_throw
except AttributeError:
    print("SKIP")
    raise SystemExit

print(next(g))
print(next(g))

g.pend_throw(ValueError())

v = None
try:
    v = next(g)
except Exception as e:
    print("raised", repr(e))

print("ret was:", v)

# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
    next(g)
except ValueError:
    print("ValueError from just-started gen")
15.444444
73
0.624101
89
556
3.865169
0.460674
0.05814
0.087209
0.116279
0
0
0
0
0
0
0
0.007092
0.239209
556
35
74
15.885714
0.806147
0.190647
0
0.346154
0
0
0.111857
0
0
0
0
0
0
1
0.038462
false
0
0
0
0.038462
0.230769
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
166e4003ce5bc54874ebae493377303b4c270f29
4,511
py
Python
src/UnitTypes/ProjectileModule.py
USArmyResearchLab/ARL_Battlespace
2f17a478f62c20a4db387d5d3e4bbeaa3197cd49
[ "MIT" ]
1
2022-03-31T19:15:04.000Z
2022-03-31T19:15:04.000Z
src/UnitTypes/ProjectileModule.py
USArmyResearchLab/ARL_Battlespace
2f17a478f62c20a4db387d5d3e4bbeaa3197cd49
[ "MIT" ]
null
null
null
src/UnitTypes/ProjectileModule.py
USArmyResearchLab/ARL_Battlespace
2f17a478f62c20a4db387d5d3e4bbeaa3197cd49
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Tue Dec 15 09:49:47 2020 @author: james.z.hare """ from src.UnitModule import UnitClass, advance from copy import deepcopy import math class ProjectileClass(UnitClass): """ The Projectile Class This is a subclass to the UnitClass Virtual Functions ----------------- - `__copy__()` to make shallow copies - `__deepcopy__(memo)` to make deep copies - `possibleActions(State)` to identify legal actions - `observe(Unit)` to observe units located within VisibleRange - `overlaps(Unit)` to identify if the unit overlaps with another unit - `execute(Action, State)` to execute the action Attributes ---------- ID: a unique identifier of this unit Owner: the player the unit belongs to Health: the health of the unit Extent: the space occupied by unit Position: location of unit Orientation: as the name says VisibleRange: how far the unit can observe Actions: dict dictionary of actions common accross all units ActionOptions: list of list of action options. Attack: int that defines whether the unit is attacking in an advance action RemaingLifetime: int that defines the total number of turns until the unit is dead """ def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions = { "advance": lambda x: advance(self, x) } self.ActionOptions = ( ( "advance", ), ) self.Attack = None self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def __deepcopy__(self, memo): Default = None Exists = memo.get(self, Default) if Exists is not Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return Duplicate def possibleActions(self, State): """ Identifies the set of feasible actions given the board size and position of the unit Parameters ---------- State: StateClass Returns ------- TrueActions: list[str] A list of the feasible actions """ return self.ActionOptions def observe(self, Unit): if Unit.ID == self.ID: return Unit return None def overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): """ Execute `Actions` on `State`. Parameters ---------- Actions : list[str] A set of actions to be performed on `State`. State : StateClass State on which to inflict actions. Returns ------- Changes : list Resulting state of executed `Actions`. 
""" NewState = deepcopy(State) Changes = [] for Action in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list): Changes += ActionResult else: Changes.append(ActionResult) return Changes # Will be used as the projectile for the missile launcher unit class MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self, ID, Owner, Positon=Position, Life=Life)
32.221429
158
0.62137
509
4,511
5.444008
0.310413
0.034645
0.014435
0.021653
0.050162
0.027427
0
0
0
0
0
0.008962
0.282642
4,511
140
159
32.221429
0.847342
0.353802
0
0.037736
0
0
0.005454
0
0
0
0
0
0
1
0.150943
false
0
0.056604
0
0.396226
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16730d6f4856a5911d4dfcf4a29a2f5449a0ddb0
3,536
py
Python
tests/test_authentication.py
movermeyer/cellardoor
25192b07224ff7bd33fd29ebac07340bef53a2ed
[ "MIT" ]
null
null
null
tests/test_authentication.py
movermeyer/cellardoor
25192b07224ff7bd33fd29ebac07340bef53a2ed
[ "MIT" ]
3
2015-01-31T14:53:06.000Z
2015-02-01T19:04:30.000Z
tests/test_authentication.py
movermeyer/cellardoor
25192b07224ff7bd33fd29ebac07340bef53a2ed
[ "MIT" ]
2
2015-01-31T14:54:28.000Z
2018-03-05T17:33:42.000Z
import unittest from mock import Mock import base64 from cellardoor import errors from cellardoor.authentication import * from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self): identifier = FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one = FooIdentifier() id_one.identify = Mock(return_value=None) id_two = FooIdentifier() id_two.identify = Mock(return_value='two') id_three = FooIdentifier() id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three, auth_three) ] ) environ = {} middleware(environ, lambda: None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) self.assertEquals(identified_credentials, {'username':'foo', 'password':'bar'})
30.747826
95
0.756505
374
3,536
6.949198
0.200535
0.03232
0.057715
0.084648
0.374375
0.370142
0.287033
0.21085
0.164679
0.06541
0
0.007129
0.127262
3,536
115
96
30.747826
0.835062
0
0
0.218391
0
0
0.06955
0
0
0
0
0
0.16092
1
0.137931
false
0.045977
0.068966
0
0.252874
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167422ad1c22d904c1fb3127c28d48e06243100c
2,698
py
Python
configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py
Westlake-AI/openmixup
ea81250819e740dd823e30cb7ce382d14a3c1b91
[ "Apache-2.0" ]
10
2021-12-30T10:22:27.000Z
2022-03-30T02:31:38.000Z
configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py
Westlake-AI/openmixup
ea81250819e740dd823e30cb7ce382d14a3c1b91
[ "Apache-2.0" ]
3
2022-01-20T21:02:48.000Z
2022-03-19T13:49:45.000Z
configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py
Westlake-AI/openmixup
ea81250819e740dd823e30cb7ce382d14a3c1b91
[ "Apache-2.0" ]
null
null
null
_base_ = [
    '../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py',
    '../../../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='MixUpClassification',
    pretrained=None,
    alpha=0.2,
    mix_mode="cutmix",
    mix_args=dict(
        attentivemix=dict(grid_size=32, top_k=None, beta=8),  # AttentiveMix+ in this repo (use pre-trained)
        automix=dict(mask_adjust=0, lam_margin=0),  # require pre-trained mixblock
        fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False),
        manifoldmix=dict(layer=(0, 3)),
        puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1,  # adjust t_batch_size if CUDA out of memory
            mp=None, block_num=4,  # block_num<=4 and mp=2/4 for fast training
            beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),
        resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
        samix=dict(mask_adjust=0, lam_margin=0.08),  # require pre-trained mixblock
    ),
    backbone=dict(
        type='ConvNeXt',
        arch='tiny',
        out_indices=(3,),
        norm_cfg=dict(type='LN2d', eps=1e-6),
        act_cfg=dict(type='GELU'),
        drop_path_rate=0.1,
        gap_before_final_norm=True,
    ),
    head=dict(
        type='ClsMixupHead',  # mixup CE + label smooth
        loss=dict(type='LabelSmoothLoss',
            label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0),
        with_avg_pool=False,  # gap_before_final_norm is True
        in_channels=768, num_classes=1000)
)

# interval for accumulate gradient
update_interval = 2  # total: 8 x bs256 x 2 accumulates = bs4096

# additional hooks
custom_hooks = [
    dict(type='EMAHook',  # EMA_W = (1 - m) * EMA_W + m * W
        momentum=0.9999,
        warmup='linear',
        warmup_iters=20 * 626, warmup_ratio=0.9,  # warmup 20 epochs.
        update_interval=update_interval,
    ),
]

# optimizer
optimizer = dict(
    type='AdamW',
    lr=4e-3,  # lr = 5e-4 * (256 * 4) * 4 accumulate / 1024 = 4e-3 / bs4096
    weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999),
    paramwise_options={
        '(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
    })

# apex
use_fp16 = True
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)

# lr scheduler
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False, min_lr=1e-5,
    warmup='linear',
    warmup_iters=20, warmup_by_epoch=True,  # warmup 20 epochs.
    warmup_ratio=1e-6,
)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
34.151899
111
0.640474
394
2,698
4.187817
0.464467
0.048485
0.021818
0.018182
0.084848
0.030303
0.030303
0
0
0
0
0.069986
0.210897
2,698
78
112
34.589744
0.705026
0.192735
0
0.078125
0
0
0.124652
0.055144
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16748f009db0117be1d076ddc5a413db7e45e64c
2,274
py
Python
mcstasscript/interface/reader.py
PaNOSC-ViNYL/McStasScript
bd94ebc6cac290c3c9662871df40d76edbe4a44e
[ "BSD-3-Clause" ]
3
2019-08-29T14:15:06.000Z
2021-03-04T12:08:48.000Z
mcstasscript/interface/reader.py
PaNOSC-ViNYL/McStasScript
bd94ebc6cac290c3c9662871df40d76edbe4a44e
[ "BSD-3-Clause" ]
37
2019-03-05T12:28:32.000Z
2022-03-22T10:11:23.000Z
mcstasscript/interface/reader.py
PaNOSC-ViNYL/McStasScript
bd94ebc6cac290c3c9662871df40d76edbe4a44e
[ "BSD-3-Clause" ]
6
2019-10-21T20:19:10.000Z
2022-03-09T10:12:16.000Z
import os

from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr


class McStas_file:
    """
    Reader of McStas files, can add to an existing McStasScript
    instrument instance or create a corresponding McStasScript python
    file.

    Methods
    -------
    add_to_instr(Instr)
        Add information from McStas file to McStasScript Instr instance

    write_python_file(filename)
        Write python file named filename that reproduce the McStas instr
    """

    def __init__(self, filename):
        """
        Initialization of McStas_file class, needs McStas instr filename

        Parameters
        ----------
        filename (str)
            Name of McStas instrument file to be read
        """

        # Check filename
        if not os.path.isfile(filename):
            raise ValueError("Given filename, \"" + filename
                             + "\" could not be found.")

        self.Reader = InstrumentReader(filename)

    def add_to_instr(self, Instr):
        """
        Adds information from the McStas file to McStasScript instr

        Parameters
        ----------
        Instr (McStasScript McStas_instr instance)
            McStas_instr instance to add instrument information to
        """

        # Check Instr
        if not isinstance(Instr, McStas_instr):
            raise TypeError("Given object is not of type McStas_instr!")

        self.Reader.add_to_instr(Instr)

    def write_python_file(self, filename, **kwargs):
        """
        Writes python file that reproduces McStas instrument file

        Parameters
        ----------
        filename (str)
            Filename of python file to be written
        """

        if "force" in kwargs:
            force = kwargs["force"]
        else:
            force = False

        # Check product_filename is available
        if os.path.isfile(filename):
            if force:
                os.remove(filename)
            else:
                raise ValueError("Filename \"" + filename
                                 + "\" already exists, you can overwrite with "
                                 + "force=True")

        self.Reader.generate_py_version(filename)
28.425
79
0.579595
233
2,274
5.549356
0.343348
0.059551
0.023202
0.023202
0.044857
0
0
0
0
0
0
0
0.343887
2,274
79
80
28.78481
0.866622
0.39358
0
0.076923
0
0
0.07732
0
0
0
0
0
0
1
0.115385
false
0
0.115385
0
0.269231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16766ccc57f251df7ba9394a55b7eabdd7d12e46
2,925
py
Python
country_capital_guesser.py
NathanMH/ComputerClub
197585c1a77f71ee363547740d6e09f945e7526f
[ "MIT" ]
null
null
null
country_capital_guesser.py
NathanMH/ComputerClub
197585c1a77f71ee363547740d6e09f945e7526f
[ "MIT" ]
null
null
null
country_capital_guesser.py
NathanMH/ComputerClub
197585c1a77f71ee363547740d6e09f945e7526f
[ "MIT" ]
null
null
null
#! /usr/bin/env python3
#######################
"""####################
Index:
    1. Imports and Readme
    2. Functions
    3. Main
    4. Testing
####################"""
#######################

###################################################################
# 1. IMPORTS AND README
###################################################################

import easygui
import country_list_getter

###################################################################
# 2. FUNCTIONS
###################################################################

# Dictionary. It has keys (Canada, France etc...) and Values (Paris, Ottawa)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST

def ask_to_play():
    return easygui.ynbox("Do you want to play a game?", "Country Guesser", ("Yes", "No"))

def ask_to_replay(correct_answers, total_questions):
    score = round(((correct_answers / total_questions) * 100), 2)
    if score >= 50:
        return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/happy_puppy.jpg", ["Yes", "No"])
    else:
        return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/sad_puppy.jpg", ["Yes", "No"])

def main_question_box(country):
    return easygui.enterbox("What is the capital of: " + country + "?", "Country Capital Guesser!!")

###################################################################
# 3. MAIN
###################################################################

def funtime():
    playing = 1
    correct_answers = 0
    total_questions = 0
    ask_to_play()
    while playing:
        for key, value in COUNTRIES_CAPITALS.items():
            answer = main_question_box(key)
            # answer = input("Name the capital of: " + key + "\n").lower()
            total_questions += 1  # Short for total_questions = total_questions + 1
            if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:
                correct_answers += 1
                print("Correct!")
            else:
                print("Wrong!")
        # Should we keep playing?
        response = input("Would you like to play again?: \n")
        if response.lower() == "yes" or response == "y":
            playing = 1
        else:
            playing = 0
    #score_screen(correct_answers, total_questions)
    ask_to_replay(correct_answers, total_questions)
    #print("You scored " + str(correct_answers)+ "/" + str(total_questions) + " (" + str(correct_percent) + "%)")

###################################################################
# 4. TESTING
###################################################################

# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}

def test_1():
    pass
    # ask_to_play()
    # main_question_box("Canada")

funtime()
33.62069
160
0.494017
286
2,925
4.881119
0.388112
0.090258
0.054441
0.080229
0.189828
0.179083
0.179083
0.123209
0.123209
0.123209
0
0.01008
0.185983
2,925
86
161
34.011628
0.576228
0.236239
0
0.135135
0
0
0.202082
0.056338
0
0
0
0
0
1
0.135135
false
0.027027
0.054054
0.054054
0.297297
0.054054
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1676c1cee546273be3e4746fcf8ddcdf0ca583bb
2,288
py
Python
data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py
aframires/freesound-loop-annotator
a24e0c23bfc671e41e8627150e7b9fcae5c8cb13
[ "Apache-2.0" ]
18
2020-01-22T14:58:18.000Z
2022-02-21T12:07:51.000Z
data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py
aframires/freesound-loop-annotator
a24e0c23bfc671e41e8627150e7b9fcae5c8cb13
[ "Apache-2.0" ]
2
2020-02-24T13:14:05.000Z
2020-09-21T13:34:53.000Z
data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py
aframires/freesound-loop-annotator
a24e0c23bfc671e41e8627150e7b9fcae5c8cb13
[ "Apache-2.0" ]
1
2020-01-22T14:55:36.000Z
2020-01-22T14:55:36.000Z
# Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))

from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote


def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):
    collection = rekordbox_file.find('COLLECTION')
    found = False
    for document in collection:
        if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:
            found = document
            break
        if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:
            found = document
            break
        if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):
            found = document
            break
    return found


@click.command()
@click.argument('dataset_path')
def rekordbox_file_to_analysis_file(dataset_path):
    """
    Read information from rekordbox_rhythm.xml present in dataset_path and convert it into
    analsysis_rhythm_rekordbox.json to be stored in the same folder and compatible with our
    evaluation framework.
    """
    rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()
    metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))
    out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')
    analysis = dict()
    with click.progressbar(metadata_file.keys(), label="Converting...") as metadata_keys:
        for key in metadata_keys:
            entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)
            if entry is not False:
                tempo_entry = entry.find('TEMPO')
                if tempo_entry is not None:
                    bpm_raw = float(tempo_entry.attrib['Bpm'])
                else:
                    bpm_raw = 0.0
                analysis[key] = {"RekBox": {
                    "bpm": bpm_raw,
                    }
                }
    save_to_json(out_file_path, analysis, verbose=True)


if __name__ == '__main__':
    rekordbox_file_to_analysis_file()
39.448276
119
0.660402
287
2,288
5.006969
0.355401
0.025052
0.027836
0.037578
0.231733
0.150313
0.150313
0.150313
0.118302
0.118302
0
0.003977
0.230769
2,288
57
120
40.140351
0.8125
0.117133
0
0.136364
0
0
0.091182
0.01503
0
0
0
0
0
1
0.045455
false
0
0.136364
0
0.204545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167719b0cc59eef9b7fff6f4ce109cd0d2fe8bc1
12,932
py
Python
tests/test_web_urldispatcher.py
avstarkov/aiohttp
b0a03cffccf677bf316227522a9b841c15dcb869
[ "Apache-2.0" ]
null
null
null
tests/test_web_urldispatcher.py
avstarkov/aiohttp
b0a03cffccf677bf316227522a9b841c15dcb869
[ "Apache-2.0" ]
null
null
null
tests/test_web_urldispatcher.py
avstarkov/aiohttp
b0a03cffccf677bf316227522a9b841c15dcb869
[ "Apache-2.0" ]
null
null
null
import functools import os import shutil import tempfile from unittest import mock from unittest.mock import MagicMock import pytest from aiohttp import abc, web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): """ Give a path for a temporary directory The directory is destroyed at the end of the test. """ # Temporary directory. tmp_dir = tempfile.mkdtemp() def teardown(): # Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( "show_index,status,prefix,data", [pytest.param(False, 403, '/', None, id="index_forbidden"), pytest.param(True, 200, '/', b'<html>\n<head>\n<title>Index of /.</title>\n' b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n' b'<li><a href="/my_dir">my_dir/</a></li>\n' b'<li><a href="/my_file">my_file</a></li>\n' b'</ul>\n</body>\n</html>', id="index_root"), pytest.param(True, 200, '/static', b'<html>\n<head>\n<title>Index of /.</title>\n' b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n' b'<li><a href="/static/my_dir">my_dir/</a></li>\n' b'<li><a href="/static/my_file">my_file</a></li>\n' b'</ul>\n</body>\n</html>', id="index_static")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data): """ Tests the operation of static file server. Try to access the root of static file server, and make sure that correct HTTP statuses are returned depending if we directory index should be shown or not. """ # Put a file inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write('world') app = web.Application() # Register global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await aiohttp_client(app) # Request the root of the static directory. r = await client.get(prefix) assert r.status == status if data: assert r.headers['Content-Type'] == "text/html; charset=utf-8" read_ = (await r.read()) assert read_ == data async def test_follow_symlink(tmp_dir_path, aiohttp_client): """ Tests the access to a symlink, in static folder """ data = 'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app) # Request the root of the static directory. 
r = await client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test text'), ('test dir name', 'test dir file .txt', 'test text file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): """ Checks operation of static files with spaces """ my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as fw: fw.write(data) app = web.Application() url = os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app) r = await client.get(url) assert r.status == 200 assert (await r.text()) == data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): """ Tests accessing non-existing resource Try to access a non-exiting resource and make sure that 404 HTTP status returned. """ app = web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the root of the static directory. r = await client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path, request_url): """ Tests accessing a resource with """ app = web.Application() async def handler(request): return web.Response() app.router.add_get(registered_path, handler) client = await aiohttp_client(app) r = await client.get(request_url) assert r.status == 200 async def test_handler_metadata_persistence(): """ Tests accessing metadata of a handler after registering it on the app router. """ app = web.Application() async def async_handler(request): """Doc""" return web.Response() def sync_handler(request): """Doc""" return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in app.router.resources(): for route in resource: assert route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): """ Tests the unauthorized access to a folder of static file server. Try to list a folder content of static file server when server does not have permissions to do so for the folder. """ my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock() path.joinpath.return_value = path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value = path # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the root of the static directory. r = await client.get('/my_dir') assert r.status == 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): """ Tests the access to a looped symlink, which could not be resolved. """ my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the root of the static directory. 
r = await client.get('/my_symlink') assert r.status == 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): """ Tests the access to a resource that is neither a file nor a directory. Checks that if a special resource is accessed (f.e. named pipe or UNIX domain socket) then 404 HTTP status returned. """ app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock() special.is_dir.return_value = False special.is_file.return_value = False path = MagicMock() path.joinpath.side_effect = lambda p: (special if p == 'special' else path) path.resolve.return_value = path special.resolve.return_value = special path_constructor.return_value = path # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the root of the static directory. r = await client.get('/special') assert r.status == 404 async def test_partialy_applied_handler(aiohttp_client): app = web.Application() async def handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client = await aiohttp_client(app) r = await client.get('/') data = (await r.read()) assert data == b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name is None assert route.resource is None assert "<SystemRoute 201: test>" == repr(route) assert 201 == route.status assert 'test' == route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self, request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client = await aiohttp_client(app) resp = await client.get('/') assert resp.status == 412 async def test_allow_head(aiohttp_client): """ Test allow_head on routes. """ app = web.Application() async def handler(_): return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client = await aiohttp_client(app) r = await client.get('/a') assert r.status == 200 await r.release() r = await client.head('/a') assert r.status == 200 await r.release() r = await client.get('/b') assert r.status == 200 await r.release() r = await client.head('/b') assert r.status == 405 await r.release() @pytest.mark.parametrize("path", [ '/a', '/{a}', ]) def test_reuse_last_added_resource(path): """ Test that adding a route with the same name and path of the last added resource doesn't create a new resource. 
""" app = web.Application() async def handler(request): return web.Response() app.router.add_get(path, handler, name="a") app.router.add_post(path, handler, name="a") assert len(app.router.resources()) == 1 def test_resource_raw_match(): app = web.Application() async def handler(request): return web.Response() route = app.router.add_get("/a", handler, name="a") assert route.resource.raw_match("/a") route = app.router.add_get("/{b}", handler, name="b") assert route.resource.raw_match("/{b}") resource = app.router.add_static("/static", ".") assert not resource.raw_match("/static") async def test_add_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self): return web.Response() async def post(self): return web.Response() app.router.add_view("/a", MyView) client = await aiohttp_client(app) r = await client.get("/a") assert r.status == 200 await r.release() r = await client.post("/a") assert r.status == 200 await r.release() r = await client.put("/a") assert r.status == 405 await r.release() async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view("/a") class MyView(web.View): async def get(self): return web.Response() async def post(self): return web.Response() app = web.Application() app.router.add_routes(routes) client = await aiohttp_client(app) r = await client.get("/a") assert r.status == 200 await r.release() r = await client.post("/a") assert r.status == 200 await r.release() r = await client.put("/a") assert r.status == 405 await r.release() async def test_web_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self): return web.Response() async def post(self): return web.Response() app.router.add_routes([ web.view("/a", MyView) ]) client = await aiohttp_client(app) r = await client.get("/a") assert r.status == 200 await r.release() r = await client.post("/a") assert r.status == 200 await r.release() r = await client.put("/a") assert r.status == 405 await r.release()
27.514894
79
0.634009
1,786
12,932
4.422732
0.134379
0.033675
0.029118
0.042537
0.570579
0.536017
0.498797
0.450563
0.438537
0.408406
0
0.009592
0.24219
12,932
469
80
27.573561
0.796429
0.055444
0
0.485714
0
0.007143
0.094214
0.038998
0
0
0
0
0.132143
1
0.021429
false
0
0.032143
0
0.117857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1678ba6ffacdb3dc2a1730ee864aab5b2813d801
13,683
py
Python
R-GMM-VGAE/model_citeseer.py
nairouz/R-GAE
acc7bfe36153a4c7d6f68e21a557bb4d99dab639
[ "MIT" ]
26
2021-07-18T01:31:48.000Z
2022-03-31T03:23:11.000Z
R-GMM-VGAE/model_citeseer.py
Fawzidev/R-GAE
80988ddf951f1723091a04b617ce4fc6d20ab9ce
[ "MIT" ]
3
2021-10-01T07:24:42.000Z
2021-11-03T14:25:55.000Z
R-GMM-VGAE/model_citeseer.py
Fawzidev/R-GAE
80988ddf951f1723091a04b617ce4fc6d20ab9ce
[ "MIT" ]
7
2021-07-18T01:47:01.000Z
2022-01-24T21:09:10.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Authors : Nairouz Mrabah (mrabah.nairouz@courrier.uqam.ca) & Mohamed Fawzi Touati (touati.mohamed_fawzi@courrier.uqam.ca) # @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering # @License : MIT License import torch import numpy as np import torch.nn as nn import scipy.sparse as sp import torch.nn.functional as F from tqdm import tqdm from torch.optim import Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn import metrics from munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim + output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy() if X.size == 0: q = np.array([]) else: q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha)) q = q ** ((alpha + 1.0) / 2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices = [] conf_indices = [] q = q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label = true_label self.pred_label = predict_label def clusteringAcc(self): # best mapping between true_label and predict label l1 = list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2) if numclass1 != numclass2: print('Class Not equal, Error!!!!') return 0 cost = np.zeros((numclass1, numclass2), dtype=int) for i, c1 in enumerate(l1): mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1] for j, c2 in enumerate(l2): mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2] cost[i][j] = len(mps_d) # match two clustering results by Munkres algorithm m = Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost) # get the match results new_predict = np.zeros(len(self.pred_label)) for i, c in enumerate(l1): # correponding label in l2: c2 = l2[indexes[i][1]] # ai is the index with label==c2 in the pred_label list ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = 
metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\r\n') fh.flush() fh.close() return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self, inputs, adj): x = inputs x = torch.mm(x,self.weight) x = torch.mm(adj, x) outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) # GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag') for _ in epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi = self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss = Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= 
torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k = 0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] = 1 for j in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j] = 0 adj = adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = [] epoch_stable = 0 for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch == 0: adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >= 15: epoch_stable = 0 beta1 = beta1 * 0.96 beta2 = beta2 * 0.98 if epoch % 50 == 0 and epoch <= 200 : adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, 
z): pi = self.pi log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self, x_features, adj): hidden = self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean return self.mean, self.logstd ,sampled_z @staticmethod def decode(z): A_pred = torch.sigmoid(torch.matmul(z,z.t())) return A_pred
46.699659
259
0.625448
1,892
13,683
4.29334
0.17019
0.015758
0.020805
0.019943
0.320694
0.269112
0.242275
0.198449
0.194756
0.151668
0
0.021224
0.25623
13,683
293
260
46.699659
0.776948
0.037857
0
0.094262
0
0.008197
0.032395
0
0
0
0
0
0
1
0.077869
false
0
0.057377
0
0.209016
0.008197
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16796b947c516147ed6529d69a08e17bbd4afe73
3,005
py
Python
odoo-13.0/addons/stock_account/models/account_chart_template.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
odoo-13.0/addons/stock_account/models/account_chart_template.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
odoo-13.0/addons/stock_account/models/account_chart_template.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, models, _

import logging

_logger = logging.getLogger(__name__)


class AccountChartTemplate(models.Model):
    _inherit = "account.chart.template"

    @api.model
    def generate_journals(self, acc_template_ref, company, journals_dict=None):
        journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}]
        return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add)

    def generate_properties(self, acc_template_ref, company, property_list=None):
        res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company)
        PropertyObj = self.env['ir.property']

        # Property Stock Journal
        value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1)
        if value:
            field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1)
            vals = {
                'name': 'property_stock_journal',
                'company_id': company.id,
                'fields_id': field.id,
                'value': 'account.journal,%s' % value.id,
            }
            properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)])
            if properties:
                # the property exist: modify it
                properties.write(vals)
            else:
                # create the property
                PropertyObj.create(vals)

        todo_list = [  # Property Stock Accounts
            'property_stock_account_input_categ_id',
            'property_stock_account_output_categ_id',
            'property_stock_valuation_account_id',
        ]
        for record in todo_list:
            account = getattr(self, record)
            value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
            if value:
                field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1)
                vals = {
                    'name': record,
                    'company_id': company.id,
                    'fields_id': field.id,
                    'value': value,
                }
                properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1)
                if not properties:
                    # create the property
                    PropertyObj.create(vals)
                elif not properties.value_reference:
                    # update the property if False
                    properties.write(vals)
        return res
47.698413
180
0.577704
308
3,005
5.431818
0.308442
0.053796
0.058577
0.053796
0.340705
0.253437
0.185296
0.185296
0.095637
0.050209
0
0.002791
0.284526
3,005
62
181
48.467742
0.775349
0.079867
0
0.25
0
0
0.20537
0.071843
0
0
0
0
0
1
0.041667
false
0
0.041667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167a0dd80799c1a419238ba6164d01472b85e5d4
6,094
py
Python
lib/roi_data/loader.py
BarneyQiao/pcl.pytorch
4e0280e5e1470f705e620eda26f881d627c5016c
[ "MIT" ]
233
2019-05-10T07:17:42.000Z
2022-03-30T09:24:16.000Z
lib/roi_data/loader.py
Michael-Steven/Crack_Image_WSOD
4e8591a7c0768cee9eb7240bb9debd54824f5b33
[ "MIT" ]
78
2019-05-10T21:10:47.000Z
2022-03-29T13:57:32.000Z
lib/roi_data/loader.py
Michael-Steven/Crack_Image_WSOD
4e8591a7c0768cee9eb7240bb9debd54824f5b33
[ "MIT" ]
57
2019-05-10T07:17:37.000Z
2022-03-24T04:43:24.000Z
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes

from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes


class RoiDataLoader(data.Dataset):
    def __init__(self, roidb, num_classes, training=True):
        self._roidb = roidb
        self._num_classes = num_classes
        self.training = training
        self.DATA_SIZE = len(self._roidb)

    def __getitem__(self, index_tuple):
        index, ratio = index_tuple
        single_db = [self._roidb[index]]
        blobs, valid = get_minibatch(single_db, self._num_classes)
        #TODO: Check if minibatch is valid ? If not, abandon it.
        # Need to change _worker_loop in torch.utils.data.dataloader.py.

        # Squeeze batch dim
        # for key in blobs:
        #     if key != 'roidb':
        #         blobs[key] = blobs[key].squeeze(axis=0)
        blobs['data'] = blobs['data'].squeeze(axis=0)
        return blobs

    def __len__(self):
        return self.DATA_SIZE


def cal_minibatch_ratio(ratio_list):
    """Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
    Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
    and 2) cfg.TRAIN.SCALES containing SINGLE scale.
    Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
    pad and batch images base on that.
    """
    DATA_SIZE = len(ratio_list)
    ratio_list_minibatch = np.empty((DATA_SIZE,))
    num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH))  # Include leftovers
    for i in range(num_minibatch):
        left_idx = i * cfg.TRAIN.IMS_PER_BATCH
        right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)

        if ratio_list[right_idx] < 1:
            # for ratio < 1, we preserve the leftmost in each batch.
            target_ratio = ratio_list[left_idx]
        elif ratio_list[left_idx] > 1:
            # for ratio > 1, we preserve the rightmost in each batch.
            target_ratio = ratio_list[right_idx]
        else:
            # for ratio cross 1, we make it to be 1.
            target_ratio = 1

        ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
    return ratio_list_minibatch


class MinibatchSampler(torch_sampler.Sampler):
    def __init__(self, ratio_list, ratio_index):
        self.ratio_list = ratio_list
        self.ratio_index = ratio_index
        self.num_data = len(ratio_list)

    def __iter__(self):
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))

    def __len__(self):
        return self.num_data


class BatchSampler(torch_sampler.BatchSampler):
    r"""Wraps another sampler to yield a mini-batch of indices.

    Args:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``

    Example:
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, torch_sampler.Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integeral value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(idx)  # Difference: batch.append(int(idx))
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if len(batch) > 0 and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return len(self.sampler) // self.batch_size
        else:
            return (len(self.sampler) + self.batch_size - 1) // self.batch_size


def collate_minibatch(list_of_blobs):
    """Stack samples seperately and return a list of minibatches
    A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
    Hence, we need to stack smaples from each minibatch seperately.
    """
    Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batch into a tensor.
    # So we keep roidb in the type of "list of ndarray".
    lists = []
    for blobs in list_of_blobs:
        lists.append({'data' : blobs.pop('data'),
                      'rois' : blobs.pop('rois'),
                      'labels' : blobs.pop('labels')})
    for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
        mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
        minibatch = default_collate(mini_list)
        for key in minibatch:
            Batch[key].append(minibatch[key])
    return Batch
38.56962
97
0.639317
848
6,094
4.375
0.248821
0.048518
0.018868
0.018868
0.14097
0.086253
0.075472
0.04097
0.026954
0
0
0.010293
0.266656
6,094
157
98
38.815287
0.81987
0.282245
0
0.113402
0
0
0.056272
0.005862
0
0
0
0.006369
0
1
0.113402
false
0
0.113402
0.020619
0.340206
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167a8c5cf5187907cc0dbc578ad93057948ece69
28,272
py
Python
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py
andywu113/fuhe_predict
7fd816ae83467aa659d420545cd3e25a5e933d5f
[ "MIT" ]
3
2019-06-05T12:11:20.000Z
2022-01-17T13:53:06.000Z
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py
kevinten10/Clothing-Classification
9aac6e339651137179f4e4da36fe7743cf4bdca4
[ "MIT" ]
3
2021-06-08T20:58:27.000Z
2022-03-12T00:16:49.000Z
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py
kevinten10/Clothing-Classification
9aac6e339651137179f4e4da36fe7743cf4bdca4
[ "MIT" ]
1
2019-02-11T22:36:12.000Z
2019-02-11T22:36:12.000Z
import warnings from distutils.version import LooseVersion import numpy as np import pytest from scipy import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another dataset that has multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle of Lars is to keep covariances tied and decreasing # also test verbose output from io import StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy gives the right answer G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def test_lars_lstsq(): # Test that Lars gives least square solution at the end # of the path X1 = 3 * X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value change when numpy >= 1.14 rcond = None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution at the end # of the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust to collinearity in input X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded n_samples = 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns the correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and Xy remains # correct X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input is a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks that LARS Lasso is handling rank # deficient input data (with n_features < rank) in the same way # as coordinate descent Lasso y = [5, 0, 5] for X in ( [[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]] ): # To be able to use 
the coefs to compute the objective function, # we need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate descent give the # same results. X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate descent give the # same results when early stopping is used. # (test : before, in the middle, and in the last part of the path) alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design, and check that # it does not blow up, and stays somewhat close to a solution given # by the coordinate descent solver # Also test that lasso_path (using lars_path output style) gives # the same result as lars_path and previous lasso output style # under these conditions. 
rng = np.random.RandomState(42) # Generate data n, m = 70, 100 k = 5 X = rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS has to go # far in the path to converge, and check that LARS and coordinate # descent give the same answers # Note it used to be the case that Lars had to use the drop for good # strategy for this but this is no longer the case with the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y = [10, 10, 1] alpha = .0001 def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure that at least some features get added if necessary # test for 6d2b4c # Hilbert matrix n = 5 H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length 6 + 1 in a Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional y do the right thing Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_lars_cv(): # Test the LassoLarsCV object by checking that the optimal alpha # increases as the number of samples increases. # This property is not actually guaranteed in general and is just a # property of the given dataset, with the given steps chosen. 
old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. # - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated memory mapping on large input, the # fold data is in read-only mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this is the main test for the positive parameter on the lars_path method # the estimator classes just make use of this function # we do the test on the diabetes dataset # ensure that we get negative coefficients when positive=False # and all positive when positive=True # for method 'lar' (default) and lasso # Once deprecation of LAR + positive option is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now we gonna test the positive option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of all estimator # classes in this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy() 
params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate descent give the # same results when using the positive option # This test is basically a copy of the above with additional positive # option. However for the middle part, the comparison of coefficient values # for a range of alphas, we had to make an adaptations. See below. # not normalized data X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen for coefficient comparison here is restricted # as compared with the above test without the positive option. This is due # to the circumstance that the Lars-Lasso algorithm does not converge to # the least-squares-solution for small alphas, see 'Least Angle Regression' # by Efron et al 2004. The coefficients are typically in congruence up to # the smallest alpha reached by the Lars-Lasso algorithm and start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with the LassoLars # implementation available in R (lars library) under the following # scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True # Let's generate the data used in the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's compare R vs sklearn when fit_intercept=False and # normalize=False ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 
0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R vs sklearn when fit_intercept=True and # normalize=True # # Note: When normalize is equal to True, R returns the coefficients in # their original units, that is, they are rescaled back, whereas sklearn # does not do that, therefore, we need to do this step before comparing # their results. ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by sklearn before comparing # against the R result (read the note above) temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): """ Test that user input regarding copy_X is not being overridden (it was until at least version 0.21) """ lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): """ Test that user input to .fit for copy_X overrides default __init__ value """ lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X == np.array_equal(X, X_copy)
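The tests in this record repeatedly build the penalized least-squares objective inline. As a reading aid only, an equivalent helper (a sketch assuming the same numpy and scipy.linalg imports the test module already uses):

import numpy as np
from scipy import linalg

def lasso_objective(X, y, coef, alpha):
    # (1 / (2 * n_samples)) * ||y - X @ coef||_2^2 + alpha * ||coef||_1,
    # matching the inline expressions in the rank-deficiency and
    # ill-conditioning tests above.
    n_samples = len(X)
    residual = np.asarray(y) - np.dot(X, coef)
    return (1. / (2. * n_samples)) * linalg.norm(residual) ** 2 \
        + alpha * linalg.norm(coef, 1)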
38.360923
79
0.616051
3,889
28,272
4.288506
0.150424
0.04353
0.02968
0.031898
0.429488
0.374745
0.319223
0.277791
0.257945
0.242895
0
0.060671
0.262521
28,272
736
80
38.413043
0.739233
0.222482
0
0.311301
0
0
0.023927
0.000983
0
0
0
0.001359
0.142857
1
0.072495
false
0
0.042644
0.002132
0.117271
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167b4e3bb5a00625d3f0b289e41e2bc170fabc61
3,128
py
Python
parser.py
FeroxTL/pynginxconfig-new
71cb78c635930b0a764d3274646d436e8d2f1c4d
[ "MIT" ]
8
2016-03-25T04:22:39.000Z
2022-02-12T21:46:47.000Z
parser.py
Winnerer/pynginxconfig
71cb78c635930b0a764d3274646d436e8d2f1c4d
[ "MIT" ]
null
null
null
parser.py
Winnerer/pynginxconfig
71cb78c635930b0a764d3274646d436e8d2f1c4d
[ "MIT" ]
3
2019-01-26T15:54:54.000Z
2022-02-12T21:46:47.000Z
#coding: utf8 import copy import re from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location def parse(s, parent_block): config = copy.copy(s) pos, brackets_level, param_start = 0, 0, 0 while pos < len(config): if config[pos] == '#' and brackets_level == 0: re_sharp_comment = re.search('(?P<offset>[\s\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start = 0, 0 continue if config[pos] == ';' and brackets_level == 0: re_option = re.search('\s*(?P<param_name>\w+)\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option: raise Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \n]+', ' ', option['param_options'])) config = config[re_option.end():] pos, param_start = 0, 0 continue if config[pos] == '{': brackets_level += 1 elif config[pos] == '}': brackets_level -= 1 if brackets_level == 0 and param_start is not None: re_block = re.search( '(?P<param_name>\w+)\s*(?P<param_options>.*)\s*{(\n){0,1}(?P<block>(.|\n)*)}', config[param_start:pos + 1], ) block = re_block.groupdict() if block['param_name'].lower() == 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']] = new_block if block['block']: parse(block['block'], new_block) config = config[re_block.end():] pos, param_start = 0, 0 continue pos += 1 if brackets_level != 0: raise Exception('Not closed bracket') qwe = EmptyBlock() parse("""#{ asd #qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal} #1 server { listen 8080 tls; root /data/up1; location / { l200; } location /qwe{ s 500; }#123 }#qweqwe""", qwe) print(qwe.render()) qwe = EmptyBlock() parse(""" servername wqeqweqwe; http { ## # Basic Settings ## sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; # server_tokens off; # server_names_hash_bucket_size 64; # server_name_in_redirect off; include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Gzip Settings ## gzip on; gzip_disable "msie6"; }#123123 """, qwe) print(qwe.render())
24.825397
113
0.545716
359
3,128
4.562674
0.32312
0.055556
0.026862
0.029304
0.164225
0.12149
0.12149
0.105617
0.045177
0.045177
0
0.023397
0.316816
3,128
125
114
25.024
0.743098
0.003836
0
0.173913
0
0.01087
0.344783
0.099197
0
0
0
0
0
1
0.01087
false
0
0.032609
0
0.043478
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167df72d7c85276ff20ea4552c3c38a522dba306
7,024
py
Python
optimizer.py
thanusha22/CEC-1
02ad9247b006a348cc871a5714cf5abfa4a516af
[ "MIT" ]
null
null
null
optimizer.py
thanusha22/CEC-1
02ad9247b006a348cc871a5714cf5abfa4a516af
[ "MIT" ]
null
null
null
optimizer.py
thanusha22/CEC-1
02ad9247b006a348cc871a5714cf5abfa4a516af
[ "MIT" ]
null
null
null
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.HYBRID as hybrid
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot

warnings.simplefilter(action="ignore")


def selector(algo, func_details, popSize, Iter):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]

    if algo == "SSA":
        x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "PSO":
        x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "GA":
        x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "BAT":
        x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "FFA":
        x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "GWO":
        x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "WOA":
        x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "MVO":
        x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "MFO":
        x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "CS":
        x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "HHO":
        x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "SCA":
        x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "JAYA":
        x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "HYBRID":
        x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    else:
        return None  # was `return null`, which raises a NameError in Python
    return x


def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
    """
    It serves as the main interface of the framework for running the experiments.

    Parameters
    ----------
    optimizer : list
        The list of optimizer names
    objectivefunc : list
        The list of benchmark functions
    NumOfRuns : int
        The number of independent runs
    params : set
        The set of parameters which are:
        1. Size of population (PopulationSize)
        2. The number of iterations (Iterations)
    export_flags : set
        The set of Boolean flags which are:
        1. Export (Exporting the results in a file)
        2. Export_details (Exporting the detailed results in files)
        3. Export_convergence (Exporting the convergence plots)
        4. Export_boxplot (Exporting the box plots)

    Returns
    -----------
    N/A
    """

    # Select general parameters for all optimizers (population size, number of iterations) ....
    PopulationSize = params["PopulationSize"]
    Iterations = params["Iterations"]

    # Export results ?
    Export = export_flags["Export_avg"]
    Export_details = export_flags["Export_details"]
    Export_convergence = export_flags["Export_convergence"]
    Export_boxplot = export_flags["Export_boxplot"]

    Flag = False
    Flag_details = False

    # CSV header for the convergence data
    CnvgHeader = []

    results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
    Path(results_directory).mkdir(parents=True, exist_ok=True)

    for l in range(0, Iterations):
        CnvgHeader.append("Iter" + str(l + 1))

    for i in range(0, len(optimizer)):
        for j in range(0, len(objectivefunc)):
            convergence = [0] * NumOfRuns
            executionTime = [0] * NumOfRuns
            for k in range(0, NumOfRuns):
                func_details = benchmarks.getFunctionDetails(objectivefunc[j])
                x = selector(optimizer[i], func_details, PopulationSize, Iterations)
                convergence[k] = x.convergence
                optimizerName = x.optimizer
                objfname = x.objfname
                if Export_details == True:
                    ExportToFile = results_directory + "experiment_details.csv"
                    with open(ExportToFile, "a", newline="\n") as out:
                        writer = csv.writer(out, delimiter=",")
                        if (
                            Flag_details == False
                        ):  # just one time to write the header of the CSV file
                            header = numpy.concatenate(
                                [["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
                            )
                            writer.writerow(header)
                            Flag_details = True  # at least one experiment
                        executionTime[k] = x.executionTime
                        a = numpy.concatenate(
                            [[x.optimizer, x.objfname, x.executionTime], x.convergence]
                        )
                        writer.writerow(a)
                    out.close()

            if Export == True:
                ExportToFile = results_directory + "experiment.csv"
                with open(ExportToFile, "a", newline="\n") as out:
                    writer = csv.writer(out, delimiter=",")
                    if (
                        Flag == False
                    ):  # just one time to write the header of the CSV file
                        header = numpy.concatenate(
                            [["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
                        )
                        writer.writerow(header)
                        Flag = True

                    avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
                    avgConvergence = numpy.around(
                        numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
                    ).tolist()
                    a = numpy.concatenate(
                        [[optimizerName, objfname, avgExecutionTime], avgConvergence]
                    )
                    writer.writerow(a)
                out.close()

    if Export_convergence == True:
        conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)

    if Export_boxplot == True:
        box_plot.run(results_directory, optimizer, objectivefunc, Iterations)

    if Flag == False:  # Failed to run at least one experiment
        print(
            "No Optimizer or Cost function is selected. Check lists of available optimizers and cost functions"
        )

    print("Execution completed")
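For context, a minimal driver for the `run` interface above might look like the following sketch. The parameter and flag keys are taken from the code; the optimizer names and the benchmark name "F1" are hypothetical placeholders that must exist in the optimizers package and in benchmarks.getFunctionDetails for this to actually run.

params = {"PopulationSize": 30, "Iterations": 50}
export_flags = {
    "Export_avg": True,
    "Export_details": True,
    "Export_convergence": False,
    "Export_boxplot": False,
}
run(["PSO", "GWO"], ["F1"], NumOfRuns=3, params=params, export_flags=export_flags)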
38.173913
111
0.58955
790
7,024
5.168354
0.226582
0.040411
0.085721
0.099437
0.348273
0.3277
0.3277
0.312515
0.284595
0.273084
0
0.004765
0.312785
7,024
183
112
38.382514
0.841102
0.142939
0
0.151515
0
0
0.063758
0.003721
0
0
0
0
0
1
0.015152
false
0
0.174242
0
0.204545
0.015152
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167e133f17b315eee99f736bb553b46a271cd9cc
1,614
py
Python
tests/fields/test_primitive_types.py
slawak/dataclasses-avroschema
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
[ "MIT" ]
null
null
null
tests/fields/test_primitive_types.py
slawak/dataclasses-avroschema
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
[ "MIT" ]
null
null
null
tests/fields/test_primitive_types.py
slawak/dataclasses-avroschema
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
[ "MIT" ]
null
null
null
import dataclasses

import pytest

from dataclasses_avroschema import fields

from . import consts


@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types(primitive_type):
    name = "a_field"
    field = fields.Field(name, primitive_type, dataclasses.MISSING)
    avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]

    assert {"name": name, "type": avro_type} == field.to_dict()


@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types_with_default_value_none(primitive_type):
    name = "a_field"
    field = fields.Field(name, primitive_type, None)
    avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]

    assert {"name": name, "type": avro_type, "default": fields.NULL} == field.to_dict()


@pytest.mark.parametrize("primitive_type,default", consts.PRIMITIVE_TYPES_AND_DEFAULTS)
def test_primitive_types_with_default_value(primitive_type, default):
    name = "a_field"
    field = fields.Field(name, primitive_type, default)
    avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]

    assert {"name": name, "type": avro_type, "default": default} == field.to_dict()


@pytest.mark.parametrize(
    "primitive_type,invalid_default", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(primitive_type, invalid_default):
    name = "a_field"
    field = fields.Field(name, primitive_type, invalid_default)

    msg = f"Invalid default type. Default should be {primitive_type}"
    with pytest.raises(AssertionError, match=msg):
        field.to_dict()
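Based on the assertions above, constructing a Field directly and dumping it looks like this sketch. It is illustrative only and not part of the test module; the exact Avro type string is whatever fields.PYTHON_TYPE_TO_AVRO maps the Python type to.

import dataclasses
from dataclasses_avroschema import fields

field = fields.Field("a_field", int, dataclasses.MISSING)
print(field.to_dict())  # {"name": "a_field", "type": fields.PYTHON_TYPE_TO_AVRO[int]}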
34.340426
87
0.76456
213
1,614
5.464789
0.187793
0.178694
0.072165
0.103093
0.688144
0.636598
0.636598
0.556701
0.469931
0.426117
0
0
0.126394
1,614
46
88
35.086957
0.825532
0
0
0.193548
0
0
0.125155
0.032218
0
0
0
0
0.129032
1
0.129032
false
0
0.129032
0
0.258065
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
167f92f56a42d5741ea4dde46075bf065ebbe3cd
11,512
py
Python
Bindings/Python/examples/Moco/examplePredictAndTrack.py
mcx/opensim-core
c109f8cec3a81c732f335cd39752da6ae573b604
[ "Apache-2.0" ]
532
2015-03-13T18:51:10.000Z
2022-03-27T08:08:29.000Z
Bindings/Python/examples/Moco/examplePredictAndTrack.py
mcx/opensim-core
c109f8cec3a81c732f335cd39752da6ae573b604
[ "Apache-2.0" ]
2,701
2015-01-03T21:33:34.000Z
2022-03-30T07:13:41.000Z
Bindings/Python/examples/Moco/examplePredictAndTrack.py
mcx/opensim-core
c109f8cec3a81c732f335cd39752da6ae573b604
[ "Apache-2.0" ]
271
2015-02-16T23:25:29.000Z
2022-03-30T20:12:17.000Z
# -------------------------------------------------------------------------- # # OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford University and the Authors # # # # Author(s): Christopher Dembia # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # -------------------------------------------------------------------------- # import os import math import opensim as osim """ This file performs the following problems using a double pendulum model: 1. predict an optimal trajectory (and controls), 2. track the states from the optimal trajectory, and 3. track the marker trajectories from the optimal trajectory. """ visualize = True # The following environment variable is set during automated testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False # Create a model of a double pendulum. # ------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName("double_pendulum") # Create two links, each with a mass of 1 kg, center of mass at the body's # origin, and moments and products of inertia of zero. b0 = osim.Body("b0", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body("b1", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to body origin locations. m0 = osim.Marker("m0", b0, osim.Vec3(0)) m1 = osim.Marker("m1", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies with pin joints. Assume each body is 1 m long. j0 = osim.PinJoint("j0", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName("q0") j1 = osim.PinJoint("j1", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName("q1") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName("tau0") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName("tau1") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame("b0_center", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame("b1_center", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML("double_pendulum.osim") return model def solvePrediction(): # Predict the optimal trajectory for a minimum time swing-up. # In the diagram below, + represents the origin, and ---o represents a link # in the double pendulum. # # o # | # o # | # +---o---o + # # iniital pose final pose # study = osim.MocoStudy() study.setName("double_pendulum_predict") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. 
problem.setTimeBounds(0, [0, 5]) # Arguments are name, [lower bound, upper bound], # initial [lower bound, upper bound], # final [lower bound, upper bound]. problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0) problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0, 0) problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0) problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0, 0) problem.setControlInfo("/tau0", [-100, 100]) problem.setControlInfo("/tau1", [-100, 100]) # Cost: minimize final time and error from desired # end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName("final") finalCost.setWeight(1000.0) finalCost.setPointName("/markerset/m1") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver("ipopt") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState("/jointset/j0/q0/value", [0, -math.pi]) guess.setState("/jointset/j1/q1/value", [0, 2*math.pi]) guess.setState("/jointset/j0/q0/speed", [0, 0]) guess.setState("/jointset/j1/q1/speed", [0, 0]) guess.setControl("/tau0", [0, 0]) guess.setControl("/tau1", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to a setup file for reference. study.printToXML("examplePredictAndTrack_predict.omoco") # Solve the problem. solution = study.solve() solution.write("examplePredictAndTrack_predict_solution.sto") if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels(["/markerset/m0", "/markerset/m1"]) for state in statesTraj: model.realizePosition(state) m0 = model.getComponent("markerset/m0") m1 = model.getComponent("markerset/m1") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to each marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the optimal trajectory for a minimum time swing-up. study = osim.MocoStudy() study.setName("double_pendulum_track") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments are name, [lower bound, upper bound], # initial [lower bound, upper bound], # final [lower bound, upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0) problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0) problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0) problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0) problem.setControlInfo("/tau0", [-150, 150]) problem.setControlInfo("/tau1", [-150, 150]) # Cost: track provided state data. 
stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName("effort") effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver("ipopt") solver.set_optim_jacobian_approximation("exact") solver.set_optim_hessian_approximation("exact") solver.set_exact_hessian_block_sparsity_mode("dense") # Save the problem to a setup file for reference. study.printToXML("examplePredictAndTrack_track_states.omoco") # Solve the problem. solution = study.solve() solution.write("examplePredictAndTrack_track_states_solution.sto") if visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess): # Predict the optimal trajectory for a minimum time swing-up. study = osim.MocoStudy() study.setName("double_pendulum_track") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments are name, [lower bound, upper bound], # initial [lower bound, upper bound], # final [lower bound, upper bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0) problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0) problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0) problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0) problem.setControlInfo("/tau0", [-100, 100]) problem.setControlInfo("/tau1", [-100, 100]) # Cost: track provided marker data. markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName("effort") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver("ipopt") solver.set_optim_jacobian_approximation("exact") solver.set_optim_hessian_approximation("exact") solver.set_exact_hessian_block_sparsity_mode("dense") solver.setGuess(guess) # Save the problem to a setup file for reference. study.printToXML("examplePredictAndTrack_track_markers.omoco") # Solve the problem. solution = study.solve() solution.write("examplePredictAndTrack_track_markers_solution.sto") if visualize: study.visualize(solution) return solution optimalTrajectory = solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable()) trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
34.160237
79
0.633687
1,220
11,512
5.92623
0.24918
0.018672
0.014938
0.024896
0.465007
0.435131
0.430152
0.424066
0.418534
0.36722
0
0.036702
0.230803
11,512
336
80
34.261905
0.779785
0.278145
0
0.350282
0
0
0.119668
0.085819
0
0
0
0.002976
0
1
0.028249
false
0
0.016949
0
0.073446
0.022599
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1680b6fe6e7e3043a7d70ac1ab9bfc138b53e7ea
5,255
py
Python
pymapd/_parsers.py
mflaxman10/pymapd
00b72ae399a0ff829507ee0b3a2b7404f3a06c26
[ "Apache-2.0" ]
null
null
null
pymapd/_parsers.py
mflaxman10/pymapd
00b72ae399a0ff829507ee0b3a2b7404f3a06c26
[ "Apache-2.0" ]
null
null
null
pymapd/_parsers.py
mflaxman10/pymapd
00b72ae399a0ff829507ee0b3a2b7404f3a06c26
[ "Apache-2.0" ]
null
null
null
""" Utility methods for parsing data returned from MapD """ import datetime from collections import namedtuple from sqlalchemy import text import mapd.ttypes as T from ._utils import seconds_to_time Description = namedtuple("Description", ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"]) ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable", "precision", "scale", "comp_param"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val = getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif typename == 'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val = seconds_to_time(val) return val def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data, _typeattr[typename] + '_col') vals = [None if null else v for null, v in zip(nulls, vals)] base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': vals = [None if v is None else base + datetime.timedelta(seconds=v) for v in vals] elif typename == 'DATE': vals = [None if v is None else (base + datetime.timedelta(seconds=v)).date() for v in vals] elif typename == 'TIME': vals = [None if v is None else seconds_to_time(v) for v in vals] return vals def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] """ Return a tuple of (name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description """ return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for col in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc ] def _is_columnar(data): # type: (T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf): """ Load a `pyarrow.Schema` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema """ import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): """ Load a `pandas.DataFrame` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame """ import pyarrow as pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): """ Parse the results of a select ipc_gpu into a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns ------- gdf : GpuDataFrame """ import numpy as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from numba import cuda from numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) 
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v in reader.to_dict().items(): df[k] = v return df def _bind_parameters(operation, parameters): return (text(operation) .bindparams(**parameters) .compile(compile_kwargs={"literal_binds": True}))
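A small, hypothetical illustration of the `_bind_parameters` helper above; the table and parameter names are made up, and it relies only on SQLAlchemy's literal_binds compilation as used in the code.

compiled = _bind_parameters(
    "SELECT * FROM some_table WHERE x = :x AND name = :name",  # hypothetical query
    {"x": 1, "name": "a"},
)
print(str(compiled))  # SELECT * FROM some_table WHERE x = 1 AND name = 'a'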
27.952128
79
0.597146
632
5,255
4.78481
0.286392
0.020833
0.013228
0.037037
0.242063
0.230489
0.177579
0.170635
0.170635
0.140873
0
0.004536
0.286775
5,255
187
80
28.101604
0.802295
0.16784
0
0.132075
0
0
0.064753
0
0
0
0
0.005348
0
1
0.084906
false
0
0.122642
0.028302
0.301887
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16833799777639519b435db61702159dbc70cb57
20,687
py
Python
featuretools/entityset/entity.py
rohit901/featuretools
20bee224782acf94909c2bf33239fd5332a8c1de
[ "BSD-3-Clause" ]
1
2021-07-30T16:03:48.000Z
2021-07-30T16:03:48.000Z
featuretools/entityset/entity.py
rohit901/featuretools
20bee224782acf94909c2bf33239fd5332a8c1de
[ "BSD-3-Clause" ]
13
2021-03-04T19:29:21.000Z
2022-01-21T10:49:20.000Z
featuretools/entityset/entity.py
rohit901/featuretools
20bee224782acf94909c2bf33239fd5332a8c1de
[ "BSD-3-Clause" ]
2
2021-02-09T21:37:48.000Z
2021-12-22T16:13:27.000Z
import logging import warnings import dask.dataframe as dd import numpy as np import pandas as pd from featuretools import variable_types as vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): """Represents an entity in a Entityset, and stores relevant metadata and data An Entity is analogous to a table in a relational database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` """ def __init__(self, id, df, entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): """ Create Entity Args: id (str): Id of Entity. df (pd.DataFrame): Dataframe providing the data for the entity. entityset (EntitySet): Entityset for this Entity. variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's variable_types dict maps string variable ids to types (:class:`.Variable`) or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable. index (str): Name of id column in the dataframe. time_index (str): Name of time column in the dataframe. secondary_time_index (dict[str -> str]): Dictionary mapping columns in the dataframe to the time index column they are associated with. last_time_index (pd.Series): Time index of the last event for each instance across all child entities. make_index (bool, optional) : If True, assume index does not exist as a column in dataframe, and create a new column of that name using integers the (0, len(dataframe)). Otherwise, assume index exists in dataframe. 
""" _validate_entity_params(id, df, time_index) created_index, index, df = _create_index(index, make_index, df) self.id = id self.entityset = entityset self.data = {'df': df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose = verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id for v in self.variables]] self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u"Entity: {}\n".format(self.id) repr_out += u" Variables:" for v in self.variables: repr_out += u"\n {} (dtype: {})".format(v.id, v.type_string) shape = self.shape repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format( shape[0], shape[1]) return repr_out @property def shape(self): '''Shape of the entity's dataframe''' return self.df.shape def __eq__(self, other, deep=False): if self.index != other.index: return False if self.time_index != other.time_index: return False if self.secondary_time_index != other.secondary_time_index: return False if len(self.variables) != len(other.variables): return False if set(self.variables) != set(other.variables): return False if deep: if self.last_time_index is None and other.last_time_index is not None: return False elif self.last_time_index is not None and other.last_time_index is None: return False elif self.last_time_index is not None and other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df): return False variables = {variable: (variable, ) for variable in self.variables} for variable in other.variables: variables[variable] += (variable, ) for self_var, other_var in variables.values(): if not self_var.__eq__(other_var, deep=True): return False return True def __sizeof__(self): return sum([value.__sizeof__() for value in self.data.values()]) @property def df(self): '''Dataframe providing the data for the entity.''' return self.data["df"] @df.setter def df(self, _df): self.data["df"] = _df @property def last_time_index(self): ''' Time index of the last event for each instance across all child entities. ''' return self.data["last_time_index"] @last_time_index.setter def last_time_index(self, lti): self.data["last_time_index"] = lti def __hash__(self): return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): """Get variable instance Args: variable_id (str) : Id of variable to get. Returns: :class:`.Variable` : Instance of variable. Raises: RuntimeError : if no variable exist with provided id """ for v in self.variables: if v.id == variable_id: return v raise KeyError("Variable: %s not found in entity" % (variable_id)) @property def variable_types(self): '''Dictionary mapping variable id's to variable types''' return {v.id: type(v) for v in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): """Convert variable in dataframe to different type Args: variable_id (str) : Id of variable to convert. new_type (subclass of `Variable`) : Type of variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity. convert_data (bool) : If True, convert underlying data in the EntitySet. 
Raises: RuntimeError : Raises if it cannot convert the underlying data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical) """ if convert_data: # first, convert the underlying data (or at least try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the old variable with the new one, maintaining order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index): """Extracts the variables from a dataframe Args: variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's variable_types dict maps string variable ids to types (:class:`.Variable`) or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable. index (str): Name of index column time_index (str or None): Name of time_index column secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns that each map to a list of columns that depend on that secondary time """ variables = [] variable_types = variable_types.copy() or {} string_to_class_map = find_variable_types() # TODO: Remove once Text has been removed from variable types string_to_class_map[Text.type_string] = Text for vid in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype)) if index not in variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO document how vtype can be tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v, self) variables += [_v] # convert data once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is at the beginning index_variable = [v for v in variables if v.id == index][0] self.variables = [index_variable] + [v for v in variables if v.id != index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making sure data is sorted, reference indexes to other entities are consistent, and last_time_indexes are consistent. 
''' if len(df.columns) != len(self.variables): raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id not in df.columns: raise ValueError("Updated dataframe is missing new {} column".format(v.id)) # Make sure column ordering matches variable ordering self.df = df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): """ Find interesting values for categorical variables, to be used to generate "where" clauses Args: max_values (int) : Maximum number of values per variable to add. verbose (bool) : If True, print summary of interesting values found. Returns: None """ for variable in self.variables: # some heuristics to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints # don't add interesting values for entities in relationships skip = False for r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip = True break if skip: continue counts = self.df[variable.id].value_counts() # find how many of each unique value there are; sort by count, # and add interesting values to each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx = counts.index[i] # add the value to interesting_values if it represents more than # 25% of the values we have not seen so far if len(counts.index) < 25: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if fraction > 0.05 and fraction < 0.95: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): """ Remove variables from entity's dataframe and from self.variables Args: variable_ids (list[str]): Variables to delete Returns: None """ # check if variable is not a list if not isinstance(variable_ids, list): raise TypeError('variable_ids must be a list of variable names') if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check time type if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError("%s time index not recognized as numeric or" " datetime" % (self.id)) if self.entityset.time_type is None: self.entityset.time_type = time_type elif self.entityset.time_type != time_type: 
raise TypeError("%s time index is %s type which differs from" " other entityset time indexes" % (self.id, time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'): t = time_type # skip checking values already_sorted = True # skip sorting else: t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use stable sort if not already_sorted: # sort by time variable, then by index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id def set_index(self, variable_id, unique=True): """ Args: variable_id (string) : Name of an existing variable to set as index. unique (bool) : Whether to assert that the index is unique. """ if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique: assert self.df.index.is_unique, "Index is not unique on dataframe " \ "(Entity {})".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def set_secondary_time_index(self, secondary_time_index): for time_index, columns in secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError("%s time index not recognized as numeric or" " datetime" % (self.id)) if self.entityset.time_type != time_type: raise TypeError("%s time index is %s type which differs from" " other entityset time indexes" % (self.id, time_type)) if time_index not in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index, make_index, df): '''Handles index creation logic base on user input''' created_index = None if index is None: # Case 1: user wanted to make index but did not specify column name assert not make_index, "Must specify an index name if make_index is True" # Case 2: make_index not specified but no index supplied, use first column warnings.warn(("Using first column as index. " "To change this, specify the index parameter")) index = df.columns[0] elif make_index and index in df.columns: # Case 3: user wanted to make index but column already exists raise RuntimeError("Cannot make index: index variable already present") elif index not in df.columns: if not make_index: # Case 4: user names index, it is not in df. does not specify # make_index. Make new index column and warn warnings.warn("index {} not found in dataframe, creating new " "integer column".format(index)) # Case 5: make_index with no errors or warnings # (Case 4 also uses this code path) if isinstance(df, dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index = index # Case 6: user specified index, which is already in df. No action needed. 
return created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs''' assert isinstance(id, str), "Entity id must be a string" assert len(df.columns) == len(set(df.columns)), "Duplicate column names" for c in df.columns: if not isinstance(c, str): raise ValueError("All column names must be strings (Column {} " "is not a string)".format(c)) if time_index is not None and time_index not in df.columns: raise LookupError('Time index not found in dataframe')
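The methods above come from an Entity that hangs off an EntitySet, so a typical way to exercise add_interesting_values and delete_variables is through the entityset. A minimal sketch, assuming a featuretools 0.x release where entities are reachable as es['name']; the dataframe and column names below are made up for illustration:

import pandas as pd
import featuretools as ft

df = pd.DataFrame({
    "id": [0, 1, 2, 3],
    "category": ["a", "a", "b", "c"],
    "signup": pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04"]),
})

es = ft.EntitySet(id="demo")
es.entity_from_dataframe(entity_id="users", dataframe=df, index="id", time_index="signup")

# Mark frequent categorical values so DFS can later build "where" clauses from them.
es["users"].add_interesting_values(max_values=2, verbose=True)

# Drop a column from both the dataframe and the entity's variable list.
es["users"].delete_variables(["category"])
print(es["users"].variables)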
42.391393
124
0.592353
2,505
20,687
4.708583
0.144511
0.054939
0.028995
0.008902
0.313607
0.258499
0.215854
0.184824
0.158033
0.148283
0
0.002448
0.328709
20,687
487
125
42.478439
0.846907
0.240731
0
0.187075
0
0
0.081546
0.001466
0
0
0
0.004107
0.013605
1
0.078231
false
0
0.037415
0.010204
0.193878
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168519bcca14cbc5945efcceae792622fe09d3d9
25,777
py
Python
RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py
AlsikeE/Ez
2f84ac1896a5b6d8f467c14d3618274bdcfd2cad
[ "Apache-2.0" ]
null
null
null
RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py
AlsikeE/Ez
2f84ac1896a5b6d8f467c14d3618274bdcfd2cad
[ "Apache-2.0" ]
null
null
null
RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py
AlsikeE/Ez
2f84ac1896a5b6d8f467c14d3618274bdcfd2cad
[ "Apache-2.0" ]
1
2021-05-08T02:23:00.000Z
2021-05-08T02:23:00.000Z
import itertools from ez_lib import ez_flow_tool from collections import defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import constants, logger from domain.message import * from collections import deque from misc import global_vars import time import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x in self.switches} self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time = {x: -1 for x in self.switches} ########### End three properties are used for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x in self.switches} self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time = {x: -1 for x in self.switches} ########### End three properties are used for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def __str__(self): return "Centralized Controller" @staticmethod def init_logger(): return logger.getLogger("Centralized Controller", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info("links by endpoints %s segs_by_segpath_id %s" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info("time to compute dependency graph: %s" % str(time() * 1000 - time_start_computing)) def process_coherent(self): send_to_sames = set() for key in self.to_sames.keys(): to_same = self.to_sames[key] for sw in to_same: send_to_sames.add(sw) # for sw in send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info("start finding dependency loop and sort updates") mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) 
self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased = set() related_sws = set([]) for key in update_infos.keys(): update_info = update_infos[key] # self.logger.info("Process update info %s at %d ms from starting" % (update_info, (time() - self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if sw not in increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug("add message in processing update_info: %s" % update_info) self.log.debug("pending messages: %s" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws def increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def check_all_capable_for_link(self, link, executable_segments_by_link): # capable_segments = [] # done_loop = True # endpoints = (link.src, link.dst) # total_vol = 0 # for op in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING: # done_loop = False # total_vol += l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {} # executable_link_by_segments = {} # for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol = 0 for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol += 
self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self, link, l_segment): pass def splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding or removing segment final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only = False for op in link.to_adds_only: if op.seg_path_id == seg_path_id: return 0 splittable, split_vol = self.check_to_split(link, l_segment) if splittable and final_split_vol > split_vol > 0: final_split_vol = split_vol self.log.debug("capable %s" % l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution = True while has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): # self.log.debug("send to sw%s" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True for pair in l_segment.new_link_seg: self.log.info("avail_cap of link %s: %f, " "give %f to segment %s" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug("number of flows that is not done anything %d" % count) def check_possible_update_by_links(self, update_infos): has_execution = True while has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol 
count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug("number of flows that is not done anything %d" % count) def check_and_send_possible_updates(self, update_infos): has_execution = True while has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info("chk&send psb_uds for linksegment %s"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug("number of flows that is not done anything %d" % count) def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug("handle updated msg %s" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info("receive updated msgs for segment %s, new_seg_length = %d" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING \ and len(self.received_updated_msg[msg.seg_path_id]) == \ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL else: # self.log.info("receive enough updated msgs for segment %s" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo) self.log.debug("handle removed msg %s" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id != link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1 if next_idx < len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info("avail_cap of link %d->%d: %f, " # "get from 
segment %s" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL self.log.debug("finish %s" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count = 0 finished = True for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: update_status = '' if link_segment.update_status == constants.NOTHING: count += 1 update_status = "NOTHING" if link_segment.update_status == constants.SENT_ADDING: self.log.debug("must receive %d more UPDATED msgs" % (len(link_segment.new_seg)-1)) self.log.debug("received from: %s" % self.received_updated_msg[link_segment.seg_path_id]) update_status = "SENT_ADDING" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug("must receive %d more REMOVED msgs" % (len(link_segment.old_seg)-1)) self.log.debug("received from: %s" % self.received_removed_msg[link_segment.seg_path_id]) update_status = "SENT REMOVING" elif link_segment.update_status == constants.FINISH_ADDING: update_status = "FINISH_ADDING" elif link_segment.update_status == constants.FINISH_REMOVING: update_status = "FINISH_REMOVING" self.log.debug("segment %s is not finished! update_status %s." % (str(link_segment.seg_path_id), update_status)) # return False finished = False break has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING elif not finished: self.log.debug("number of flows that is not done anything %d" % count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug("pending queue: %s" % str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if queue_len > 0: return False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info("avail_cap of link %d->%d: %f, " # "get from segment %s" % (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for i in range(len(l_segment.old_seg) - 1): # self.log.debug("send to: %s" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \ or current_state == constants.SENT_ADDING: return False return True def is_capable(self, seg_path_id): # TODO: Update 
remaining_vol_of_loop when adding or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op in link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op = True break is_add_only = False for op in link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only = True break if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \ or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\ or (is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)): return False self.log.debug("capable %s" % l_segment) return True
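The scheduler above leans on Python 2 idioms such as dict.has_key() and time.clock(); a small sketch of the Python 3 equivalents on toy data (illustrative only, not tied to the scheduler's own structures):

import time

update_infos = {}
seg_path_id = (1, 2)

# dict.has_key(key) in the code above becomes a plain membership test:
if seg_path_id not in update_infos:
    update_infos[seg_path_id] = []

# time.clock() was removed in Python 3.8; perf_counter() is the usual replacement
# for the "how long did this take" measurements used above.
start = time.perf_counter()
elapsed = time.perf_counter() - start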
54.039832
136
0.58141
3,169
25,777
4.343326
0.07384
0.057469
0.073888
0.022377
0.706771
0.654752
0.603894
0.541703
0.498838
0.46687
0
0.003894
0.342398
25,777
476
137
54.153361
0.808094
0.105559
0
0.455497
0
0
0.033022
0
0
0
0
0.002101
0.007853
1
0.075916
false
0.002618
0.028796
0.007853
0.159686
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16863f0872927e8b824cd132c78fbf22829a951a
892
py
Python
src/trackbar.py
clovadev/opencv-python
f9c685f8dc658f630a9742f4dd55663bde03fe7d
[ "MIT" ]
null
null
null
src/trackbar.py
clovadev/opencv-python
f9c685f8dc658f630a9742f4dd55663bde03fe7d
[ "MIT" ]
null
null
null
src/trackbar.py
clovadev/opencv-python
f9c685f8dc658f630a9742f4dd55663bde03fe7d
[ "MIT" ]
null
null
null
import numpy as np import cv2 as cv def nothing(x): pass # Create a black image, a window img = np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') # create trackbars for color change cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B', 'image', 0, 255, nothing) # create switch for ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing) while True: # get current positions of four trackbars r = cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image') # Black and white if the switch is off, color if it is on if s == 0: img[:] = 0 else: img[:] = [b, g, r] # Display the image cv.imshow('image', img) if cv.waitKey(10) > 0: break cv.destroyAllWindows()
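An alternative to polling getTrackbarPos inside the loop is to let the trackbar callback do the work; a minimal sketch, with an arbitrary window name and a callback that only drives the red channel:

import numpy as np
import cv2 as cv

img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('demo')

def on_red(value):
    # The callback receives the new slider position directly (BGR order, so the
    # red channel is the last element).
    img[:] = [0, 0, value]

cv.createTrackbar('R', 'demo', 0, 255, on_red)

while True:
    cv.imshow('demo', img)
    if cv.waitKey(10) > 0:
        break
cv.destroyAllWindows()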
22.3
49
0.618834
130
892
4.246154
0.469231
0.115942
0.048913
0.086957
0.115942
0.115942
0
0
0
0
0
0.04058
0.226457
892
39
50
22.871795
0.75942
0.197309
0
0
0
0
0.087447
0
0
0
0
0
0
1
0.041667
false
0.041667
0.083333
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1687efc3eb23ad09ae90d5260997fa4ec210ea9f
1,246
py
Python
aoc_2015/src/day20.py
ambertests/adventofcode
140ed1d71ed647d30d1e6572964cab1e89dfd105
[ "MIT" ]
null
null
null
aoc_2015/src/day20.py
ambertests/adventofcode
140ed1d71ed647d30d1e6572964cab1e89dfd105
[ "MIT" ]
null
null
null
aoc_2015/src/day20.py
ambertests/adventofcode
140ed1d71ed647d30d1e6572964cab1e89dfd105
[ "MIT" ]
null
null
null
from functools import reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2 if n%2 else 1 return set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5)+1, step) if not n % i))) def solve(target): house_count = 0 deliveries = {} complete = set() pt1 = 0 pt2 = 0 while pt1 == 0 or pt2 == 0: house_count += 1 gifts1 = 0 gifts2 = 0 elves = factors(house_count) if pt1 == 0: gifts1 = sum(elves)*10 if gifts1 >= target: pt1 = house_count if pt2 == 0: working = elves.difference(complete) for elf in working: if elf in deliveries: deliveries[elf] += 1 if deliveries[elf] == 50: complete.add(elf) else: deliveries[elf] = 1 gifts2 = sum(working)*11 if gifts2 >= target: pt2 = house_count return pt1, pt2 # takes around 20s pt1, pt2 = solve(29000000) print("Part 1:", pt1) print("Part 2:", pt2)
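A couple of sanity checks for the helpers above (assuming factors() and solve() are already defined in the session; importing the file as a module would also trigger the ~20-second solve(29000000) call at the bottom):

assert factors(12) == {1, 2, 3, 4, 6, 12}
assert factors(1) == {1}
# House 2 receives (1 + 2) * 10 = 30 gifts in part 1 and (1 + 2) * 11 = 33 in part 2,
# so a target of 30 is first reached at house 2 for both parts.
assert solve(30) == (2, 2)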
27.688889
125
0.50321
158
1,246
3.911392
0.424051
0.080906
0.038835
0
0
0
0
0
0
0
0
0.08366
0.386035
1,246
44
126
28.318182
0.724183
0.11236
0
0
0
0
0.012704
0
0
0
0
0
0
1
0.055556
false
0
0.027778
0
0.138889
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1688724e3867c7e8e39adb6579cee704e885e634
1,604
py
Python
setup.py
jean/labels
dcb6f40fb4e222068e302202dd5d7d98b4771e4b
[ "MIT" ]
1
2019-11-06T14:08:40.000Z
2019-11-06T14:08:40.000Z
setup.py
jean/labels
dcb6f40fb4e222068e302202dd5d7d98b4771e4b
[ "MIT" ]
null
null
null
setup.py
jean/labels
dcb6f40fb4e222068e302202dd5d7d98b4771e4b
[ "MIT" ]
null
null
null
import pathlib import setuptools def read(*args: str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text("utf-8") setuptools.setup( name="labels", version="0.3.0.dev0", author="Raphael Pierzina", author_email="raphael@hackebrot.de", maintainer="Raphael Pierzina", maintainer_email="raphael@hackebrot.de", license="MIT", url="https://github.com/hackebrot/labels", project_urls={ "Repository": "https://github.com/hackebrot/labels", "Issues": "https://github.com/hackebrot/labels/issues", }, description="CLI app for managing GitHub labels for Python 3.6 and newer. 📝", long_description=read("README.md"), long_description_content_type="text/markdown", packages=setuptools.find_packages("src"), package_dir={"": "src"}, include_package_data=True, zip_safe=False, python_requires=">=3.6", install_requires=["click", "requests", "pytoml", "attrs"], entry_points={"console_scripts": ["labels = labels.cli:labels"]}, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Utilities", ], keywords=["github", "command-line"], )
34.12766
81
0.640898
176
1,604
5.715909
0.579545
0.027833
0.099404
0.068588
0.09841
0.069583
0
0
0
0
0
0.011691
0.200125
1,604
46
82
34.869565
0.771629
0
0
0
0
0
0.471945
0
0
0
0
0
0
1
0.02381
false
0
0.047619
0
0.095238
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1689397a49d0387c8d71492ecee794b05a45ba83
862
py
Python
src/ralph/ui/forms/util.py
quamilek/ralph
bf7231ea096924332b874718b33cd1f43f9c783b
[ "Apache-2.0" ]
null
null
null
src/ralph/ui/forms/util.py
quamilek/ralph
bf7231ea096924332b874718b33cd1f43f9c783b
[ "Apache-2.0" ]
null
null
null
src/ralph/ui/forms/util.py
quamilek/ralph
bf7231ea096924332b874718b33cd1f43f9c783b
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from ralph.business.models import Venture, VentureRole def all_ventures(): yield '', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, "%s[%s] %s" % ( '\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break space' v.symbol, v.name, ) ) def all_roles(): yield '', '---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield r.id, '{} / {}'.format(r.venture.name, r.full_name)
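Both generators yield (value, label) pairs, which is the shape Django form choices expect. A minimal sketch of wiring them into a form; the form and field names are assumptions for illustration:

from django import forms

from ralph.ui.forms.util import all_ventures, all_roles


class AssignForm(forms.Form):
    venture = forms.ChoiceField(required=False)
    role = forms.ChoiceField(required=False)

    def __init__(self, *args, **kwargs):
        super(AssignForm, self).__init__(*args, **kwargs)
        # Evaluate the generators per form instance so the database is only
        # queried when a form is actually built.
        self.fields['venture'].choices = list(all_ventures())
        self.fields['role'].choices = list(all_roles())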
28.733333
78
0.558005
97
862
4.57732
0.494845
0.09009
0.144144
0
0
0
0
0
0
0
0
0.013093
0.291183
862
29
79
29.724138
0.713584
0.054524
0
0.086957
0
0
0.149015
0.033251
0
0
0
0
0
1
0.086957
false
0
0.217391
0
0.304348
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168b7cd601c412154d052fac8164eeb139aec911
4,769
py
Python
services/users/manage.py
eventprotocol/event-protocol-webapp
38ccdc63bc744576ebb3631b7e17cfd4a09216b6
[ "MIT" ]
null
null
null
services/users/manage.py
eventprotocol/event-protocol-webapp
38ccdc63bc744576ebb3631b7e17cfd4a09216b6
[ "MIT" ]
11
2020-09-05T14:16:23.000Z
2022-03-03T22:33:14.000Z
services/users/manage.py
eventprotocol/event-protocol-webapp
38ccdc63bc744576ebb3631b7e17cfd4a09216b6
[ "MIT" ]
null
null
null
""" manage.py for flask application """ import unittest import coverage import os from flask.cli import FlaskGroup from project import create_app, db from project.api.models import User # Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): """ Runs the unit tests with coverage """ tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase() return 0 return -1 @cli.command() def recreate_db(): """ Destroys all db and recreates a new db """ db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): """ Runs test without code coverage """ tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return -1 @cli.command() def seed_db(): """ Seeds the database with some initial data """ user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = "Meeting Room Of The Century" user1.email = "info@meetmeetrevolution.com" user1.city_country = "Singapore, SG" user1.tags = "Meeting Spaces" user1.about = '''This is the best meeting space you will ever see''' user1.seller_detail = '''We sell space''' user1.buyer_detail = '''We are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = "Makeup Till You Breakup" user2.email = "info@makeupbreakup.com" user2.city_country = "Singapore, SG" user2.tags = "Stylist" user2.about = '''Reimagine your looks with us''' user2.seller_detail = '''We are serving looks tonight''' user2.buyer_detail = '''We are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = "Heart Attack Buffet" user3.email = "info@buffettothemax.com" user3.city_country = "Singapore, SG" user3.tags = "Buffet" user3.about = '''Eat till you get a heart attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail = '''We are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = "Pleasant Photography" user4.email = "info@pleasantphoto.com" user4.city_country = "Singapore, SG" user4.tags = "Photography" user4.about = ('We are a group of photographers specialized in wedding' 'photography. ' 'We have won numerous awards for our photos. ' 'We will capture your ' 'memories in ways you cannot imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail = '''We are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = "Epic Winebar" user5.email = "epic@alcoholtothemax.com" user5.city_country = "Singapore, SG" user5.tags = "Bar, Restaurant" user5.about = ('Award winnning winebar with the best selection of alcohol.' 'We serve delicious international cuisine, with fusion' 'dishes inspired from our travels. 
We are always ready for' 'your craziest events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail = '''We are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = "Dancers Who Dance" user6.email = "dance@dancealot.com" user6.city_country = "Singapore, SG" user6.tags = "Performer" user6.about = ('Dancers who dance are people who like to dance alot.' 'Give us music and we will dance for you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail = '''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__ == '__main__': cli()
29.621118
79
0.642063
558
4,769
5.40681
0.367384
0.03182
0.025522
0.043752
0.1412
0.127942
0.078223
0.044415
0.044415
0.044415
0
0.061488
0.236108
4,769
160
80
29.80625
0.766676
0.04047
0
0.116667
0
0
0.34228
0.082076
0
0
0.055901
0
0
1
0.033333
false
0
0.05
0
0.116667
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168bb7123d253d48e67b56f36bbcad938db24dd7
1,750
py
Python
keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py
erelcan/keras-transformer
ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f
[ "Apache-2.0" ]
3
2021-02-14T17:10:59.000Z
2021-02-14T18:09:17.000Z
keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py
erelcan/keras-transformer
ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f
[ "Apache-2.0" ]
null
null
null
keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py
erelcan/keras-transformer
ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f
[ "Apache-2.0" ]
null
null
null
import os from keras.callbacks import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, "model-{epoch:01d}.h5"), **kwargs) self._workspace_path = workspace_path self._artifacts = artifacts self._completed_epoch = 0 self._callbacks = callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts() should_save = False if self.epochs_since_last_save == 0: if self.save_best_only: current = logs.get(self.monitor) if current == self.best: should_save = True else: should_save = True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, "artifacts-" + str(epoch+1) + ".pkl")) def update_artifacts(self): for callback in self._callbacks: self._artifacts["callbacks"][callback.get_name()] = callback.get_artifacts() self._artifacts["callbacks"][self.get_name()] = self.get_artifacts() def get_name(self): return self.__class__.__name__ def get_artifacts(self): return {"best_score": self.best, "completed_epoch": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best = artifacts["best_score"] self._completed_epoch = artifacts["completed_epoch"]
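A minimal sketch of wiring the checkpointer into a training run. The artifacts layout and workspace path are assumptions; the class above only requires that artifacts['callbacks'] exists and that any callbacks passed in expose get_name() and get_artifacts():

import numpy as np
from keras import models, layers

from keras_transformer.training.custom_callbacks.CustomCheckpointer import CustomCheckpointer

model = models.Sequential([layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

artifacts = {"callbacks": {}}          # layout assumed; only the "callbacks" key is required
checkpointer = CustomCheckpointer(
    workspace_path="/tmp/workspace",   # assumed, pre-existing directory
    artifacts=artifacts,
    callbacks=[],                      # other CustomCallbackABC instances, if any
    monitor="loss",
    save_best_only=True,
)

x = np.random.rand(32, 4)
y = np.random.rand(32, 1)
model.fit(x, y, epochs=2, callbacks=[checkpointer])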
35
117
0.671429
202
1,750
5.460396
0.29703
0.070716
0.065277
0.047144
0
0
0
0
0
0
0
0.005189
0.229143
1,750
49
118
35.714286
0.812454
0
0
0.055556
0
0
0.058286
0
0
0
0
0
0
1
0.166667
false
0
0.111111
0.055556
0.361111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168da4e09bd5b50aa5b8cd08e50f215c17b399b2
608
py
Python
leetcode/regex_matching.py
Kaushalya/algo_journal
bcea8afda0dc86b36452378e3bcff9b0f57d6856
[ "Apache-2.0" ]
null
null
null
leetcode/regex_matching.py
Kaushalya/algo_journal
bcea8afda0dc86b36452378e3bcff9b0f57d6856
[ "Apache-2.0" ]
null
null
null
leetcode/regex_matching.py
Kaushalya/algo_journal
bcea8afda0dc86b36452378e3bcff9b0f57d6856
[ "Apache-2.0" ]
null
null
null
# Level: Hard def isMatch(s: str, p: str) -> bool: if not p: return not s n_s = len(s) n_p = len(p) j = 0 i = -1 while i < n_s-1: i = i+ 1 if j >= n_p: return False if p[j] == '*': while s[i]==s[i-1]: i += 1 j += 1 if p[j] == '.' or s[i] == p[j]: j += 1 # continue elif s[i] != p[j] and j<n_p-1: j += 2 else: return False return True if __name__ == "__main__": ss = 'abbbbbc' p = 'a*' print(isMatch(ss, p))
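For a cross-check, the same problem can be answered with the standard library, since '.' and '*' here behave like their re counterparts once the pattern is anchored to the whole string (illustrative, not part of the original file):

import re

def is_match_re(s: str, p: str) -> bool:
    # fullmatch anchors the pattern to the whole string, matching the problem's
    # semantics for '.' (any character) and '*' (zero or more of the previous).
    return re.fullmatch(p, s) is not None

print(is_match_re('abbbbbc', 'ab*c'))  # True
print(is_match_re('abbbbbc', 'a*'))    # False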
17.882353
39
0.361842
93
608
2.225806
0.344086
0.048309
0.028986
0.038647
0
0
0
0
0
0
0
0.031847
0.483553
608
34
40
17.882353
0.627389
0.032895
0
0.153846
0
0
0.032423
0
0
0
0
0
0
1
0.038462
false
0
0
0
0.192308
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168db9c8444379377b3a611c0a5f87f083f3ec4d
3,217
py
Python
tests/factories.py
luzik/waliki
b7db696075ceebb5676be61f44e2d806cc472255
[ "BSD-3-Clause" ]
324
2015-01-02T20:48:33.000Z
2021-12-11T14:44:34.000Z
tests/factories.py
luzik/waliki
b7db696075ceebb5676be61f44e2d806cc472255
[ "BSD-3-Clause" ]
103
2015-01-02T03:01:34.000Z
2020-04-02T19:03:53.000Z
tests/factories.py
luzik/waliki
b7db696075ceebb5676be61f44e2d806cc472255
[ "BSD-3-Clause" ]
84
2015-01-07T08:53:05.000Z
2021-01-04T00:26:38.000Z
import factory from django.contrib.auth.models import User, Group, Permission from waliki.models import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', 'pass') email = factory.LazyAttribute(lambda o: '%s@example.org' % o.username) class Meta: model = User @factory.post_generation def groups(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: # A list of groups were passed in, use them for group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group name = factory.Sequence(lambda n: "Group #%s" % n) @factory.post_generation def users(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: # A list of groups were passed in, use them for user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule name = factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: # A list of groups were passed in, use them for perm in extracted: if not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: # A list of groups were passed in, use them for user in extracted: self.users.add(user) @factory.post_generation def groups(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: # A list of groups were passed in, use them for group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return if extracted: self.raw = extracted class Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class Meta: model = Redirect
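A sketch of how these factories are typically used in a test, assuming a Django test database is set up and that the 'view_page' permission codename exists under the waliki app:

from tests.factories import UserFactory, GroupFactory, ACLRuleFactory, PageFactory

group = GroupFactory(users=[UserFactory(), UserFactory()])
page = PageFactory(title="Demo", raw="Hello world")
rule = ACLRuleFactory(slug=page.slug, groups=[group], permissions=['view_page'])

assert group.user_set.count() == 2
assert rule.groups.filter(pk=group.pk).exists()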
28.723214
98
0.608642
380
3,217
5.115789
0.218421
0.061728
0.08642
0.090535
0.640432
0.600823
0.526749
0.526749
0.526749
0.51286
0
0.003099
0.297793
3,217
111
99
28.981982
0.857459
0.11346
0
0.577465
0
0
0.036633
0
0
0
0
0
0
1
0.084507
false
0.014085
0.042254
0
0.492958
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168dc722af15d363851566ae2eeabcf9ccc50653
68,372
py
Python
nxt_editor/commands.py
dalteocraft/nxt_editor
18992da7cfa89769568434ec08d787510e09f1c4
[ "MIT" ]
131
2020-12-03T08:01:26.000Z
2022-03-07T03:41:37.000Z
nxt_editor/commands.py
dalteocraft/nxt_editor
18992da7cfa89769568434ec08d787510e09f1c4
[ "MIT" ]
127
2020-12-07T21:43:02.000Z
2022-02-17T22:31:14.000Z
nxt_editor/commands.py
dalteocraft/nxt_editor
18992da7cfa89769568434ec08d787510e09f1c4
[ "MIT" ]
17
2020-12-08T08:06:44.000Z
2021-11-18T05:40:11.000Z
# Built-in import copy import logging import time # External from Qt.QtWidgets import QUndoCommand # Internal from nxt_editor import colors from nxt_editor import user_dir from nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path): """Gets the effected state for a given layer with context to this command. Since a single command can effect layers in different ways. :param layer_path: string of layer real path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) """ first_eff_by_undo = False first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): """When the model marks a layer as saved we reset the class attr `_first_effected_by_redo` to False. This makes sure the layer is properly marked as unsaved even if we undo an action after saving it. :param layer_just_saved: string of layer real path :return: None """ eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is self: return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This command has already been marked as undo effects the # layer, meaning the layer has been saved and the undo queue # was moved to an index before this command and the same # layer was saved again. eff_by_redo = True eff_by_undo = False else: # Now the undo of this command effects the layer not the redo eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): """Adds layer to the model's set of effected (unsaved) layers. If this command was the first to effect the layer we mark it as such by setting the class attr `_first_effected_by_redo` to True. :param layer_path: string of layer real path :return: None """ layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was saved and then undo was called, thus this redo has a # net zero effect on the layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action pass def undo_effected_layer(self, layer_path): """Removes layer from the model's set of effected (unsaved) layers. If the layer is not marked as effected in the model we mark it as effected. This case happens when undo is called after a layer is saved. 
:param layer_path: string of layer real path :return: None """ eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers if layer_saved: eff_by_undo = True # Set redo to False since now its been saved & the undo effects it eff_by_redo = False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): """Add a node to the graph""" def __init__(self, name, data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name = name self.data = data self.parent_path = parent_path self.layer_path = layer_path self.stage = model.stage # command data self.pos = pos or [0.0, 0.0] self.prev_selection = self.model.selection # resulting node self.node_path = None self.created_node_paths = [] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if node is not None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data = False comp_layer = self.model.comp_layer if node is not None: # delete node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes): """Delete node from the layer at the layer path and the comp layer. It is important to note that the other_removed_nodes list must be shared by other DeleteNode commands in a command macro. The list will be mutated by the stage as it deletes node, this behavior is depended upon! :param node_path: String of node path :param model: StageModel :param layer_path: String of layer realpath :param other_removed_nodes: list of node paths that will be deleted in this event loop. 
""" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage = model.stage # get undo data self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks = {} self.node_path = node_path self.node_data = {} self.others = other_removed_nodes @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent'] # We don't want to fix names because we know this node should be # named what it was named when it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos # This might be a bug? We don't touch the top layer in redo... self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does not account for rebuilding proxy nodes for the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path) # get node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display'] = attr_display # get layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if 
source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data = False for p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText("Delete node: {}".format(self.node_path)) class SetNodeAttributeData(NxtCommand): """Set attribute value""" def __init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name self.attr_name = attr_name self.data = data self.stage = model.stage self.layer_path = layer_path self.created_node_paths = [] self.remove_attr = False self.prev_data = {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection @processing def undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path] # delete any created nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # undo_debug(self, start) @processing def redo(self): start = time.time() created_node = False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False self.created_node_paths = [] # get the node node = layer.lookup(self.node_path) dirties = [self.node_path] if node is None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: 
Targeted parenting would avoid the need for a recomp if layer.descendants(self.node_path): self.recomp = True created_node = True self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute value this also adds the attribute if it does not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText("Set {} to {}".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): """Rename node""" def __init__(self, node_path, name, model, layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText("{} renamed to {}".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): """Duplicate nodes on this graph""" def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): # TODO: We should make another base command class that can be used to # set multiple attr's data. That way duplicate can just be a # setattr. The way it works now we can only set one attr's data at a # time and duplicate needs to get local + INTERNAL number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage # get undo data self.prev_selection = self.model.selection # resulting nodes self.new_node_paths = [] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path in self.new_node_paths: n = target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = [] self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node = source_layer.lookup(node_path) # duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node in new: # add new node path to the list and emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str = self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): """Instance nodes on this graph""" def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): """Move nodes""" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path = layer_path self.new_positions = node_positions self.old_positions = {} for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None layer = 
self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos = new_pos prev_pos = self.old_positions[node_path] # Only letting it set text once, relying on consistent delta. x_delta = pos[0] - prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str = node_path else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): """Select Nodes and Connections""" def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths = paths self.model = model self.prev_paths = self.model.selection def undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths = paths curr_selection = model.selection new_paths = curr_selection + paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths = paths new_selection = model.selection[:] for path in paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): """Localize nodes""" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model = model self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths = [] @processing def undo(self): for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path) if not node: continue data = all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.created_node_paths = [] layer = self.model.target_layer for node_path in self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue # add node if it doesn't exist on the target layer target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty = 
_add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path) # preserve original data node_data['data'] = get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): """Localize nodes""" def __init__(self, node_path, attr_name, model, layer_path): node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue): """Localize nodes""" def __init__(self, node_path, model, layer_path): comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText("Localize compute on {}".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText("Localize instance path to {}".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText("Revert instance path on {}".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText("Localize exec input on {}".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self): self.setText("Revert exec input on {}".format(self.node_path)) class RevertNode(DeleteNode): """Localize nodes""" def __init__(self, node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path, others) self.rebuild = False # Tells the delete command not to re-comp self.created_node_paths = [] self.node_path = node_path def undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove our created empty nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() 
self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the node as an empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): """Parent Nodes""" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node = None self.model = model self.stage = model.stage self.node_paths = node_paths # resulting nodes self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] # get node selection for undo self.prev_selection = self.model.selection # get previous node data for all child nodes for undo self.prev_node_data = {} @processing def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {} for old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if node is not None: self.stage.delete_node(node, layer) idx = 0 for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, 
INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] = parent_path parent_node = layer.lookup(parent_path) ancestor_path = parent_path child_order = [] if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors = layer.ancestors(node_path) if ancestors: ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes += [node] # get current node hierarchy information for each node. each node # path is placed in a list of descendants for each top node so when # they are un-parented each node can be placed visually beside it's # original top node. node_hierarchy_data = {} if self.parent_node_path is nxt_path.WORLD: for node_path in self.node_paths: node = layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if top_node is None: top_node = node top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data: return # parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx = 0 for new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state) # set position for un-parent if self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText("Parent {} to {}".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): """Add an attribute to a node.""" def __init__(self, node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr = True self.setText("Add {} attr to {}".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): """Delete attribute on a node""" def __init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path) # Get the data to be set if undo is called layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove attr here to insure attr is deleted self.remove_attr = True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) 
self.setText("Remove {} attr from {}".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): """Revert compute""" def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText("Revert compute on {}".format(self.node_path)) class RenameAttribute(NxtCommand): """Rename attribute""" def __init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name = attr_name self.new_attr_name = new_attr_name self.model = model self.stage = model.stage self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText("Rename {} to {}".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): """Set attribute comment""" def __init__(self, node_path, attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText("Changed comment on {}".format(attr_path)) class SetCompute(SetNodeAttributeValue): """Set node code value""" def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText("Changed compute on {}".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): """Set node comment""" def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText("Changed comment on {}".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): """Set node instance""" def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt = ("Set inst path on " "{} to {}".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): """Set node enabled state""" def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText("Enabled {}".format(self.node_path)) else: self.setText("Disabled {}".format(self.node_path)) class SetNodeCollapse(NxtCommand): """Set the node 
collapse state""" def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value = value self.model = model self.stage = model.stage self.layer_path = layer_path self.prev_values = {} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText("Collapsed {}".format(path_str)) else: self.setText("Expanded {}".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): """Set node execute sources""" def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val is None: self.setText("Removed exec input for {}".format(self.node_path)) return self.setText("Set {} exec input to {}".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): """Set node as a break point""" def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value = value self.model = model self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText("Add breakpoint to {}".format(path_str)) else: self.setText("Remove breakpoint from {}".format(path_str)) class ClearBreakpoints(QUndoCommand): """Clear all the breakpoints for a given layer""" def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path = layer_path self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText("Clear all breakpoints") class SetNodeStartPoint(SetNodeAttributeValue): """Set this node as the execution start 
point""" def __init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): """Set node child order""" def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText("Change child order on {}".format(self.node_path)) class SetLayerAlias(NxtCommand): """Set Layer Alias""" def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias = alias self.old_alias = '' self.model = model self.stage = model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText("Set {} alias to {}".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): """Add new layer""" def __init__(self, file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model = model self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.file_name = file_name self.chdir = chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors = [] for layer in self.stage._sub_layers: color = layer.color if color: color = color.lower() open_layer_colors += [color] layer_color = layer_color_index[0] for c in layer_color_index: if c not in open_layer_colors: layer_color = c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {"parent_layer": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText("New layer {}".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): """Refernce existing layer""" 
def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {"parent_layer": parent_layer, "filepath": self.file_path, "real_path": self.real_path, "alias": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText("Added reference to {}".format(self.real_path)) class RemoveLayer(ReferenceLayer): """Remove existing layer""" def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text = "Removed reference to {}".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): """Toggles muting an existing layer""" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText("Toggle {} muted.".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): """Toggles soloing an existing layer""" def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) 
self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText("Toggle {} soloed.".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model): """Sets the color for a given layer; if the layer is not the top layer, the top layer stores an override. :param color: string of the new layer color :param layer_path: real path of layer :param model: StageModel """ super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color = color self.old_color = '' self.model = model self.stage = model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color = self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText("Set {} color to {}".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer = model.comp_layer new_node_paths = [] new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes += [n] return new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug("Undo " + cmd.text() + " | " + update_time + "ms") def redo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() + " | " + update_time + "ms")
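Note: the command classes in the record above follow Qt's QUndoCommand pattern, where pushing a command onto a QUndoStack runs its redo() immediately and the stack replays undo()/redo() on demand. A minimal, self-contained sketch of that pattern follows; PySide2 is assumed as the Qt binding and the toy SetValue command is illustrative only, not part of the record.
# Minimal sketch of the QUndoCommand / QUndoStack pattern used above
# (PySide2 assumed; the SetValue command below is a stand-in, not the record's code).
from PySide2.QtWidgets import QUndoCommand, QUndoStack

class SetValue(QUndoCommand):
    """Toy command: set a key in a dict, remembering the previous value for undo."""
    def __init__(self, target, key, value):
        super(SetValue, self).__init__()
        self.target, self.key, self.value = target, key, value
        self.prev = target.get(key)
        self.setText("Set {} to {}".format(key, value))

    def redo(self):  # called immediately when the command is pushed
        self.target[self.key] = self.value

    def undo(self):  # restores the captured previous state
        self.target[self.key] = self.prev

data = {}
stack = QUndoStack()            # in a real app this usually lives on the main window
stack.push(SetValue(data, 'color', 'red'))   # push() runs redo()
assert data['color'] == 'red'
stack.undo()                                 # undo() restores the old value (None here)
assert data['color'] is None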
40.60095
80
0.597379
8,219
68,372
4.663341
0.054995
0.048424
0.032561
0.014559
0.621478
0.538588
0.458386
0.402004
0.334481
0.293937
0
0.001263
0.316928
68,372
1,683
81
40.625074
0.819412
0.071418
0
0.471858
0
0
0.018492
0
0
0
0
0.000594
0
1
0.089437
false
0.003084
0.009252
0
0.13724
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
168de834f7c08dea94c1b268f9213453f995fc3e
6,642
py
Python
mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py
libracore/mietrechtspraxis
7b2320a70b98b086be136a86b1ab4fadfce215ff
[ "MIT" ]
1
2021-07-15T13:25:23.000Z
2021-07-15T13:25:23.000Z
mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py
libracore/mietrechtspraxis
7b2320a70b98b086be136a86b1ab4fadfce215ff
[ "MIT" ]
1
2022-01-27T13:30:56.000Z
2022-01-27T13:30:56.000Z
mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py
libracore/mietrechtspraxis
7b2320a70b98b086be136a86b1ab4fadfce215ff
[ "MIT" ]
2
2021-08-14T22:23:08.000Z
2021-09-08T09:31:51.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2021, libracore AG and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from datetime import datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz ''' # check that token is present try: token = kwargs['token'] except: # 400 Bad Request (Missing Token) return raise_4xx(400, 'Bad Request', 'Token Required') # check that token is correct if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that plz_city is present try: plz_city = kwargs['plz_city'] except: # 400 Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer = [] # lookup for plz city_results = frappe.db.sql(""" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY `city` ASC """.format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: # lookup for city city_results = frappe.db.sql(""" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY `city` ASC """.format(plz_city=plz_city), as_dict=True) if len(city_results) > 0: for city in city_results: data = {} data['plz'] = city.plz data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(""" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' """.format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0: return raise_200(answer) else: # 404 Not Found return raise_4xx(404, 'Not Found', 'No results') else: # 404 Not Found return raise_4xx(404, 'Not Found', 'No results') def get_informations(kanton): search = frappe.db.sql(""" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM `tabKantonsinformationen` WHERE `kanton` = '{kanton}' """.format(kanton=kanton), as_dict=True) if len(search) > 0: result = search[0] else: result = {} return result def raise_4xx(code, title, message): # 4xx Bad Request / Unauthorized / Not Found return ['{code} {title}'.format(code=code, title=title), { "error": { "code": code, "message": "{message}".format(message=message) } }] def raise_200(answer): return ['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden = 
frappe.db.sql("""SELECT `name` FROM `tabArbitration Authority`""", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead = no_letterhead) output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts = "{0:04d}-{1:02d}-{2:02d}".format(now.year, now.month, now.day) file_name = "{0}_{1}.pdf".format('SB_Sammel-PDF', ts) save_file(file_name, pdf, '', '', is_private=1) return
43.986755
187
0.483439
556
6,642
5.643885
0.294964
0.028999
0.022307
0.027087
0.248885
0.227533
0.210325
0.192479
0.151689
0.123008
0
0.018499
0.422162
6,642
150
188
44.28
0.799114
0.075881
0
0.281818
0
0
0.531901
0.067738
0
0
0
0
0
1
0.054545
false
0.009091
0.054545
0.018182
0.218182
0.018182
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16904816a9bda6205128c0d91b67e3ab2be3d489
3,943
py
Python
src/commands/locate_item.py
seisatsu/DennisMUD-ESP32
b63d4b914c5e8d0f9714042997c64919b20be842
[ "MIT" ]
19
2018-10-02T03:58:46.000Z
2021-04-09T13:09:23.000Z
commands/locate_item.py
seisatsu/Dennis
8f1892f21beba6b21b4f7b9ba3062296bb1dc4b9
[ "MIT" ]
100
2018-09-22T22:54:35.000Z
2021-04-16T17:46:34.000Z
src/commands/locate_item.py
seisatsu/DennisMUD-ESP32
b63d4b914c5e8d0f9714042997c64919b20be842
[ "MIT" ]
1
2022-01-03T02:21:56.000Z
2022-01-03T02:21:56.000Z
####################### # Dennis MUD # # locate_item.py # # Copyright 2018-2020 # # Michael D. Reiley # ####################### # ********** # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # ********** NAME = "locate item" CATEGORIES = ["items"] ALIASES = ["find item"] USAGE = "locate item <item_id>" DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it. You can only locate an item that you own. Wizards can locate any item. Ex. `locate item 4`""" def COMMAND(console, args): # Perform initial checks. if not COMMON.check(NAME, console, args, argc=1): return False # Perform argument type checks and casts. itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if itemid is None: return False # Check if the item exists. thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if not thisitem: return False # Keep track of whether we found anything in case the item is duplified and we can't return right away. found_something = False # Check if we are holding the item. if itemid in console.user["inventory"]: console.msg("{0}: {1} ({2}) is in your inventory.".format(NAME, thisitem["name"], thisitem["id"])) # If the item is duplified we need to keep looking for other copies. if not thisitem["duplified"]: return True found_something = True # Check if someone else is holding the item. for targetuser in console.database.users.all(): if targetuser["name"] == console.user["name"]: continue if itemid in targetuser["inventory"]: console.msg("{0}: {1} ({2}) is in the inventory of: {3}.".format(NAME, thisitem["name"], thisitem["id"], targetuser["name"])) # If the item is duplified we need to keep looking for other copies. if not thisitem["duplified"]: return True found_something = True # Check if the item is in a room. for targetroom in console.database.rooms.all(): if itemid in targetroom["items"]: console.msg("{0}: {1} ({2}) is in room: {3} ({4})".format(NAME, thisitem["name"], thisitem["id"], targetroom["name"], targetroom["id"])) # If the item is duplified we need to keep looking for other copies. if not thisitem["duplified"]: return True found_something = True # Couldn't find the item. if not found_something: console.log.error("Item exists but has no location: {item}", item=itemid) console.msg("{0}: ERROR: Item exists but has no location. Use `requisition` to fix this.".format(NAME)) return False # Finished. return True
41.072917
116
0.633274
530
3,943
4.692453
0.350943
0.028146
0.018094
0.028951
0.221954
0.221954
0.184158
0.152392
0.131484
0.131484
0
0.008562
0.259447
3,943
95
117
41.505263
0.843151
0.421506
0
0.318182
0
0
0.236902
0
0
0
0
0
0
1
0.022727
false
0
0
0
0.204545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16904f40b9743948ab5dc6a0d2f55015295bc2fd
2,787
py
Python
modelling/scsb/models/monthly-comparisons.py
bcgov-c/wally
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
[ "Apache-2.0" ]
null
null
null
modelling/scsb/models/monthly-comparisons.py
bcgov-c/wally
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
[ "Apache-2.0" ]
null
null
null
modelling/scsb/models/monthly-comparisons.py
bcgov-c/wally
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
[ "Apache-2.0" ]
null
null
null
import json import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as MSE, r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data = json.load(f) all_zones_df = pd.read_csv("../data/scsb_all_zones.csv") zone_25_df = pd.read_csv("../data/scsb_zone_25.csv") zone_26_df = pd.read_csv("../data/scsb_zone_26.csv") zone_27_df = pd.read_csv("../data/scsb_zone_27.csv") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in month_dependant_variables] data = zone_26_df xgb_results = [] rfr_results = [] dtr_results = [] # calculate monthly estimations for 3 models for dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of scsb against the 3 models for row_target_index in range(20): xgb_row = [] rfr_row = [] dtr_row = [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index] plt.title(name) plt.savefig('../plots/{}.png'.format(name)) plt.show()
38.708333
161
0.734482
421
2,787
4.584323
0.344418
0.027979
0.043523
0.022798
0.131088
0.045596
0.035751
0
0
0
0
0.021022
0.12953
2,787
71
162
39.253521
0.774526
0.077503
0
0
0
0
0.14809
0.049493
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1695439f6b89942d55b135dae20f140a0772199c
3,727
py
Python
shuffling_algorithm.py
BaptisteLafoux/aztec_tiling
413acd8751b8178942e91fbee32987f02bc5c695
[ "MIT" ]
null
null
null
shuffling_algorithm.py
BaptisteLafoux/aztec_tiling
413acd8751b8178942e91fbee32987f02bc5c695
[ "MIT" ]
null
null
null
shuffling_algorithm.py
BaptisteLafoux/aztec_tiling
413acd8751b8178942e91fbee32987f02bc5c695
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Dec 30 22:04:48 2020 @author: baptistelafoux """ import domino import numpy as np import numpy.lib.arraysetops as aso def spawn_block(x, y): if np.random.rand() > 0.5: d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1, y + 1]), np.array([0, 1])) else: d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0])) d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1, y + 1]), np.array([ 1,0])) return [d1, d2] def aztec_grid(order, only_new_blocks = True): grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order - 1)/2 , np.arange(2 * order) - (2 * order - 1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y = center_pts[:,1] if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order - 1) else: idx = np.abs(X) + np.abs(Y) <= order return X[idx], Y[idx] def add_to_grid(tiles, grid): for tile in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] = tile return grid def generate_good_block(grid): center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X = center_pts[:, 0] Y = center_pts[:, 1] for (x,y) in zip(X,Y): try: if ~grid[x, y]: idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid) except: pass except: pass return grid def enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts) if order > 1: for x, y in list(diff_array): grid[x, y] = False else: for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False return grid def enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order, True) for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False return grid def move_tiles(grid, curr_order): temp_grid = {} for coord in grid: if grid[coord] != False: x1, y1 = grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False grid[x2, y2] = False for coord in temp_grid: grid[coord] = temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X = center_pts[:, 0] Y = center_pts[:, 1] for (x,y) in zip(X,Y): try: next_x, next_y = np.array([x, y]) + grid[x, y].v if (grid[next_x, next_y] != False): if all(grid[next_x, next_y].v == - grid[x, y].v): grid[x, y ] = False grid[next_x, next_y] = False except: pass return grid
26.81295
117
0.499866
554
3,727
3.214801
0.18231
0.028074
0.040427
0.023582
0.480067
0.411005
0.332959
0.27288
0.205503
0.19315
0
0.032747
0.344513
3,727
138
118
27.007246
0.696275
0.027904
0
0.380952
0
0
0
0
0
0
0
0
0
1
0.095238
false
0.035714
0.035714
0
0.22619
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
169a6a92aa8a5f8b13f2ca7a2bc5a3d4390e96a9
6,363
py
Python
quantum/plugins/nicira/extensions/nvp_qos.py
yamt/neutron
f94126739a48993efaf1d1439dcd3dadb0c69742
[ "Apache-2.0" ]
null
null
null
quantum/plugins/nicira/extensions/nvp_qos.py
yamt/neutron
f94126739a48993efaf1d1439dcd3dadb0c69742
[ "Apache-2.0" ]
null
null
null
quantum/plugins/nicira/extensions/nvp_qos.py
yamt/neutron
f94126739a48993efaf1d1439dcd3dadb0c69742
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Aaron Rosen, Nicira Networks, Inc. from abc import abstractmethod from quantum.api import extensions from quantum.api.v2 import attributes as attr from quantum.api.v2 import base from quantum.common import exceptions as qexception from quantum import manager # For policy.json/Auth qos_queue_create = "create_qos_queue" qos_queue_delete = "delete_qos_queue" qos_queue_get = "get_qos_queue" qos_queue_list = "get_qos_queues" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _("Need to be admin in order to create queue called default") class DefaultQueueAlreadyExists(qexception.InUse): message = _("Default queue already exists.") class QueueInvalidDscp(qexception.InvalidInput): message = _("Invalid value for dscp %(data)s must be integer.") class QueueMinGreaterMax(qexception.InvalidInput): message = _("Invalid bandwidth rate, min greater than max.") class QueueInvalidBandwidth(qexception.InvalidInput): message = _("Invalid bandwidth rate, %(data)s must be a non negative" " integer.") class MissingDSCPForTrusted(qexception.InvalidInput): message = _("No DSCP field needed when QoS workload marked trusted") class QueueNotFound(qexception.NotFound): message = _("Queue %(id)s does not exist") class QueueInUseByPort(qexception.InUse): message = _("Unable to delete queue attached to port.") class QueuePortBindingNotFound(qexception.NotFound): message = _("Port is not associated with lqueue") def convert_to_unsigned_int_or_none(val): if val is None: return try: val = int(val) if val < 0: raise ValueError except (ValueError, TypeError): msg = _("'%s' must be a non negative integer.") % val raise qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': 
False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False}} } class Nvp_qos(object): """Port Queue extension.""" @classmethod def get_name(cls): return "nvp-qos" @classmethod def get_alias(cls): return "nvp-qos" @classmethod def get_description(cls): return "NVP QoS extension." @classmethod def get_namespace(cls): return "http://docs.openstack.org/ext/nvp-qos/api/v2.0" @classmethod def get_updated(cls): return "2012-10-05T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if version == "2.0": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue): pass @abstractmethod def delete_qos_queue(self, context, id): pass @abstractmethod def get_qos_queue(self, context, id, fields=None): pass @abstractmethod def get_qos_queues(self, context, filters=None, fields=None): pass
31.191176
78
0.610718
715
6,363
5.236364
0.317483
0.023504
0.034722
0.043269
0.279915
0.223024
0.176015
0.129541
0.081197
0.081197
0
0.009395
0.280685
6,363
203
79
31.344828
0.808608
0.120069
0
0.222222
0
0
0.220646
0.004488
0
0
0
0
0
1
0.088889
false
0.02963
0.044444
0.037037
0.355556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
169c6caecdf841a261ae5cbf1ce633a03edb8b3a
2,532
py
Python
tests/unit/concurrently/test_TaskPackageDropbox_put.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
null
null
null
tests/unit/concurrently/test_TaskPackageDropbox_put.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
null
null
null
tests/unit/concurrently/test_TaskPackageDropbox_put.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
null
null
null
# Tai Sakuma <tai.sakuma@gmail.com> import pytest try: import unittest.mock as mock except ImportError: import mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count assert 1 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run.side_effect = [1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert 1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run_multiple.return_value = [1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list ##__________________________________________________________________||
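Note: the tests in the record above script successive return values through mock side_effect and then assert on call_args_list. A standalone, generic illustration of that mock pattern (not tied to AlphaTwirl) follows.
# Standalone illustration of the side_effect / call_args_list pattern used above.
import unittest.mock as mock

m = mock.MagicMock()
m.put_package.side_effect = [0, 1]          # successive calls yield 0, then 1
assert m.put_package('pkg0') == 0
assert m.put_package('pkg1') == 1
assert m.put_package.call_args_list == [mock.call('pkg0'), mock.call('pkg1')]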
30.878049
98
0.742496
292
2,532
5.630137
0.181507
0.065693
0.072993
0.048662
0.689173
0.687348
0.644769
0.622263
0.539538
0.539538
0
0.02869
0.146524
2,532
81
99
31.259259
0.732068
0.10387
0
0.425926
0
0
0.014178
0
0
0
0
0
0.351852
1
0.12963
false
0
0.092593
0.037037
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
169dfe6f123a1bb92dcedefda60fdcdf0dde5b42
3,497
py
Python
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
gokarslan/networking-odl2
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
[ "Apache-2.0" ]
null
null
null
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
gokarslan/networking-odl2
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
[ "Apache-2.0" ]
null
null
null
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
gokarslan/networking-odl2
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios

from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base

from oslo_config import cfg

load_tests = testscenarios.load_tests_apply_scenarios

cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')


class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):

    def setUp(self):
        super(OdlDhcpDriverTestCase, self).setUp()
        cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        self.mech.initialize()

    def test_dhcp_flag_test(self):
        self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)

    def test_dhcp_driver_load(self):
        self.assertTrue(isinstance(self.mech.dhcp_driver,
                                   odl_dhcp_driver.OdlDhcpDriver))

    def test_dhcp_port_create_on_subnet_event(self):
        data = self.get_network_and_subnet_context('10.0.50.0/24',
                                                   True, True, True)
        subnet_context = data['subnet_context']
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
        self.mech.journal.sync_pending_entries()
        port = self.get_port_id(data['plugin'], data['context'],
                                data['network_id'], data['subnet_id'])
        self.assertIsNotNone(port)

    def test_dhcp_delete_on_port_update_event(self):
        data = self.get_network_and_subnet_context('10.0.50.0/24',
                                                   True, True, True)
        subnet_context = data['subnet_context']
        plugin = data['plugin']
        self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port_id = self.get_port_id(data['plugin'], data['context'],
                                   data['network_id'], data['subnet_id'])
        self.assertIsNotNone(port_id)
        port = plugin.get_port(data['context'], port_id)
        port['fixed_ips'] = []
        ports = {'port': port}
        plugin.update_port(data['context'], port_id, ports)
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
        self.mech.journal.sync_pending_entries()
        port_id = self.get_port_id(data['plugin'], data['context'],
                                   data['network_id'], data['subnet_id'])
        self.assertIsNone(port_id)
40.662791
78
0.637975
420
3,497
5.030952
0.321429
0.055372
0.032182
0.055372
0.347847
0.308093
0.308093
0.274965
0.274965
0.274965
0
0.012638
0.275951
3,497
85
79
41.141176
0.82188
0.174435
0
0.388889
0
0
0.084262
0.009749
0
0
0
0
0.092593
1
0.092593
false
0
0.12963
0
0.240741
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16a0f5c79d486ed958f66a4f801398499c8d9ff1
3,389
py
Python
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
cjh0613/baserow
62871f5bf53c9d25446976031aacb706c0abe584
[ "MIT" ]
839
2020-07-20T13:29:34.000Z
2022-03-31T21:09:16.000Z
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
cjh0613/baserow
62871f5bf53c9d25446976031aacb706c0abe584
[ "MIT" ]
28
2020-08-07T09:23:58.000Z
2022-03-01T22:32:40.000Z
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
cjh0613/baserow
62871f5bf53c9d25446976031aacb706c0abe584
[ "MIT" ]
79
2020-08-04T01:48:01.000Z
2022-03-27T13:30:54.000Z
from datetime import timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from baserow.api.decorators import accept_timezone from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=["Admin"], operation_id="admin_dashboard", description="Returns the new and active users for the last 24 hours, 7 days and" " 30 days. The `previous_` values are the values of the period before, so for " "example `previous_new_users_last_24_hours` are the new users that signed up " "from 48 to 24 hours ago. It can be used to calculate an increase or decrease " "in the amount of signups. A list of the new and active users for every day " "for the last 30 days is also included.\n\nThis is a **premium** feature.", responses={ 200: AdminDashboardSerializer, 401: None, }, ) @accept_timezone() def get(self, request, now): """ Returns the new and active users for the last 24 hours, 7 days and 30 days. The `previous_` values are the values of the period before, so for example `previous_new_users_last_24_hours` are the new users that signed up from 48 to 24 hours ago. It can be used to calculate an increase or decrease in the amount of signups. A list of the new and active users for every day for the last 30 days is also included. """ handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts( { "new_users_last_24_hours": timedelta(hours=24), "new_users_last_7_days": timedelta(days=7), "new_users_last_30_days": timedelta(days=30), }, include_previous=True, ) active_users = handler.get_active_user_count( { "active_users_last_24_hours": timedelta(hours=24), "active_users_last_7_days": timedelta(days=7), "active_users_last_30_days": timedelta(days=30), }, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer = AdminDashboardSerializer( { "total_users": total_users, "total_groups": total_groups, "total_applications": total_applications, "new_users_per_day": new_users_per_day, "active_users_per_day": active_users_per_day, **new_users, **active_users, } ) return Response(serializer.data)
38.078652
88
0.649159
425
3,389
4.936471
0.275294
0.062917
0.031459
0.028599
0.50858
0.461868
0.461868
0.383222
0.383222
0.348904
0
0.022941
0.279729
3,389
88
89
38.511364
0.836542
0.120685
0
0.059701
0
0
0.233882
0.060014
0
0
0
0
0
1
0.014925
false
0
0.149254
0
0.208955
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16a20512bd62fea83ee40c49a4b7cc5fa386ce48
969
py
Python
src/clientOld.py
dan3612812/socketChatRoom
b0d548477687de2d9fd521826db9ea75e528de5c
[ "MIT" ]
null
null
null
src/clientOld.py
dan3612812/socketChatRoom
b0d548477687de2d9fd521826db9ea75e528de5c
[ "MIT" ]
null
null
null
src/clientOld.py
dan3612812/socketChatRoom
b0d548477687de2d9fd521826db9ea75e528de5c
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select

HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")


def socketRecv():
    while True:
        data = s.recv(1024).decode("utf-8")
        print(data)
        time.sleep(0.1)


def inputJob():
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)


socketThread = threading.Thread(target=socketRecv)
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()

try:
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)
except (KeyboardInterrupt, EOFError):  # fixed: `except A or B:` only ever catches A
    print("in except")
    # s.close()  # close the connection
    socketThread.do_run = False
    # socketThread.join()
    # inputThread.join()
    print("close thread")
    sys.exit(0)
19.38
53
0.627451
130
969
4.653846
0.476923
0.026446
0.064463
0.054545
0.155372
0.155372
0.155372
0.155372
0.155372
0.155372
0
0.034483
0.221878
969
49
54
19.77551
0.767905
0.139319
0
0.294118
0
0
0.082324
0
0
0
0
0
0
1
0.058824
false
0
0.147059
0
0.205882
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16a3072f25578896e1189f9fac5976e0586e6b47
6,369
py
Python
demo_large_image.py
gunlyungyou/AerialDetection
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
[ "Apache-2.0" ]
9
2020-10-08T19:51:17.000Z
2022-02-16T12:58:01.000Z
demo_large_image.py
gunlyungyou/AerialDetection
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
[ "Apache-2.0" ]
null
null
null
demo_large_image.py
gunlyungyou/AerialDetection
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
[ "Apache-2.0" ]
8
2020-09-25T14:47:55.000Z
2022-02-16T12:31:13.000Z
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections import mmcv from mmcv import Config from mmdet.datasets import get_dataset import cv2 import os import numpy as np from tqdm import tqdm import DOTA_devkit.polyiou as polyiou import math import pdb CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1] x1 = np.min(obbs[:, 0::2], axis=1) y1 = np.min(obbs[:, 1::2], axis=1) x2 = np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:, 1::2], axis=1) scores = dets[:, 8] areas = (x2 - x1 + 1) * (y2 - y1 + 1) polys = [] for i in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep = [] while order.size > 0: ovr = [] i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) hbb_inter = w * h hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds + 1] for j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]): pdb.set_trace() except: pass inds = np.where(hbb_ovr <= thresh)[0] order = order[inds + 1] return keep class DetectorModel(): def __init__(self, config_file, checkpoint_file): # init RoITransformer self.config_file = config_file self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size, chip_size): img = mmcv.imread(imagname) height, width, channel = img.shape slide_h, slide_w = slide_size hn, wn = chip_size # TODO: check the corner case # import pdb; pdb.set_trace() total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))] for i in tqdm(range(int(width / slide_w + 1))): for j in range(int(height / slide_h) + 1): subimg = np.zeros((hn, wn, channel)) # print('i: ', i, 'j: ', j) chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections = inference_detector(self.model, subimg) # print('result: ', result) for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace() # nms for i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath, dstpath, 
slide_size, chip_size): detections = self.inference_single(srcpath, slide_size, chip_size) classnames = [cls if cls not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames] img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512), # (1024, 1024)) for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512), # (1024, 1024))
43.326531
122
0.551892
830
6,369
4.028916
0.26988
0.011962
0.0314
0.028409
0.261065
0.183612
0.120215
0.052033
0.052033
0
0
0.044151
0.310096
6,369
146
123
43.623288
0.716887
0.151358
0
0.036697
0
0
0.071163
0.027685
0
0
0
0.006849
0
1
0.036697
false
0.009174
0.119266
0
0.183486
0.009174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16a410bbf9dbba9b62a772c35376b67270885de8
3,981
py
Python
scripts/map_frame_to_utm_tf_publisher.py
coincar-sim/lanelet2_interface_ros
f1738766dd323ed64a4ebcc8254438920a587b80
[ "BSD-3-Clause" ]
7
2019-03-27T03:59:50.000Z
2021-10-17T10:46:29.000Z
scripts/map_frame_to_utm_tf_publisher.py
coincar-sim/lanelet2_interface_ros
f1738766dd323ed64a4ebcc8254438920a587b80
[ "BSD-3-Clause" ]
6
2019-04-13T15:55:55.000Z
2021-06-01T21:08:18.000Z
scripts/map_frame_to_utm_tf_publisher.py
coincar-sim/lanelet2_interface_ros
f1738766dd323ed64a4ebcc8254438920a587b80
[ "BSD-3-Clause" ]
4
2021-03-25T09:22:55.000Z
2022-03-22T05:40:49.000Z
#!/usr/bin/env python # # Copyright (c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import roslib import rospy import tf import tf2_ros import geometry_msgs.msg import lanelet2 stb = None static_transform = None lat_origin = None lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id = None def timer_callback(event): global stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000): try: lat_origin = float(rospy.get_param("/lanelet2_interface_ros/lat_origin")) lon_origin = float(rospy.get_param("/lanelet2_interface_ros/lon_origin")) map_frame_id = rospy.get_param("/lanelet2_interface_ros/map_frame_id") actual_utm_with_no_offset_frame_id = rospy.get_param( "/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id") except Exception: rospy.sleep(0.01) continue return True return False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr("map_frame_to_utm_tf_publisher: Could not initialize") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = 
q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3] rospy.Timer(rospy.Duration(1.), timer_callback) rospy.spin()
38.278846
85
0.757096
557
3,981
5.197487
0.40395
0.07772
0.058031
0.025907
0.249741
0.203109
0.186529
0.140933
0.08532
0.07323
0
0.010312
0.171816
3,981
103
86
38.650485
0.867759
0.413213
0
0.037037
0
0
0.108413
0.095837
0
0
0
0
0
1
0.037037
false
0
0.111111
0
0.185185
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16a762cb2b4ddc4c0f253e56da58680346091ea8
7,879
py
Python
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
Rodrigo-Flo/Kratos
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
[ "BSD-4-Clause" ]
null
null
null
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
Rodrigo-Flo/Kratos
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
[ "BSD-4-Clause" ]
null
null
null
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
Rodrigo-Flo/Kratos
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
[ "BSD-4-Clause" ]
null
null
null
# Import kratos core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = "CompressibleExplicit" self.use_oss = False self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = "CompressibleExplicit" self.use_oss = False self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = "CompressibleExplicit" self.use_oss = True self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = "CompressibleExplicit" self.use_oss = True self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self): self.print_output = False self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder = "sod_shock_tube_test" settings_filename = "ProjectParameters.json" # Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required, add the output process to the test settings if self.print_output: self._AddOutput() # If required, add the reference values output process to the test settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation settings self.parameters["solver_settings"]["solver_type"].SetString(self.solver_type) self.parameters["solver_settings"]["use_oss"].SetBool(self.use_oss) self.parameters["solver_settings"]["shock_capturing"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters("""{ "python_module" : "gid_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "GiDOutputProcess", "help" : "This process writes postprocessing files for GiD", "Parameters" : { "model_part_name" : "FluidModelPart", "output_name" : "TO_BE_DEFINED", "postprocess_parameters" : { "result_file_configuration" : { "gidpost_flags" : { "GiDPostMode" : "GiD_PostBinary", "WriteDeformedMeshFlag" : "WriteDeformed", "WriteConditionsFlag" : "WriteConditions", "MultiFileFlag" : "SingleFile" }, "file_label" : "step", "output_control_type" : "step", "output_frequency" : 1.0, "body_output" : true, "node_output" : false, "skin_output" : false, "plane_output" : [], "nodal_results" : ["DENSITY","MOMENTUM","TOTAL_ENERGY"], "gauss_point_results" : ["SHOCK_SENSOR","THERMAL_SENSOR","SHEAR_SENSOR"], "nodal_nonhistorical_results" : ["ARTIFICIAL_BULK_VISCOSITY","ARTIFICIAL_CONDUCTIVITY","ARTIFICIAL_DYNAMIC_VISCOSITY"] }, "point_data_configuration" : [] } } }""") output_name = "sod_shock_tube{0}{1}{2}".format( 
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit", "_ASGS" if self.use_oss == False else "_OSS", "_SC" if self.shock_capturing else "") gid_output_settings["Parameters"]["output_name"].SetString(output_name) self.parameters["output_processes"]["gid_output"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters("""{ "python_module" : "json_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "JsonOutputProcess", "Parameters" : { "output_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"], "output_file_name" : "TO_BE_DEFINED", "model_part_name" : "FluidModelPart.FluidParts_Fluid", "time_frequency" : 0.025 } }""") output_file_name = "sod_shock_tube{0}{1}{2}_results.json".format( "_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit", "_ASGS" if self.use_oss == False else "_OSS", "_SC" if self.shock_capturing else "") json_output_settings["Parameters"]["output_file_name"].SetString(output_file_name) self.parameters["processes"]["json_check_process_list"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters("""{ "python_module" : "from_json_check_result_process", "kratos_module" : "KratosMultiphysics", "process_name" : "FromJsonCheckResultProcess", "Parameters" : { "check_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"], "input_file_name" : "TO_BE_DEFINED", "model_part_name" : "FluidModelPart.FluidParts_Fluid", "tolerance" : 0.0, "relative_tolerance" : 0.0, "time_frequency" : 0.025 } }""") input_file_name = "sod_shock_tube{0}{1}{2}_results.json".format( "_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit", "_ASGS" if self.use_oss == False else "_OSS", "_SC" if self.shock_capturing else "") json_check_settings["Parameters"]["input_file_name"].SetString(input_file_name) json_check_settings["Parameters"]["tolerance"].SetDouble(self.check_absolute_tolerance) json_check_settings["Parameters"]["relative_tolerance"].SetDouble(self.check_relative_tolerance) self.parameters["processes"]["json_check_process_list"].Append(json_check_settings) if __name__ == '__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() # test.testSodShockTubeExplicitOSSShockCapturing() test.runTest() test.tearDown()
48.635802
142
0.615053
684
7,879
6.736842
0.236842
0.014323
0.024306
0.051649
0.378472
0.368056
0.334201
0.240017
0.220486
0.179688
0
0.00534
0.286965
7,879
161
143
48.937888
0.814881
0.047214
0
0.29927
0
0
0.469655
0.120181
0
0
0
0
0
1
0.080292
false
0
0.029197
0
0.116788
0.029197
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16aafc257a8e2aae93d3cae037dc8cf239e63a42
20,180
py
Python
lib/aws_sso_lib/assignments.py
vdesjardins/aws-sso-util
bf092a21674e8286c4445df7f4aae8ad061444ca
[ "Apache-2.0" ]
330
2020-11-11T15:53:22.000Z
2022-03-30T06:45:57.000Z
lib/aws_sso_lib/assignments.py
vdesjardins/aws-sso-util
bf092a21674e8286c4445df7f4aae8ad061444ca
[ "Apache-2.0" ]
47
2020-11-11T01:32:29.000Z
2022-03-30T01:33:28.000Z
lib/aws_sso_lib/assignments.py
vdesjardins/aws-sso-util
bf092a21674e8286c4445df7f4aae8ad061444ca
[ "Apache-2.0" ]
23
2020-11-25T14:12:37.000Z
2022-03-30T02:16:26.000Z
import re import numbers import collections import logging from collections.abc import Iterable import itertools import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple("_Context", [ "session", "ids", "principal", "principal_filter", "permission_set", "permission_set_filter", "target", "target_filter", "get_principal_names", "get_permission_set_names", "get_target_names", "ou_recursive", "cache", "filter_cache" ]) def _filter(filter_cache, key, func, args): if not func: return True if key not in filter_cache: filter_cache[key] = func(*args) return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([ len(principal) == 2, isinstance(principal[0], str), principal[0] in ["GROUP", "USER"], isinstance(principal[1], str), ]) except: return False def _process_principal(principal): if not principal: return None if isinstance(principal, str): return [(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p) for p in principal) def _process_permission_set(ids, permission_set): if not permission_set: return None if not isinstance(permission_set, str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for ps in permission_set) if permission_set.startswith("arn"): permission_set_arn = permission_set elif permission_set.startswith("ssoins-") or permission_set.startswith("ins-"): permission_set_arn = f"arn:aws:sso:::permissionSet/{permission_set}" elif permission_set.startswith("ps-"): permission_set_arn = f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}" else: raise TypeError(f"Invalid permission set id {permission_set}") return [permission_set_arn] def _is_target_tuple(target): try: return all([ len(target) == 2, isinstance(target[0], str), target[0] in ["AWS_OU", "AWS_ACCOUNT"], isinstance(target[1], str), ]) except: return False def _process_target(target): if not target: return None if isinstance(target, numbers.Number): return [("AWS_ACCOUNT", format_account_id(target))] if isinstance(target, str): if re.match(r"^\d+$", target): return [("AWS_ACCOUNT", format_account_id(target))] elif re.match(r"^r-[a-z0-9]{4,32}$", target) or re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$", target): return [("AWS_OU", target)] else: raise TypeError(f"Invalid target {target}") elif _is_target_tuple(target): target_type, target_id = target if target_type not in ["AWS_ACCOUNT", "AWS_OU"]: raise TypeError(f"Invalid target type {target_type}") return [(target_type, target_id)] else: value = _flatten(_process_target(t) for t in target) return value def _get_account_iterator(target, context: _Context): def target_iterator(): target_name = None if context.get_target_names: organizations_client = context.session.client("organizations") account = organizations_client.describe_account(AccountId=target[1])["Account"] if account.get("Name"): target_name = account["Name"] value = (*target, target_name) if not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f"Account is filtered: {value}") else: LOGGER.debug(f"Visiting single account: {value}") yield value return target_iterator def _get_ou_iterator(target, context: _Context): def target_iterator(): target_name = None # if context.get_target_names: # organizations_client = context.session.client("organizations") 
# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"] # if ou.get("Name"): # target_name = ou("Name") value = (*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in accounts: yield "AWS_ACCOUNT", account["Id"], account["Name"] return target_iterator def _get_single_target_iterator(target, context: _Context): target_type = target[0] if target_type == "AWS_ACCOUNT": return _get_account_iterator(target, context) elif target_type == "AWS_OU": return _get_ou_iterator(target, context) else: raise TypeError(f"Invalid target type {target_type}") def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client = context.session.client("organizations") accounts_paginator = organizations_client.get_paginator("list_accounts") for response in accounts_paginator.paginate(): LOGGER.debug(f"ListAccounts page: {response}") for account in response["Accounts"]: account_id = account["Id"] account_name = account["Name"] value = ("AWS_ACCOUNT", account_id, account_name) if not _filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f"Account is filtered: {value}") continue LOGGER.debug(f"Visiting account: {value}") yield value return target_iterator def _get_target_iterator(context: _Context): if context.target: iterables = [_get_single_target_iterator(t, context) for t in context.target] def target_iterator(): return itertools.chain(*[it() for it in iterables]) return target_iterator else: LOGGER.debug(f"Iterating for all accounts") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set permission_set_id = permission_set_arn.split("/")[-1] def permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names: permission_set_name = None else: sso_admin_client = context.session.client("sso-admin") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f"DescribePermissionSet response: {response}") permission_set_name = response["PermissionSet"]["Name"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f"Single permission set is filtered: {(permission_set_id, permission_set_name)}") else: LOGGER.debug(f"Visiting single permission set {(permission_set_id, permission_set_name)}") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name): if target_type != "AWS_ACCOUNT": raise TypeError(f"Unsupported target type {target_type}") sso_admin_client = context.session.client("sso-admin") permission_sets_paginator = sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}") if "PermissionSets" not in response: continue for permission_set_arn in response["PermissionSets"]: permission_set_id = permission_set_arn.split("/", 2)[-1] if not context.get_permission_set_names: permission_set_name = None else: if permission_set_arn not in context.cache: response 
= sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f"DescribePermissionSet response: {response}") context.cache[permission_set_arn] = response["PermissionSet"]["Name"] permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f"Permission set is filtered: {(permission_set_id, permission_set_name)}") continue LOGGER.debug(f"Visiting permission set: {(permission_set_id, permission_set_name)}") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables]) return permission_set_iterator else: LOGGER.debug("Iterating for all permission sets") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type != "AWS_ACCOUNT": raise TypeError(f"Unsupported target type {target_type}") sso_admin_client = context.session.client("sso-admin") identity_store_client = context.session.client("identitystore") assignments_paginator = sso_admin_client.get_paginator("list_account_assignments") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}") if not response["AccountAssignments"] and not "NextToken" in response: LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}") for assignment in response["AccountAssignments"]: principal_type = assignment["PrincipalType"] principal_id = assignment["PrincipalId"] LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}") if context.principal: for principal in context.principal: type_matches = (principal[0] is None or principal[0] != principal_type) if type_matches and principal[1] == principal_id: LOGGER.debug(f"Found principal {principal_type}:{principal_id}") break else: LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals") continue principal_key = (principal_type, principal_id) if not context.get_principal_names: principal_name = None else: if principal_key not in context.cache: if principal_type == "GROUP": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f"DescribeGroup response: {response}") context.cache[principal_key] = response["DisplayName"] except aws_error_utils.catch_aws_error("ResourceNotFoundException"): context.cache[principal_key] = None elif principal_type == "USER": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f"DescribeUser response: {response}") context.cache[principal_key] = response["UserName"] except aws_error_utils.catch_aws_error("ResourceNotFoundException"): context.cache[principal_key] = None else: raise ValueError(f"Unknown principal type {principal_type}") 
principal_name = context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}") else: LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}") continue LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}") yield principal_type, principal_id, principal_name return principal_iterator Assignment = collections.namedtuple("Assignment", [ "instance_arn", "principal_type", "principal_id", "principal_name", "permission_set_arn", "permission_set_name", "target_type", "target_id", "target_name", ]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): """Iterate over AWS SSO assignments. Args: session (boto3.Session): boto3 session to use instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances identity_store_id (str): The identity store to use if principal names are being retrieved or it will be looked up using ListInstances principal: A principal specification or list of principal specifications. A principal specification is a principal id or a 2-tuple of principal type and id. principal_filter: A callable taking principal type, principal id, and principal name (which may be None), and returning True if the principal should be included. permission_set: A permission set arn or id, or a list of the same. permission_set_filter: A callable taking permission set arn and name (name may be None), returning True if the permission set should be included. target: A target specification or list of target specifications. A target specification is an account or OU id, or a 2-tuple of target type, which is either AWS_ACCOUNT or AWS_OU, and target id. target_filter: A callable taking target type, target id, and target name (which may be None), and returning True if the target should be included. get_principal_names (bool): Retrieve names for principals in assignments. get_permission_set_names (bool): Retrieve names for permission sets in assignments. get_target_names (bool): Retrieve names for targets in assignments. ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts including those in child OUs. 
Returns: An iterator over Assignment namedtuples """ ids = Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session, ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target = _process_target(target) cache = {} filter_cache = {} context = _Context( session = session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type, target_id, target_name in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id, principal_name in principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f"Visiting assignment: {assignment}") yield assignment if __name__ == "__main__": import boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs = {} for v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) def fil(*args): print(args) return True kwargs["target_filter"] = fil try: session = boto3.Session() print(",".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(",".join(v or "" for v in value)) except KeyboardInterrupt: pass
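The list_assignments docstring above describes how principals, permission sets, and targets can be specified and filtered. A minimal usage sketch follows, assuming credentials with SSO admin, identity store, and Organizations read access; the profile name and account id are illustrative, not taken from the record.

import boto3
from aws_sso_lib.assignments import list_assignments

# Assumed: an AWS profile with sso-admin/identitystore/organizations read permissions.
session = boto3.Session(profile_name="sso-admin")  # illustrative profile name

# Iterate assignments for a single account, resolving names; the SSO instance
# is discovered via ListInstances because instance_arn is not passed.
for a in list_assignments(
        session,
        target="123456789012",          # illustrative account id (could also be an OU id)
        get_principal_names=True,
        get_permission_set_names=True):
    print(a.principal_type, a.principal_name, a.permission_set_name, a.target_id)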
41.608247
147
0.637413
2,231
20,180
5.467952
0.095473
0.135339
0.038036
0.019182
0.55193
0.465284
0.39659
0.352078
0.31978
0.29527
0
0.002968
0.281962
20,180
484
148
41.694215
0.838923
0.097175
0
0.391414
0
0.002525
0.138337
0.041087
0
0
0
0
0
1
0.068182
false
0.002525
0.030303
0.007576
0.189394
0.007576
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16ac3137138a7e3b002c9c9337af2623d4ef26d0
2,600
py
Python
buildutil/main.py
TediCreations/buildutils
49a35e0926baf65f7688f89e53f525812540101c
[ "MIT" ]
null
null
null
buildutil/main.py
TediCreations/buildutils
49a35e0926baf65f7688f89e53f525812540101c
[ "MIT" ]
null
null
null
buildutil/main.py
TediCreations/buildutils
49a35e0926baf65f7688f89e53f525812540101c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import os import argparse import subprocess if __name__ == '__main__': from version import __version__ from configParser import ConfigParser else: from .version import __version__ from .configParser import ConfigParser def command(cmd): """Run a shell command""" subprocess.call(cmd, shell=True) """ cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return stdout, stderr """ def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog="buildutil", description="Assembly/C/C++ utility to build embedded systems", epilog="Author: Kanelis Elias", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true', # help='an optional argument') """ parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config filepath') """ parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the config filepath') parser.add_argument( '-v', '--version', action='store_true', help='get the version of the build system') # parser.add_argument( # '-f', # '--file', # help='A readable file', # metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd', description="") parser_build = cmd_parser.add_parser( 'build', help="build the project") parser_get_version = cmd_parser.add_parser( 'get_version', help="try to get the version from git") # parser_get_version.add_argument( # '-a', '--alpha', # dest='alpha', # help='try to get the version') # Execute parse_args() args = parser.parse_args() subcommand = parser.parse_args().cmd if args.version is True: print(f"version: {__version__}") exit(0) # if subcommand is None or subcommand == "build": if subcommand == "build": makefilePath = os.path.join(absFilePath, "conf/make/Makefile") command(f"make -f {makefilePath}") elif subcommand == "get_version": print("version") else: ConfigParser() print("fuck") return # Working directory wd = os.path.abspath(args.directory) print(f"File: {absFilePath}") print(F"CWD: {cwdPath}") print(F"Working directory: {wd}") print(F"makefile path: {makefilePath}") print() command(f"make -f {makefilePath}") if __name__ == '__main__': main()
20.967742
65
0.672692
322
2,600
5.242236
0.341615
0.042654
0.050355
0.028436
0.194313
0.164692
0.138626
0.138626
0.069905
0.069905
0
0.000934
0.176538
2,600
123
66
21.138211
0.787482
0.186538
0
0.137931
0
0
0.272416
0
0
0
0
0
0
1
0.034483
false
0
0.12069
0
0.172414
0.137931
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16adc3c8486e2f9e557cbef70e8a437e66aeb740
19,267
py
Python
gautools/submit_gaussian.py
thompcinnamon/QM-calc-scripts
60b06e14b2efd307d419201079bb24152ab0bd3c
[ "Apache-2.0" ]
null
null
null
gautools/submit_gaussian.py
thompcinnamon/QM-calc-scripts
60b06e14b2efd307d419201079bb24152ab0bd3c
[ "Apache-2.0" ]
2
2018-07-18T19:53:08.000Z
2019-02-25T23:25:51.000Z
gautools/submit_gaussian.py
theavey/QM-calc-scripts
60b06e14b2efd307d419201079bb24152ab0bd3c
[ "Apache-2.0" ]
1
2017-01-04T20:50:21.000Z
2017-01-04T20:50:21.000Z
#! /usr/bin/env python3 ######################################################################## # # # This script was written by Thomas Heavey in 2015. # # theavey@bu.edu thomasjheavey@gmail.com # # # # Copyright 2015 Thomas J. Heavey IV # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # # implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # # ######################################################################## # This is written to work with python 3 because it should be good to # be working on the newest version of python. from __future__ import print_function import argparse # For parsing commandline arguments import datetime import glob # Allows referencing file system/file names import os import re import readline # Allows easier file input (with tab completion?) import subprocess # Allows for submitting commands to the shell from warnings import warn from thtools import cd, make_obj_dir, save_obj, resolve_path yes = ['y', 'yes', '1'] # An input function that can prefill in the text entry # Not sure if this works in 3.5+ because raw_input is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated. Use os.path.split instead', DeprecationWarning) if '/' in path: rel_dir, f_name = path.rsplit('/', 1) rel_dir = rel_dir + '/' else: rel_dir = '' f_name = path return rel_dir, f_name def create_gau_input(coord_name, template, verbose=True): """ make gaussian input file by combining header and coordinates files This function takes as input a file with a set of molecular coordinates (the form should not matter, it will just be copied into the next file) and a template file that should be the header for the desired calculation (including charge and multiplicity), returns the name of the file, and creates a Gaussian input file ending with '.com' :param str coord_name: name of file with coordinates in a format Gaussian can read :param str template: name of file with header for Gaussian calculation (up to and including the charge and multiplicity) :param bool verbose: If True, some status messages will be printed (including file names) :return: name of the written file :rtype: str """ if verbose: print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w') as out_file: with open(template, 'r') as templ_file: if verbose: print('opened {}'.format(template)) for line in templ_file: out_file.write(line) if '\n' not in line: out_file.write('\n') with open(coord_name, 'r') as in_file: if verbose: print('opened {}'.format(coord_name)) for i, line in enumerate(in_file): if i < 2: # ignore first two lines # number of atoms and the title/comment continue # if line.strip().isdigit(): # # the first line is the number of atoms # continue # # XYZ files created by mathematica have a comment # # as the second line saying something like: # # "Created by mathematica". Obv. 
want to ignore that # if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\n\n\n') if verbose: print('created Gaussian input file {}'.format(_out_name)) return _out_name def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name + '*') _in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len) # sort by length (because otherwise would # put 1,10,11,... as opposed to 1,...,9,10,... # if number 01,02,... They should all be the same length and the # second sort won't do anything. if not batch: num_files = len(_in_name_list) if num_files > 1: print('Multiple files starting with {}'.format(base_name)) if input('Did you mean to execute a batch job? ') in yes: batch = True else: print('What file name shall I use?') _in_name_list = [rlinput('file name: ', base_name)] return _in_name_list, batch def use_template(template, in_names, verbose): made_name_list = [] for in_name in in_names: out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {} to files to possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): """ Write submission script for (Gaussian) jobs for submission to queue If make_xyz is not None, the file make_xyz will be checked to exist first to make sure to not waste time when missing a necessary input file. :param str input_name: Name of the file to use as input :param int num_cores: Number of cores to request :param str time: Amount of time to request in the format 'hh:mm:ss' :param bool verbose: If True, print out some status messages and such :type mem: int or str :param mem: Minimum amount of memory to request :param str executable: Executable file to use for the job Example, 'g09', 'g16' :param str chk_file: If not None, this file will be copied back after the job has completed. If this is not None and make_input is True, this will also be passed to use_gen_template. :param bool copy_chk: If this is True, the script will attempt to copy what should be an existing checkpoint file to the scratch directory before running the job. `chk_file` must be not None as well. :param str ln_running: If not None, this will be the base name for linking the output file to the current directory. If chk_file is not None, it will also be linked with the same base name. :param str hold_jid: Job on which this job should depend. This should be the name of another job in the queuing system. :param str xyz: Name of an xyz file to use as input to use_gen_template (if make_input is True). :param str make_xyz: The name of a file to pass to obabel to be used to create an xyz file to pass to use_gen_template. :param bool make_input: If True, use_gen_template will be used to create input for the Gaussian calculation. :param dict ugt_dict: dict of arguments to pass to use_gen_template. This should not include out_file, xyz, nproc, mem, or checkpoint because those will all be used from other arguments to this function. 
out_file will be input_name; xyz will be xyz or a time-based name if make_xyz is not None; nproc will be $NSLOTS (useful if this gets changed after job submission); mem will be mem; and checkpoint will be chk_file. :return: The name of the script file :rtype: str """ rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name + '.com' == file_name: raise SyntaxError('problem interpreting file name. ' + 'Period in file name?') out_name = short_name + '.out' elif '.' in file_name: short_name, input_extension = os.path.splitext(file_name) if not short_name + '.' + input_extension == file_name: raise SyntaxError('problem interpreting file name. ' + 'Period in file name?') out_name = short_name + '.out' else: short_name = file_name file_name = short_name + '.com' print('Assuming input file is {}'.format(file_name)) out_name = short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None or make_xyz is not None: n_xyz = temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is not None: chk_line = 'checkpoint=\'{}\','.format(chk_file) else: chk_line = '' with open(_script_name, 'w') as script_file: sfw = script_file.write sfw('#!/bin/bash -l\n\n') sfw('#$ -pe omp {}\n'.format(num_cores)) sfw('#$ -M theavey@bu.edu\n') sfw('#$ -m eas\n') sfw('#$ -l h_rt={}\n'.format(time)) sfw('#$ -l mem_total={}G\n'.format(mem)) sfw('#$ -N {}\n'.format(job_name)) sfw('#$ -j y\n') sfw('#$ -o {}.log\n\n'.format(short_name)) if hold_jid is not None: sfw('#$ -hold_jid {}\n\n'.format(hold_jid)) if make_xyz is not None: sfw('if [ ! 
-f {} ]; then\n'.format( os.path.abspath(make_xyz)) + ' exit 17\n' 'fi\n\n') sfw('module load wxwidgets/3.0.2\n') sfw('module load openbabel/2.4.1\n\n') sfw('obabel {} -O {}\n\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c "from gautools.tools import ' 'use_gen_template as ugt;\n' 'from thtools import load_obj, get_node_mem;\n' 'm = get_node_mem();\n' 'd = load_obj(\'{}\');\n'.format( os.path.abspath(pkl_path)) + 'ugt(\'{}\',\'{}\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)"\n\n') sfw('INPUTFILE={}\n'.format(file_name)) sfw('OUTPUTFILE={}\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\n\n'.format(chk_file)) else: sfw('\n') if ln_running is not None: sfw('WORKINGOUT={}.out\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\n\n'.format(ln_running)) else: sfw('\n') sfw('CURRENTDIR=`pwd`\n') sfw('SCRATCHDIR=/scratch/$USER\n') sfw('mkdir -p $SCRATCHDIR\n\n') sfw('cd $SCRATCHDIR\n\n') sfw('cp $CURRENTDIR/$INPUTFILE .\n') if chk_file is not None: sfw('# ') if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\n\n') else: sfw('\n') if ln_running is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\n') if chk_file is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\n\n') else: sfw('\n') sfw('echo About to run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\n\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\n\n') if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\n\n') else: sfw('\n\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\n') if chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\n\n') else: sfw('\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n') sfw('echo output was copied to $CURRENTDIR\n\n') if verbose: print('script written to {}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs = [] if batch: if submit or input('submit all jobs? ') in yes: for script in scripts: rd, f = _dir_and_file(script) with cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't really know how this works. Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('No jobs submitted, but scripts created') else: if submit or input('submit job {}? '.format(scripts[0])) in yes: rd, f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't really know how this works. 
Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('{} not submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4]) for output in outputs] return _job_info if __name__ == '__main__': description = 'Create and submit a script to run a Gaussian job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores for job') # I should probably check validity of this time request # Maybe it doesn't matter so much because it just won't # submit the job and it will give quick feedback about that? parser.add_argument('-t', '--time', help='Time required as "hh:mm:ss"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template file for creating input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for linking output to cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which this job should depend') args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list = [] for in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list): # This should never be the case as far as I know, but I would # like to make sure everything input gets a script and all the # script names are there to be submitted. raise IOError('num scripts dif. from num names given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and args.nojobinfo: for job in job_info: print(job) if args.verbose: print('Done. Completed normally.')
44.496536
78
0.553537
2,450
19,267
4.216735
0.198776
0.004259
0.015681
0.012777
0.194076
0.145388
0.116155
0.088472
0.080922
0.080922
0
0.007388
0.339648
19,267
432
79
44.599537
0.804606
0.302849
0
0.239726
0
0
0.197488
0.016644
0
0
0
0
0
1
0.023973
false
0
0.041096
0
0.089041
0.054795
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16aff0c4c406b2f10dac6cda72a39c612f61400e
2,036
py
Python
experiments/recorder.py
WeiChengTseng/maddpg
f2813ab8bc43e2acbcc69818672e2e2fd305a007
[ "MIT" ]
3
2022-01-04T13:32:11.000Z
2022-01-11T05:59:22.000Z
experiments/recorder.py
WeiChengTseng/maddpg
f2813ab8bc43e2acbcc69818672e2e2fd305a007
[ "MIT" ]
null
null
null
experiments/recorder.py
WeiChengTseng/maddpg
f2813ab8bc43e2acbcc69818672e2e2fd305a007
[ "MIT" ]
null
null
null
import json
import copy
import pdb
import numpy as np
import pickle


def listify_mat(matrix):
    matrix = np.array(matrix).astype(str)
    if len(matrix.shape) > 1:
        matrix_list = []
        for row in matrix:
            try:
                matrix_list.append(list(row))
            except:
                pdb.set_trace()
        return matrix_list
    else:
        return list(matrix)


class Recorder():
    def __init__(self):
        self._traj, self._cur_traj = [], []
        return

    def pack_traj(self):
        self._traj.append(copy.deepcopy(self._cur_traj))
        self._cur_traj = []
        return

    def add(self, o, a, r, d):
        # self._cur_traj.append((o, a, r, d))
        self._cur_traj.append(
            (listify_mat(o), listify_mat(a), listify_mat(r), d))
        return

    def export_pickle(self, filename='traj'):
        if filename == '':
            raise ValueError('incorrect file name')
        traj = []
        for t in self._traj:
            obs = np.array([tt[0] for tt in t]).astype(np.float32)
            act = np.array([tt[1] for tt in t]).astype(np.float32)
            rwd = np.array([tt[2] for tt in t]).astype(np.float32)
            done = np.array([tt[3] for tt in t])
            # pdb.set_trace()
            traj.append({
                'observations': obs[:-1],
                'next_observations': obs[1:],
                'actions': act[:-1],
                'rewards': rwd[:-1],
                'terminals': done[:-1]
            })
        with open('{}.pkl'.format(filename), 'wb') as outfile:
            pickle.dump(traj, outfile)
        return

    def export(self, filename='traj'):
        if filename == '':
            raise ValueError('incorrect file name')
        traj = {'traj': []}
        for t in self._traj:
            traj['traj'].append(t)
        # json.dumps(traj, sort_keys=True, indent=4)
        pdb.set_trace()
        with open('{}.json'.format(filename), 'w') as outfile:
            json.dump(traj, outfile)
        return
27.513514
66
0.515717
252
2,036
4.039683
0.313492
0.034381
0.054028
0.031434
0.30943
0.30943
0.230845
0.163065
0.121807
0.121807
0
0.012725
0.343811
2,036
74
67
27.513514
0.749252
0.046169
0
0.224138
0
0
0.062919
0
0
0
0
0
0
1
0.103448
false
0
0.086207
0
0.327586
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
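A minimal usage sketch for the Recorder class in the experiments/recorder.py record above. The step loop, the per-agent values, and the output file name are hypothetical illustrations, not part of the dataset record; export_pickle is used rather than export because export drops into pdb unconditionally.

rec = Recorder()
for step in range(3):
    obs = [[0.0, 1.0], [1.0, 0.0]]   # per-agent observations
    act = [[0.1], [0.9]]             # per-agent actions
    rwd = [[1.0], [0.5]]             # per-agent rewards
    done = [False, False]
    rec.add(obs, act, rwd, done)
rec.pack_traj()                      # close the current trajectory
rec.export_pickle('traj')            # writes traj.pkl with observation/action/reward/terminal arrays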
16b0c13e303ebbec34fd3a80391f02025c584689
589
py
Python
generate/dummy_data/mvp/gen_csv.py
ifekxp/data
f3571223f51b3fcc3a708d9ac82e76e3cc1ee068
[ "MIT" ]
null
null
null
generate/dummy_data/mvp/gen_csv.py
ifekxp/data
f3571223f51b3fcc3a708d9ac82e76e3cc1ee068
[ "MIT" ]
null
null
null
generate/dummy_data/mvp/gen_csv.py
ifekxp/data
f3571223f51b3fcc3a708d9ac82e76e3cc1ee068
[ "MIT" ]
null
null
null
from faker import Faker
import csv

# Reference: https://pypi.org/project/Faker/

output = open('data.CSV', 'w', newline='')
fake = Faker()

header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat']

mywriter = csv.writer(output)
mywriter.writerow(header)

for r in range(1000):
    mywriter.writerow([
        fake.name(),
        fake.random_int(min=18, max=80, step=1),
        fake.street_address(),
        fake.city(),
        fake.state(),
        fake.zipcode(),
        fake.longitude(),
        fake.latitude()
    ])

output.close()
21.814815
73
0.556876
69
589
4.724638
0.652174
0.067485
0
0
0
0
0
0
0
0
0
0.020785
0.264856
589
27
74
21.814815
0.732102
0.071307
0
0
0
0
0.076923
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
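A small follow-up sketch for the Faker/CSV generator record above, showing one way the generated data.CSV could be read back for a quick sanity check. The pandas usage is an assumption for illustration and is not part of the original record.

import pandas as pd

df = pd.read_csv('data.CSV')        # file produced by the gen_csv.py script above
print(df.shape)                     # expect (1000, 8): one row per writerow call
print(df[['name', 'age']].head())   # spot-check a couple of generated columns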
16b1afada94a1ed1f6f7ce90f2dda1d6203c70b0
1,302
py
Python
pyscf/nao/test/test_0017_tddft_iter_nao.py
mfkasim1/pyscf
7be5e015b2b40181755c71d888449db936604660
[ "Apache-2.0" ]
3
2021-02-28T00:52:53.000Z
2021-03-01T06:23:33.000Z
pyscf/nao/test/test_0017_tddft_iter_nao.py
mfkasim1/pyscf
7be5e015b2b40181755c71d888449db936604660
[ "Apache-2.0" ]
36
2018-08-22T19:44:03.000Z
2020-05-09T10:02:36.000Z
pyscf/nao/test/test_0017_tddft_iter_nao.py
mfkasim1/pyscf
7be5e015b2b40181755c71d888449db936604660
[ "Apache-2.0" ]
4
2018-02-14T16:28:28.000Z
2019-08-12T16:40:30.000Z
from __future__ import print_function, division
import os, unittest
from pyscf.nao import tddft_iter

dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname)

try:
    from pyscf.lib import misc
    libnao_gpu = misc.load_library("libnao_gpu")
    td_gpu = tddft_iter(label='water', cd=dname, GPU=True)
except:
    td_gpu = None


class KnowValues(unittest.TestCase):

    def test_tddft_iter(self):
        """ This is iterative TDDFT with SIESTA starting point """
        self.assertTrue(hasattr(td, 'xocc'))
        self.assertTrue(hasattr(td, 'xvrt'))
        self.assertTrue(td.ksn2f.sum() == 8.0)  # water: O -- 6 electrons in the valence + H2 -- 2 electrons
        self.assertEqual(td.xocc[0].shape[0], 4)
        self.assertEqual(td.xvrt[0].shape[0], 19)
        dn0 = td.apply_rf0(td.moms1[:, 0])

    def test_tddft_iter_gpu(self):
        """ Test GPU version """
        if td_gpu is not None:
            self.assertTrue(hasattr(td_gpu, 'xocc'))
            self.assertTrue(hasattr(td_gpu, 'xvrt'))
            self.assertTrue(td_gpu.ksn2f.sum() == 8.0)  # water: O -- 6 electrons in the valence + H2 -- 2 electrons
            self.assertEqual(td_gpu.xocc[0].shape[0], 4)
            self.assertEqual(td_gpu.xvrt[0].shape[0], 19)
            dn0 = td_gpu.apply_rf0(td_gpu.moms1[:, 0])


if __name__ == "__main__":
    unittest.main()
33.384615
107
0.678955
201
1,302
4.199005
0.363184
0.059242
0.099526
0.109005
0.430095
0.331754
0.270142
0.227488
0.158768
0.158768
0
0.031511
0.171275
1,302
38
108
34.263158
0.750695
0.144393
0
0
0
0
0.04
0
0
0
0
0
0.357143
1
0.071429
false
0
0.142857
0
0.25
0.035714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16b1e0777507d0977f5c8842b27867dc734bcc90
898
py
Python
setup.py
dimasciput/osm2geojson
7b5ba25e39d80838d41f342237161e0fdc5e64b6
[ "MIT" ]
null
null
null
setup.py
dimasciput/osm2geojson
7b5ba25e39d80838d41f342237161e0fdc5e64b6
[ "MIT" ]
null
null
null
setup.py
dimasciput/osm2geojson
7b5ba25e39d80838d41f342237161e0fdc5e64b6
[ "MIT" ]
null
null
null
import io
from os import path
from setuptools import setup

dirname = path.abspath(path.dirname(__file__))

with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()


def parse_requirements(filename):
    lines = (line.strip() for line in open(path.join(dirname, filename)))
    return [line for line in lines if line and not line.startswith("#")]


setup(
    name='osm2geojson',
    version='0.1.27',
    license='MIT',
    description='Parse OSM and Overpass JSON',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='geometry gis osm parsing',
    author='Parfeniuk Mykola',
    author_email='mikola.parfenyuck@gmail.com',
    url='https://github.com/aspectumapp/osm2geojson',
    packages=['osm2geojson'],
    include_package_data=True,
    install_requires=parse_requirements("requirements.txt")
)
32.071429
73
0.722717
117
898
5.410256
0.649573
0.094787
0.037915
0.060032
0
0
0
0
0
0
0
0.010458
0.148107
898
27
74
33.259259
0.816993
0
0
0
0
0
0.234967
0.030067
0
0
0
0
0
1
0.041667
false
0.041667
0.125
0
0.208333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16b268fae933e4415a5583a098a6d7daa28d2e18
849
py
Python
Cap_11/ex11.6.py
gguilherme42/Livro-de-Python
465a509d50476fd1a87239c71ed741639d58418b
[ "MIT" ]
4
2020-04-07T00:38:46.000Z
2022-03-10T03:34:42.000Z
Cap_11/ex11.6.py
gguilherme42/Livro-de-Python
465a509d50476fd1a87239c71ed741639d58418b
[ "MIT" ]
null
null
null
Cap_11/ex11.6.py
gguilherme42/Livro-de-Python
465a509d50476fd1a87239c71ed741639d58418b
[ "MIT" ]
1
2021-04-22T02:45:38.000Z
2021-04-22T02:45:38.000Z
import sqlite3
from contextlib import closing

nome = input('Nome do produto: ').lower().capitalize()

with sqlite3.connect('precos.db') as conexao:
    with closing(conexao.cursor()) as cursor:
        cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))
        registro = cursor.fetchone()
        if not(registro is None):
            print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')
            valor = float(input('Novo valor: R$'))
            cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))
            if cursor.rowcount == 1:
                conexao.commit()
                print('Alteração gravada.')
            else:
                conexao.rollback()
                print('Alteração abortada.')
        else:
            print(f'Produto {nome} não encontrado.')
38.590909
102
0.572438
93
849
5.204301
0.526882
0.053719
0.066116
0
0
0
0
0
0
0
0
0.011647
0.292108
849
22
103
38.590909
0.793677
0
0
0.105263
0
0
0.291765
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0.210526
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16b631fdc9b05e860febb665678ebc3703e11591
4,882
py
Python
jet20/backend/solver.py
JTJL/jet20
2dc01ebf937f8501bcfb15c6641c569f8097ccf5
[ "MIT" ]
1
2020-07-13T19:02:26.000Z
2020-07-13T19:02:26.000Z
jet20/backend/solver.py
JTJL/jet20
2dc01ebf937f8501bcfb15c6641c569f8097ccf5
[ "MIT" ]
null
null
null
jet20/backend/solver.py
JTJL/jet20
2dc01ebf937f8501bcfb15c6641c569f8097ccf5
[ "MIT" ]
null
null
null
import torch import time import copy from jet20.backend.constraints import * from jet20.backend.obj import * from jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value = obj_value self.vars = _vars self.x = x self.duals = None def __str__(self): return "obj_value: %s vars:%s" % (self.obj_value,self.vars) __repr__ = __str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le = le_cons self.eq = eq_cons self.vars = _vars self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device("cpu"),dtype=torch.float64): def convert(x): if x is not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else: return None if obj is not None: obj_Q,obj_b,obj_c = [convert(x) for x in obj] if obj_Q is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None: obj = LinearObjective(obj_b,obj_c) if le is not None: le_A,le_b = [convert(x) for x in le] if le_b.ndim == 2 and le_b.size(0) == 1: le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq is not None: eq_A,eq_b = [convert(x) for x in eq] if eq_b.ndim == 2 and eq_b.size(0) == 1: eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if self.le is not None: le = self.le.float() else: le = None if self.eq is not None: eq = self.eq.float() else: eq = None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if self.le is not None: le = self.le.double() else: le = None if self.eq is not None: eq = self.eq.double() else: eq = None obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is not None: self.le.to(device) else: le = None if self.eq is not None: self.eq.to(device) else: eq = None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars = { var: v.item() for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres = [] self.posts = [] def solve(self,p,config,x=None): for pre in self.pres: start = time.time() p,x = pre.preprocess(p,x,config) logger.debug("preprocessing name:%s, time used:%s",pre.name(),time.time()-start) if x is None: x = torch.zeros(p.n).float().to(config.device) start = time.time() p_f32 = p.float() x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug("fast mode, time used:%s",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)): duals = [d.double() for d in duals] else: duals = duals.double() if status == SUB_OPTIMAL: start = time.time() # p = p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug("fast-precision mode, time used:%s",time.time()-start) if status == SUB_OPTIMAL: start = time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug("precision mode, time used:%s",time.time()-start) if status != OPTIMAL: logger.warning("optimal not found, status:%s",status) for post in self.posts: start = time.time() p,x = post.postprocess(p,x,config) logger.debug("postprocessing name:%s, time used:%s",post.name(),time.time()-start) return 
p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres): self.pres.extend(pres) def register_posts(self,*posts): self.posts.extend(posts)
26.247312
102
0.546907
681
4,882
3.773862
0.161527
0.023346
0.042023
0.020233
0.302335
0.236965
0.185992
0.154864
0.132296
0.083268
0
0.006744
0.331831
4,882
185
103
26.389189
0.781116
0.002868
0
0.255814
0
0
0.042566
0
0
0
0
0
0
1
0.100775
false
0
0.062016
0.007752
0.271318
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16b8038b17e6b43264d1acbee80a12ded5b8d440
1,077
py
Python
tests/test_transforms.py
mengfu188/mmdetection.bak
0bc0ea591b5725468f83f9f48630a1e3ad599303
[ "Apache-2.0" ]
2
2020-07-14T13:55:17.000Z
2021-05-07T11:25:31.000Z
tests/test_transforms.py
mengfu188/mmdetection.bak
0bc0ea591b5725468f83f9f48630a1e3ad599303
[ "Apache-2.0" ]
null
null
null
tests/test_transforms.py
mengfu188/mmdetection.bak
0bc0ea591b5725468f83f9f48630a1e3ad599303
[ "Apache-2.0" ]
null
null
null
import torch
from mmdet.datasets.pipelines.transforms import Pad
from mmdet.datasets.pipelines.transforms import FilterBox
import numpy as np
import cv2


def test_pad():
    raw = dict(
        img=np.zeros((200, 401, 3), dtype=np.uint8)
    )
    cv2.imshow('raw', raw['img'])
    pad = Pad(square=True, pad_val=255)
    r = pad(raw)
    print(r['img'].shape)
    cv2.imshow('draw', r['img'])
    cv2.waitKey()

    raw = dict(
        img=np.zeros((402, 401, 3), dtype=np.uint8)
    )
    cv2.imshow('raw', raw['img'])
    pad = Pad(square=True, pad_val=255)
    r = pad(raw)
    print(r['img'].shape)
    cv2.imshow('draw', r['img'])
    cv2.waitKey()


def test_filter_box():
    bboxes = np.array([[0, 0, 10, 10],
                       [10, 10, 20, 20],
                       [10, 10, 19, 20],
                       [10, 10, 20, 19],
                       [10, 10, 19, 19]])
    gt_bboxes = np.array([[0, 0, 10, 9]])
    result = dict(gt_bboxes=bboxes)

    fb = FilterBox((10, 10))
    fb(result)


if __name__ == '__main__':
    # test_pad()
    test_filter_box()
22.914894
57
0.535747
153
1,077
3.653595
0.326797
0.050089
0.060823
0.093023
0.654741
0.593918
0.382826
0.382826
0.382826
0.382826
0
0.099607
0.291551
1,077
46
58
23.413043
0.633028
0.009285
0
0.388889
0
0
0.037559
0
0
0
0
0
0
1
0.055556
false
0
0.138889
0
0.194444
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16ba68b504461ec3bb45c6f18a8ccf9704c15e7e
7,471
py
Python
linprog_curvefit.py
drofp/linprog_curvefit
96ba704edae7cea42d768d7cc6d4036da2ba313a
[ "Apache-2.0" ]
null
null
null
linprog_curvefit.py
drofp/linprog_curvefit
96ba704edae7cea42d768d7cc6d4036da2ba313a
[ "Apache-2.0" ]
3
2019-11-22T08:04:18.000Z
2019-11-26T06:55:36.000Z
linprog_curvefit.py
drofp/linprog_curvefit
96ba704edae7cea42d768d7cc6d4036da2ba313a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 """Curve fitting with linear programming. Minimizes the sum of error for each fit point to find the optimal coefficients for a given polynomial. Overview: Objective: Sum of errors Subject to: Bounds on coefficients Credit: "Curve Fitting with Linear Programming", H. Swanson and R. E. D. Woolsey """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum import string from ortools.linear_solver import pywraplp class ErrorDefinition(enum.Enum): SUM_ABS_DEV = enum.auto() SUM_MAX_DEVIATION = enum.auto() def _generate_variables(solver, points, coeff_ranges, err_max, error_def): """Create coefficient variables. Initial version works for up to 26 variable polynomial. One letter per english alphabet used for coefficient names. TODO(drofp): Figure out naming scheme for arbitrary number of variables. """ num_of_coeff = len(coeff_ranges) variables = [] coeff_names = [] # Add coefficients to variable list. if num_of_coeff == 2: coeff_names.append('m') coeff_names.append('b') else: for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None: lower_bound = -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else: upper_bound = coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num])) # Add absolute error variables to variable list for point_cnt in range(len(points)): positive_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_plus') negative_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return variables def _generate_objective_fn( solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV): """Generate objective function for given error definition.""" objective = solver.Objective() for variable in variables[num_of_coeff:]: objective.SetCoefficient(variable, 1) return objective def _generate_constraints(solver, points, num_of_coeff, variables): constraints = [] for point_num, point in enumerate(points): # Equivalency constraint constraint = solver.Constraint(point[1], point[1]) # Resultant Coefficient terms for coeff_num, coeff in enumerate(variables[:num_of_coeff]): power = num_of_coeff - coeff_num - 1 x_val = point[0] ** power constraint.SetCoefficient(coeff, x_val) # Error terms ex_plus = variables[num_of_coeff + 2 * point_num] ex_minus = variables[num_of_coeff + 2 * point_num + 1] constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint) return constraints def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): """Optimize coefficients for any order polynomial. Args: points: A tuple of points, represented as tuples (x, y) coeff_ranges: A tuple of valid coefficient ranges, respresented as tuples (min, max). Nubmer of elements in list determines order of polynomial, from highest order (0th index) to lowest order (nth index). err_def: An ErrorDefinition enum, specifying the definition for error. err_max: An Integer, specifying the maximum error allowable. solver: a ortools.pywraplp.Solver object, if a specific solver instance is requested by caller. Returns: A Dictionary, the desired coefficients mapped to ther values. 
""" if coeff_ranges is None: raise ValueError('Please provide appropriate coefficient range.') if solver is None: solver = pywraplp.Solver( 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables = _generate_variables( solver, points, coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver, points, num_of_coeff, variables) solver.Solve() var_to_val = dict() for coeff in variables[:num_of_coeff]: var_to_val[coeff.name()] = coeff.solution_value() return var_to_val def demo_optimal_linear_5points(): """Demonstration of getting optimal linear polynomial. Uses 5 points from Swanson's curve fitting paper. """ print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER') points = (0,1), (1,3), (2,2), (3,4), (4,5) coeff_ranges = ((None, None), (None, None)) # solver = pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges) for elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format( type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients # print('Optimized m: {}, b: {}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points(): print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_19points(): print('STARTING QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] x_vals.extend([5.0, 5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0]) y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3]) points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def main(): demo_optimal_quadratic_19points() if __name__ == '__main__': main()
39.115183
80
0.674073
1,106
7,471
4.349005
0.191682
0.059459
0.044906
0.043243
0.399584
0.330146
0.302079
0.290437
0.232225
0.220998
0
0.04339
0.21028
7,471
191
81
39.115183
0.771864
0.220988
0
0.203252
0
0
0.070753
0.009107
0
0
0
0.005236
0
1
0.081301
false
0
0.04878
0
0.186992
0.105691
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16bce26f2376d0aa7170df9f650a479bf160647c
11,177
py
Python
build-script-helper.py
aciidb0mb3r/swift-stress-tester
aad9df89d2aae4640e9f4e06c234818c6b3ed434
[ "Apache-2.0" ]
null
null
null
build-script-helper.py
aciidb0mb3r/swift-stress-tester
aad9df89d2aae4640e9f4e06c234818c6b3ed434
[ "Apache-2.0" ]
null
null
null
build-script-helper.py
aciidb0mb3r/swift-stress-tester
aad9df89d2aae4640e9f4e06c234818c6b3ed434
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """ This source file is part of the Swift.org open source project Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors Licensed under Apache License v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors ------------------------------------------------------------------------------ This is a helper script for the main swift repository's build-script.py that knows how to build and install the stress tester utilities given a swift workspace. """ from __future__ import print_function import argparse import sys import os, platform import subprocess def printerr(message): print(message, file=sys.stderr) def main(argv_prefix = []): args = parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create a unified build of SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to use when building this package') parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building') parser.add_argument('build_actions', help="Extra actions to perform. Can be any number of the following", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs="*", default=['build']) parsed = parser.parse_args(args) if ("install" in parsed.build_actions or "all" in parsed.build_actions) and not parsed.prefix: ArgumentParser.error("'--prefix' is required with the install action") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir to absolute path, relative to root of repo. repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to absolute path, relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env = dict(os.environ) # Use local dependencies (i.e. checked out next sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = "1" if args.update: print("** Updating dependencies of %s **" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies of %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # The test action creates its own build. 
No need to build if we are just testing if should_run_any_action(['build', 'install'], args.build_actions): print("** Building %s **" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Building %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action("generate-xcodeproj", args.build_actions): print("** Generating Xcode project for %s **" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Generating the Xcode project failed') printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action("test", args.build_actions): print("** Testing %s **" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Testing %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action("install", args.build_actions): print("** Installing %s **" % package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Installing %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # Returns true if any of the actions in `action_names` should be run. def should_run_any_action(action_names, selected_actions): for action_name in action_names: if should_run_action(action_name, selected_actions): return True return False def should_run_action(action_name, selected_actions): if action_name in selected_actions: return True elif "all" in selected_actions: return True else: return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args = [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): # Until rdar://53881101 is implemented, we cannot request a build of multiple # targets simultaneously. For now, just build one product after the other. 
for product in products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): args = [swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test': args.extend(['--test-product', product]) else: args.extend(['--product', product]) # Tell SwiftSyntax that we are building in a build-script environment so that # it does not need to rebuilt if it has already been built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory in [bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper for product in get_products(package_dir): src = os.path.join(build_dir, product) dest = os.path.join(bin_dir, product) # Create a copy of the list since we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the rpath to the stdlib in in the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']: # Make the rpath to sourcekitd relative in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest] print('installing %s to %s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name) args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for arg in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return [item for pair in zip([value] * len(list), list) for item in pair] def 
escape_cmd_arg(arg): if '"' in arg or ' ' in arg: return '"%s"' % arg.replace('"', '\\"') else: return arg def get_products(package_dir): # FIXME: We ought to be able to query SwiftPM for this info. if package_dir.endswith("/SourceKitStressTester"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith("/SwiftEvolve"): return ['swift-evolve'] else: return [] if __name__ == '__main__': main()
38.277397
204
0.711014
1,524
11,177
5.007874
0.183071
0.041929
0.015723
0.022406
0.380765
0.314858
0.264806
0.238863
0.189072
0.17728
0
0.002782
0.163908
11,177
291
205
38.408935
0.813911
0.131878
0
0.295567
0
0
0.184488
0.036608
0
0
0
0.003436
0
1
0.08867
false
0
0.024631
0.004926
0.17734
0.098522
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16bd3669143df2de8767a9c8bf39a0f217eb03a8
1,701
py
Python
tests/components/deconz/test_scene.py
pcaston/core
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
[ "Apache-2.0" ]
1
2021-07-08T20:09:55.000Z
2021-07-08T20:09:55.000Z
tests/components/deconz/test_scene.py
pcaston/core
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
[ "Apache-2.0" ]
47
2021-02-21T23:43:07.000Z
2022-03-31T06:07:10.000Z
tests/components/deconz/test_scene.py
OpenPeerPower/core
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
[ "Apache-2.0" ]
null
null
null
"""deCONZ scene platform tests.""" from unittest.mock import patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock): """Test that scenes can be loaded without scenes being available.""" await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0 async def test_scenes(opp, aioclient_mock): """Test that scenes works.""" data = { "groups": { "1": { "id": "Light group id", "name": "Light group", "type": "LightGroup", "state": {"all_on": False, "any_on": True}, "action": {}, "scenes": [{"id": "1", "name": "Scene"}], "lights": [], } } } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1 assert opp.states.get("scene.light_group_scene") # Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data, "/groups/1/scenes/1/recall" ) # Service turn on scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: "scene.light_group_scene"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id) assert len(opp.states.async_all()) == 0
27.885246
82
0.627278
206
1,701
4.92233
0.364078
0.076923
0.063116
0.053254
0.282051
0.234714
0.234714
0.136095
0.136095
0.136095
0
0.007098
0.254556
1,701
60
83
28.35
0.792587
0.042328
0
0.04878
0
0
0.111475
0.046557
0
0
0
0
0.121951
1
0
false
0
0.097561
0
0.097561
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16bdc023e7792aee5f95f6dd1ec12e9328dbed08
4,534
py
Python
model.py
iz2late/baseline-seq2seq
2bfa8981083aed8d30befeb42e41fe78d8ec1641
[ "MIT" ]
1
2021-01-06T20:49:32.000Z
2021-01-06T20:49:32.000Z
model.py
iz2late/baseline-seq2seq
2bfa8981083aed8d30befeb42e41fe78d8ec1641
[ "MIT" ]
null
null
null
model.py
iz2late/baseline-seq2seq
2bfa8981083aed8d30befeb42e41fe78d8ec1641
[ "MIT" ]
null
null
null
import random from typing import Tuple import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch import Tensor class Encoder(nn.Module): def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout = dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self, src): embedded = self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) # output of bi-directional rnn should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return outputs, hidden class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim * 2) + dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2))) attention = torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.dropout = dropout self.attention = attention self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2) output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super().__init__() self.encoder = encoder self.decoder = decoder self.device = device def forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) # first input to the decoder is the <sos> token output = trg[0,:] for t in range(1, max_len): output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] = output 
teacher_force = random.random() < teacher_forcing_ratio top1 = output.max(1)[1] output = (trg[t] if teacher_force else top1) return outputs
37.163934
90
0.635862
600
4,534
4.508333
0.158333
0.055453
0.043253
0.031054
0.330499
0.206654
0.173383
0.136044
0.119039
0.090203
0
0.012639
0.267093
4,534
121
91
37.471074
0.801384
0.021394
0
0.189474
0
0
0
0
0
0
0
0
0
1
0.094737
false
0
0.073684
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16be469a1debb4ce731178e138eb07a68236018a
7,907
py
Python
ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py
xuyannus/Machine-Learning-Collection
6d5dcd18d4e40f90e77355d56a2902e4c617ecbe
[ "MIT" ]
3,094
2020-09-20T04:34:31.000Z
2022-03-31T23:59:46.000Z
ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py
xkhainguyen/Machine-Learning-Collection
425d196e9477dbdbbd7cc0d19d29297571746ab5
[ "MIT" ]
79
2020-09-24T08:54:17.000Z
2022-03-30T14:45:08.000Z
ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py
xkhainguyen/Machine-Learning-Collection
425d196e9477dbdbbd7cc0d19d29297571746ab5
[ "MIT" ]
1,529
2020-09-20T16:21:21.000Z
2022-03-31T21:16:25.000Z
import torch import torch.nn as nn import torch.optim as optim from torchtext.datasets import Multi30k from torchtext.data import Field, BucketIterator import numpy as np import spacy import random from torch.utils.tensorboard import SummaryWriter # to print to tensorboard from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load("de") spacy_eng = spacy.load("en") def tokenize_ger(text): return [tok.text for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>") english = Field( tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>" ) train_data, valid_data, test_data = Multi30k.splits( exts=(".de", ".en"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self, x): # x shape: (seq_length, N) where N is batch size embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size) return hidden, cell class Decoder(nn.Module): def __init__( self, input_size, embedding_size, hidden_size, output_size, num_layers, p ): super(Decoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size) def forward(self, x, hidden, cell): # x shape: (N) where N is for batch size, we want it to be (1, N), seq_length # is 1 here because we are sending in a single word and not a sentence x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding shape: (1, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell)) # outputs shape: (1, N, hidden_size) predictions = self.fc(outputs) # predictions shape: (1, N, length_target_vocabulary) to send it to # loss function we want it to be (N, length_target_vocabulary) so we're # just gonna remove the first dim predictions = predictions.squeeze(0) return predictions, hidden, cell class Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder def forward(self, source, target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len = target.shape[0] target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source) # Grab the first input to the Decoder which will be <SOS> token x = target[0] for t in range(1, target_len): # Use previous hidden, cell as context from encoder at start output, hidden, cell = self.decoder(x, hidden, cell) # Store next output prediction outputs[t] = output # Get the best word the Decoder predicted (index in the vocabulary) best_guess = output.argmax(1) # With probability of teacher_force_ratio we take the actual next word # otherwise we take the 
word that the Decoder predicted it to be. # Teacher Forcing is used so that the model gets used to seeing # similar inputs at training and testing time, if teacher forcing is 1 # then inputs at test time might be completely different than what the # network is used to. This was a long comment. x = target[t] if random.random() < teacher_force_ratio else best_guess return outputs ### We're ready to define everything we need for training our Seq2Seq model ### # Training hyperparameters num_epochs = 100 learning_rate = 0.001 batch_size = 64 # Model hyperparameters load_model = False device = torch.device("cuda" if torch.cuda.is_available() else "cpu") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size = 1024 # Needs to be the same for both RNN's num_layers = 2 enc_dropout = 0.5 dec_dropout = 0.5 # Tensorboard to get nice loss plot writer = SummaryWriter(f"runs/loss_plot") step = 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi["<pad>"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer) sentence = "ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen." for epoch in range(num_epochs): print(f"[Epoch {epoch} / {num_epochs}]") checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence, german, english, device, max_length=50 ) print(f"Translated example sentence: \n {translated_sentence}") model.train() for batch_idx, batch in enumerate(train_iterator): # Get input and targets and get to cuda inp_data = batch.src.to(device) target = batch.trg.to(device) # Forward prop output = model(inp_data, target) # Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss # doesn't take input in that form. For example if we have MNIST we want to have # output to be: (N, 10) and targets just (N). Here we can view it in a similar # way that we have output_words * batch_size that we want to send in into # our cost function, so we need to do some reshapin. While we're at it # Let's also remove the start token while we're at it output = output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output, target) # Back prop loss.backward() # Clip to avoid exploding gradient issues, makes sure grads are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step optimizer.step() # Plot to tensorboard writer.add_scalar("Training loss", loss, global_step=step) step += 1 score = bleu(test_data[1:100], model, german, english, device) print(f"Bleu score {score*100:.2f}")
32.539095
103
0.682433
1,119
7,907
4.651475
0.264522
0.026897
0.021518
0.026513
0.206916
0.191354
0.173487
0.173487
0.1195
0.101441
0
0.012877
0.224105
7,907
242
104
32.673554
0.835534
0.246238
0
0.100719
0
0
0.052071
0.007101
0
0
0
0
0
1
0.057554
false
0
0.071942
0.014388
0.18705
0.021583
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16beddc32cad55aeba19e5840d544ba51efbce38
2,533
py
Python
gail_chatbot/light/sqil/light_sentence_imitate_mixin.py
eublefar/gail_chatbot
fcb7798515c0e2c031b5127803eb8a9f1fd4f0ab
[ "MIT" ]
null
null
null
gail_chatbot/light/sqil/light_sentence_imitate_mixin.py
eublefar/gail_chatbot
fcb7798515c0e2c031b5127803eb8a9f1fd4f0ab
[ "MIT" ]
null
null
null
gail_chatbot/light/sqil/light_sentence_imitate_mixin.py
eublefar/gail_chatbot
fcb7798515c0e2c031b5127803eb8a9f1fd4f0ab
[ "MIT" ]
null
null
null
from typing import Dict, Any, List import string from parlai.core.agents import Agent from parlai.core.message import Message from random import sample import pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): """Abstract class that handles passing expert trajectories alongside self-play sampling """ def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any] = None): self.id = "LightChatbotSelfPlay" self.train_step = 0 self.self_speaker_token = "<speaker_self>" self.other_speaker_token = "<speaker_other>" def act(self): raise NotImplementedError() def batch_act(self, observations): self.train_step += 1 # Add generated histories to data ones imitate = [] sample = [] for i, observation in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog in observation["text"] if len(dialog[1]) > 0 ] ) imitate.extend( [ dialog for dialog in observation["text"] if len(dialog[1]) > 0 ] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump == 0 ) and self.train_step != 0: self.checkpoint([sample, utterances]) return [{"id": self.id} for _ in observations] def batch_imitate(self, dialogs): """Implement sampling utterances and memorization here""" pass def batch_sample(self, dialogs) -> List[str]: """Implement update here""" pass def batch_update(self): """Update weights here""" pass def _update_histories(self, utterances, other=False): for i in range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token if not other else self.other_speaker_token) + utterances[i] ) self.histories[i] = history def _convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in history ] return history
29.8
91
0.586656
280
2,533
5.135714
0.325
0.075104
0.052156
0.069541
0.079277
0.054242
0.054242
0.054242
0.054242
0.054242
0
0.006403
0.321753
2,533
84
92
30.154762
0.830617
0.087248
0
0.081967
0
0
0.025753
0
0
0
0
0
0
1
0.131148
false
0.04918
0.098361
0
0.278689
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16bf4f8f27c28015e220b292e189af4ce08ed99c
4,417
py
Python
httpd.py
whtt8888/TritonHTTPserver
99adf3f1e6c3867bb870cda8434605c59409ea19
[ "MIT" ]
2
2019-04-07T06:11:56.000Z
2019-10-14T05:08:16.000Z
httpd.py
whtt8888/TritonHTTPserver
99adf3f1e6c3867bb870cda8434605c59409ea19
[ "MIT" ]
null
null
null
httpd.py
whtt8888/TritonHTTPserver
99adf3f1e6c3867bb870cda8434605c59409ea19
[ "MIT" ]
null
null
null
import sys import os import socket import time import threading class MyServer: def __init__(self, port, doc_root): self.port = port self.doc_root = doc_root self.host = '127.0.0.1' self.res_200 = "HTTP/1.1 200 OK\r\nServer: Myserver 1.0\r\n" self.res_404 = "HTTP/1.1 404 NOT FOUND\r\nServer: Myserver 1.0\r\n\r\n" self.res_400 = "HTTP/1.1 400 Client Error\r\nServer: Myserver 1.0\r\n\r\n" self.res_close = "HTTP/1.1 Connection:close\r\nServer: Myserver 1.0\r\n\r\n" # map request into dict def req_info(self, request): # 400 malform if request[-4:] != '\r\n\r\n': info = {'url': '400malform'} return info headers = request.splitlines() firstline = headers.pop(0) try: (act, url, version) = firstline.split() except ValueError: info = {'url': '400malform'} return info info = {'act': act, 'url': url, 'version': version} for h in headers: h = h.split(': ') if len(h) < 2: continue field = h[0] value = h[1] info[field] = value # mapping url, return 404 escape or absolute filename # judge whether escape path = '' x = url.split('/') i = 0 while i < len(x): if '' in x: x.remove('') if i < 0 or x[0] == '..' or len(x) == 0: # path escape from file root info['url'] = '404escape' return info if i < len(x) and x[i] == '..': x.remove(x[i]) x.remove(x[i - 1]) i -= 1 else: i += 1 # map index.html if len(x[-1].split('.')) < 2: x.append('index.html') for d in range(len(x)): path = path + '/' + x[d] info['url'] = os.path.realpath(self.doc_root + path) return info # generate response def res_gen(self, reqinfo): path = reqinfo['url'] # 404 escape if path == '404escape': return self.res_404 # 400 malform req if path == "400malform": return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return self.res_400 # 404 not found if not os.path.isfile(path): return self.res_404 # a valid 200 req else: res = self.res_200 res += "Last-Modified: {}\r\n".format(time.ctime(os.stat(path).st_mtime)) with open(path, "rb") as f: data = f.read() res += "Content-Length: {}\r\n".format(len(data)) if path.split('.')[-1] == 'html': res += 'Content-Type: text/html\r\n\r\n' res = res + str(data, 'utf-8') else: # for jpg and png if path.split('.')[-1] == 'png': res += 'Content-Type: image/png\r\n\r\n' else: res += 'Content-Type: image/jpeg\r\n\r\n' res = res + str(data) return res def createsocket(conn, addr): with conn: try: conn.settimeout(5) except socket.timeout: conn.close() # print('closed') # print('Connected by', addr) while True: req = conn.recv(1024).decode() if not req: break info = server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) # print("msg send finished") # msg = server.res_close.encode() # conn.sendall(msg) break if __name__ == '__main__': input_port = int(sys.argv[1]) input_doc_root = sys.argv[2] server = MyServer(input_port, input_doc_root) # Add code to start your server here threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen() while True: conn, addr = s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t) for t in threads: t.join()
31.105634
85
0.479511
556
4,417
3.739209
0.30036
0.016354
0.010101
0.013468
0.100048
0.074074
0.064454
0.054834
0.03848
0.027898
0
0.041481
0.388725
4,417
141
86
31.326241
0.728519
0.089427
0
0.189189
0
0.027027
0.1245
0.00675
0
0
0
0
0
1
0.036036
false
0
0.045045
0
0.171171
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c081effc971dd24b22b938117db5e30575dfca
1,179
py
Python
pf_pweb_sourceman/task/git_repo_man.py
problemfighter/pf-pweb-sourceman
827b1d92ac992ec1495b128e99137aab1cfa09a0
[ "Apache-2.0" ]
null
null
null
pf_pweb_sourceman/task/git_repo_man.py
problemfighter/pf-pweb-sourceman
827b1d92ac992ec1495b128e99137aab1cfa09a0
[ "Apache-2.0" ]
null
null
null
pf_pweb_sourceman/task/git_repo_man.py
problemfighter/pf-pweb-sourceman
827b1d92ac992ec1495b128e99137aab1cfa09a0
[ "Apache-2.0" ]
null
null
null
from git import Repo
from pf_pweb_sourceman.common.console import console
from pf_py_file.pfpf_file_util import PFPFFileUtil


class GitRepoMan:
    def get_repo_name_from_url(self, url: str):
        if not url:
            return None
        last_slash_index = url.rfind("/")
        last_suffix_index = url.rfind(".git")
        if last_suffix_index < 0:
            last_suffix_index = len(url)
        if last_slash_index < 0 or last_suffix_index <= last_slash_index:
            raise Exception("Invalid repo url {}".format(url))
        return url[last_slash_index + 1:last_suffix_index]

    def clone_or_pull_project(self, path, url, branch):
        repo_name = self.get_repo_name_from_url(url)
        if not repo_name:
            raise Exception("Invalid repo")
        if not PFPFFileUtil.is_exist(path):
            console.success("Cloning project: " + repo_name + ", Branch: " + branch)
            Repo.clone_from(url, branch=branch, to_path=path)
        else:
            console.success(repo_name + " Taking pull...")
            repo = Repo(path)
            repo.git.checkout(branch)
            origin = repo.remotes.origin
            origin.pull()
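Editor's illustrative sketch (not part of the original file): how GitRepoMan is typically invoked. The repository URL, local path, and branch name are hypothetical placeholders; GitPython and the pf_pweb_sourceman/pf_py_file packages are assumed to be installed.

# hypothetical usage: clone on first run, pull on subsequent runs
repo_man = GitRepoMan()
repo_man.clone_or_pull_project(
    path="workspace/pf-pweb-sourceman",   # assumed local checkout path
    url="https://github.com/problemfighter/pf-pweb-sourceman.git",
    branch="master",                      # assumed branch name
)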
33.685714
84
0.63274
155
1,179
4.541935
0.348387
0.068182
0.106534
0.042614
0.051136
0
0
0
0
0
0
0.003513
0.275657
1,179
34
85
34.676471
0.820843
0
0
0
0
0
0.066158
0
0
0
0
0
0
1
0.074074
false
0
0.111111
0
0.296296
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c22952eef284ef2bbd4cfa4e2bbaa9380b0ceb
2,969
py
Python
tool/remote_info.py
shanmukmichael/Asset-Discovery-Tool
82c3f2f5cecb394a1ad87b2e504fbef219a466fd
[ "MIT" ]
null
null
null
tool/remote_info.py
shanmukmichael/Asset-Discovery-Tool
82c3f2f5cecb394a1ad87b2e504fbef219a466fd
[ "MIT" ]
null
null
null
tool/remote_info.py
shanmukmichael/Asset-Discovery-Tool
82c3f2f5cecb394a1ad87b2e504fbef219a466fd
[ "MIT" ]
null
null
null
import socket
import paramiko
import json

Hostname = '34.224.2.243'
Username = 'ec2-user'
key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem'


def is_connected():
    try:
        # connect to the host -- tells us if the host is actually
        # reachable
        socket.create_connection(("8.8.8.8", 53))
        return "connected to the Internet!"
    except OSError:
        pass
    return "Please Connect to the Internet!"


is_connected()

try:
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=Hostname, username=Username, key_filename=key)
except paramiko.AuthenticationException:
    print("Failed to connect to {} due to wrong username/password".format(Hostname))
    exit(1)
except:
    print("Failed to connect to {} ".format(Hostname))
    exit(2)

# commands
_, stdout_1, _ = ssh.exec_command("hostname")
_, stdout_2, _ = ssh.exec_command("hostname -I | awk '{print $1}'")
_, stdout_3, _ = ssh.exec_command("cat /sys/class/net/eth0/address")
_, stdout_4, _ = ssh.exec_command(
    "awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release".format('"NAME"'))
_, stdout_5, _ = ssh.exec_command("whoami")
_, stdout_6, _ = ssh.exec_command("last -F")
_, stdout_7, _ = ssh.exec_command("netstat -tnpa | grep 'ESTABLISHED.*sshd'")
#_, stdout_8, _ = ssh.exec_command("sudo {}/24".format())
# egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' --IP-address
# ---------------------------------


def remote_data_1():
    output_1 = stdout_1.readlines()
    output_2 = stdout_2.readlines()
    output_3 = stdout_3.readlines()
    output_4 = stdout_4.readlines()
    output_5 = stdout_5.readlines()
    remote_data_1 = {
        'Hostname': '',
        'IP': '',
        'MAC': '',
        'OS': '',
        'Currentuser': '',
    }
    remote_data_1['Hostname'] = output_1[0].strip('\n')
    remote_data_1['IP'] = output_2[0].strip('\n')
    remote_data_1['MAC'] = output_3[0].strip('\n')
    remote_data_1['OS'] = output_4[0][1:-1].strip('\"')
    remote_data_1['Currentuser'] = output_5[0].strip('\n')
    return json.dumps(remote_data_1, indent=4)


# ----------------------------------
def remote_data_2_():
    output = stdout_6.readlines()
    data_ = []
    filter_ = []
    remote_data_2 = {
        'Hostname': [],
        'IP': [],
        'MAC': [],
        'Lastseen': [],
        'Status': [],
    }
    for i in output:
        data_.append(i.split(' '))
    for i in data_:
        filter_.append(list(filter(None, i)))
    for i in range(len(filter_) - 3):
        remote_data_2['Hostname'].append(filter_[i][0])
        remote_data_2['IP'].append(filter_[i][2])
        remote_data_2['MAC'].append('not found')
        remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8]))
        if 'logged' in filter_[i][9]:
            remote_data_2['Status'].append('Active')
        else:
            remote_data_2['Status'].append('Inactive')
    # ssh.close()
    return remote_data_2
29.39604
84
0.594139
391
2,969
4.253197
0.322251
0.102225
0.059531
0.023452
0.086591
0.032471
0
0
0
0
0
0.037479
0.209161
2,969
100
85
29.69
0.670784
0.088919
0
0.026316
0
0
0.200297
0.028561
0
0
0
0
0
1
0.039474
false
0.026316
0.039474
0
0.131579
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c3880f871252c2ad2ebcf1bd3aca25678856cb
16,099
py
Python
hvac/api/secrets_engines/kv_v2.py
Famoco/hvac
cdc1854385dd981de38bcb6350f222a52bcf3923
[ "Apache-2.0" ]
null
null
null
hvac/api/secrets_engines/kv_v2.py
Famoco/hvac
cdc1854385dd981de38bcb6350f222a52bcf3923
[ "Apache-2.0" ]
null
null
null
hvac/api/secrets_engines/kv_v2.py
Famoco/hvac
cdc1854385dd981de38bcb6350f222a52bcf3923
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """KvV2 methods module.""" from hvac import exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): """KV Secrets Engine - Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html """ def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): """Configure backend level settings that are applied to every key in the key-value store. Supported methods: POST: /{mount_point}/config. Produces: 204 (empty body) :param max_versions: The number of versions to keep per key. This value applies to all keys, but a key's metadata setting can overwrite this value. Once a key has more than the configured allowed versions the oldest version will be permanently deleted. Defaults to 10. :type max_versions: int :param cas_required: If true all keys will require the cas parameter to be set on all write requests. :type cas_required: bool :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ params = { 'max_versions': max_versions, } if cas_required is not None: params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): """Read the KV Version 2 configuration. Supported methods: GET: /auth/{mount_point}/config. Produces: 200 application/json :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict """ api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): """Retrieve the secret at the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the path of the secret to read. This is specified as part of the URL. :type path: str | unicode :param version: Specifies the version to return. If not set the latest version is returned. :type version: int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict """ params = {} if version is not None: params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, params=params, ) return response.json() def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): """Create a new version of a secret at the specified location. If the value does not yet exist, the calling token must have an ACL policy granting the create capability. If the value already exists, the calling token must have an ACL policy granting the update capability. Supported methods: POST: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Path :type path: str | unicode :param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be allowed. If set to 0 a write will only be allowed if the key doesn't exist. If the index is non-zero the write will only be allowed if the key's current version matches the version specified in the cas parameter. 
:type cas: int :param secret: The contents of the "secret" dict will be stored and returned on read. :type secret: dict :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict """ params = { 'options': {}, 'data': secret, } if cas is not None: params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post( url=api_path, json=params, ) return response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): """Set or update data in the KV store without overwriting. :param path: Path :type path: str | unicode :param secret: The contents of the "secret" dict will be stored and returned on read. :type secret: dict :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the create_or_update_secret request. :rtype: dict """ # First, do a read. try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at "{path}"; patch only works on existing data.'.format(path=path)) # Update existing secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): """Issue a soft delete of the secret's latest version at the specified location. This marks the version as deleted and will stop it from being returned from reads, but the underlying data will not be removed. A delete can be undone using the undelete path. Supported methods: DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to delete. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): """Issue a soft delete of the specified versions of the secret. This marks the versions as deleted and will stop them from being returned from reads, but the underlying data will not be removed. A delete can be undone using the undelete path. Supported methods: POST: /{mount_point}/delete/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to delete. This is specified as part of the URL. :type path: str | unicode :param versions: The versions to be deleted. The versioned data will not be deleted, but it will no longer be returned in normal get requests. :type versions: int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. 
:rtype: requests.Response """ if not isinstance(versions, list) or len(versions) == 0: error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): """Undelete the data for the provided version and path in the key-value store. This restores the data, allowing it to be returned on get requests. Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to undelete. This is specified as part of the URL. :type path: str | unicode :param versions: The versions to undelete. The versions will be restored and their data will be returned on normal get requests. :type versions: list of int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ if not isinstance(versions, list) or len(versions) == 0: error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): """Permanently remove the specified version data and numbers for the provided path from the key-value store. Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to destroy. This is specified as part of the URL. :type path: str | unicode :param versions: The versions to destroy. Their data will be permanently deleted. :type versions: list of int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ if not isinstance(versions, list) or len(versions) == 0: error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): """Return a list of key names at the specified location. Folders are suffixed with /. The input must be a folder; list on a file will not return a value. Note that no policy-based filtering is performed on keys; do not encode sensitive information in key names. The values themselves are not accessible via this command. Supported methods: LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the path of the secrets to list. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. 
:rtype: dict """ api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, ) return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): """Retrieve the metadata and versions for the secret at the specified path. Supported methods: GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the path of the secret to read. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict """ api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, ) return response.json() def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): """Updates the max_versions of cas_required setting on an existing path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Path :type path: str | unicode :param max_versions: The number of versions to keep per key. If not set, the backend's configured max version is used. Once a key has more than the configured allowed versions the oldest version will be permanently deleted. :type max_versions: int :param cas_required: If true the key will require the cas parameter to be set on all write requests. If false, the backend's configuration will be used. :type cas_required: bool :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ params = {} if max_versions is not None: params['max_versions'] = max_versions if cas_required is not None: if not isinstance(cas_required, bool): error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): """Delete (permanently) the key metadata and all version data for the specified key. All version history will be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to delete. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, )
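Editor's illustrative sketch (not part of the original file): the KvV2 methods above are normally reached through an hvac client as client.secrets.kv.v2. The Vault URL, token, and secret path below are placeholders.

import hvac

# hypothetical Vault address and token
client = hvac.Client(url='http://127.0.0.1:8200', token='s.placeholder')

# create_or_update_secret() posts to /v1/secret/data/<path>, as documented above
client.secrets.kv.v2.create_or_update_secret(
    path='myapp/config',
    secret={'db_user': 'admin'},
)

# read_secret_version() returns the JSON response; the payload sits under data.data
response = client.secrets.kv.v2.read_secret_version(path='myapp/config')
print(response['data']['data'])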
42.254593
127
0.638611
2,093
16,099
4.794553
0.118968
0.104634
0.020927
0.027902
0.718286
0.667364
0.655805
0.639661
0.614948
0.592725
0
0.005354
0.280639
16,099
380
128
42.365789
0.861152
0.515249
0
0.503597
0
0
0.137899
0.056244
0
0
0
0
0
1
0.093525
false
0
0.014388
0
0.208633
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c3b1e6ee4edc3e7c6e66622f8ee4afa8a44dad
512
py
Python
android/install-all.py
SaschaWillems/vulkan_slim
642bcf1eaba8bbcb94a8bec61f3454c597af72f9
[ "MIT" ]
28
2017-09-04T18:54:49.000Z
2021-09-18T11:52:04.000Z
android/install-all.py
0xm1nam0/Vulkan
ea726e617f71f5ff5c1503bca134b2a7ad17a1a7
[ "MIT" ]
null
null
null
android/install-all.py
0xm1nam0/Vulkan
ea726e617f71f5ff5c1503bca134b2a7ad17a1a7
[ "MIT" ]
1
2018-07-20T06:51:08.000Z
2018-07-20T06:51:08.000Z
# Install all examples to connected device(s)
import subprocess
import sys

answer = input("Install all vulkan examples to attached device, this may take some time! (Y/N)").lower() == 'y'
if answer:
    BUILD_ARGUMENTS = ""
    for arg in sys.argv[1:]:
        if arg == "-validation":
            BUILD_ARGUMENTS += "-validation"
    if subprocess.call(("python build-all.py -deploy %s" % BUILD_ARGUMENTS).split(' ')) != 0:
        print("Error: Not all examples may have been installed!")
        sys.exit(-1)
36.571429
111
0.644531
70
512
4.671429
0.628571
0.12844
0
0
0
0
0
0
0
0
0
0.0075
0.21875
512
13
112
39.384615
0.81
0.083984
0
0
0
0
0.385439
0
0
0
0
0
0
1
0
false
0
0.181818
0
0.181818
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c4d3d9ff39c41395ea4a9779719c084f2fc55a
1,726
py
Python
main.py
juangallostra/moonboard
d4a35857d480ee4bed06faee44e0347e1070b6b8
[ "MIT" ]
null
null
null
main.py
juangallostra/moonboard
d4a35857d480ee4bed06faee44e0347e1070b6b8
[ "MIT" ]
null
null
null
main.py
juangallostra/moonboard
d4a35857d480ee4bed06faee44e0347e1070b6b8
[ "MIT" ]
null
null
null
from generators.ahoughton import AhoughtonGenerator
from render_config import RendererConfig
from problem_renderer import ProblemRenderer
from moonboard import get_moonboard
from adapters.default import DefaultProblemAdapter
from adapters.crg import CRGProblemAdapter
from adapters.ahoughton import AhoughtonAdapter
import json


def main():
    # Create Renderer
    config = RendererConfig()
    renderer = ProblemRenderer(
        get_moonboard(2017),
        DefaultProblemAdapter(),
        config
    )
    crg_renderer = ProblemRenderer(
        get_moonboard(2017),
        CRGProblemAdapter(),
        config
    )
    ahoughton_renderer_2016 = ProblemRenderer(
        get_moonboard(2016),
        AhoughtonAdapter(),
        config
    )
    ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe')
    ahoughton_renderer_2017 = ProblemRenderer(
        get_moonboard(2017),
        AhoughtonAdapter(),
        config
    )
    ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe')

    # Load data
    with open('data/problems.json', 'r') as f:
        problems = json.load(f)
    renderer.render_problem(problems['339318'], with_info=True)

    with open('data/crg.json', 'r') as f:
        crg_problems = json.load(f)
    crg_renderer.render_problem(crg_problems['1'])

    # Ahoughton generator and adapter test
    # 2016
    problem = ahoughton_generator_2016.generate()
    ahoughton_renderer_2016.render_problem(problem)
    # 2017
    problem = ahoughton_generator_2017.generate()
    ahoughton_renderer_2017.render_problem(problem)


if __name__ == "__main__":
    main()
30.280702
113
0.707995
177
1,726
6.655367
0.293785
0.050934
0.091681
0.078947
0.135823
0.06961
0.06961
0
0
0
0
0.052053
0.209733
1,726
56
114
30.821429
0.811584
0.041715
0
0.204545
0
0
0.074029
0.044903
0
0
0
0
0
1
0.022727
false
0
0.181818
0
0.204545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c4fdb052f6373448ef88971819f508813eb2d7
5,228
py
Python
GearBot/Util/Pages.py
JohnyTheCarrot/GearBot
8a32bfc79f997a154c9abccbf6742a79fc5257b0
[ "MIT" ]
null
null
null
GearBot/Util/Pages.py
JohnyTheCarrot/GearBot
8a32bfc79f997a154c9abccbf6742a79fc5257b0
[ "MIT" ]
null
null
null
GearBot/Util/Pages.py
JohnyTheCarrot/GearBot
8a32bfc79f997a154c9abccbf6742a79fc5257b0
[ "MIT" ]
null
null
null
import discord

from Util import Utils, Emoji, Translator

page_handlers = dict()
known_messages = dict()


def on_ready(bot):
    load_from_disc()


def register(type, init, update, sender_only=False):
    page_handlers[type] = {
        "init": init,
        "update": update,
        "sender_only": sender_only
    }


def unregister(type_handler):
    if type_handler in page_handlers.keys():
        del page_handlers[type_handler]


async def create_new(type, ctx, **kwargs):
    text, embed, has_pages, emoji = await page_handlers[type]["init"](ctx, **kwargs)
    message: discord.Message = await ctx.channel.send(text, embed=embed)
    if has_pages or len(emoji) > 0:
        data = {
            "type": type,
            "page": 0,
            "trigger": ctx.message.id,
            "sender": ctx.author.id
        }
        for k, v in kwargs.items():
            data[k] = v
        known_messages[str(message.id)] = data
        try:
            if has_pages:
                await message.add_reaction(Emoji.get_emoji('LEFT'))
            for e in emoji:
                await message.add_reaction(e)
            if has_pages:
                await message.add_reaction(Emoji.get_emoji('RIGHT'))
        except discord.Forbidden:
            await ctx.send(
                f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('paginator_missing_perms', ctx, prev=Emoji.get_chat_emoji('LEFT'), next=Emoji.get_chat_emoji('RIGHT'))} {Emoji.get_chat_emoji('WARNING')}")
        if len(known_messages.keys()) > 500:
            del known_messages[list(known_messages.keys())[0]]
        save_to_disc()


async def update(bot, message, action, user):
    message_id = str(message.id)
    if message_id in known_messages.keys():
        type = known_messages[message_id]["type"]
        if type in page_handlers.keys():
            data = known_messages[message_id]
            if data["sender"] == user or page_handlers[type]["sender_only"] is False:
                page_num = data["page"]
                try:
                    trigger_message = await message.channel.get_message(data["trigger"])
                except discord.NotFound:
                    trigger_message = None
                ctx = await bot.get_context(trigger_message) if trigger_message is not None else None
                text, embed, page = await page_handlers[type]["update"](ctx, message, page_num, action, data)
                await message.edit(content=text, embed=embed)
                known_messages[message_id]["page"] = page
                save_to_disc()
                return True
    return False


def basic_pages(pages, page_num, action):
    if action == "PREV":
        page_num -= 1
    elif action == "NEXT":
        page_num += 1
    if page_num < 0:
        page_num = len(pages) - 1
    if page_num >= len(pages):
        page_num = 0
    page = pages[page_num]
    return page, page_num


def paginate(input, max_lines=20, max_chars=1900, prefix="", suffix=""):
    max_chars -= len(prefix) + len(suffix)
    lines = str(input).splitlines(keepends=True)
    pages = []
    page = ""
    count = 0
    for line in lines:
        if len(page) + len(line) > max_chars or count == max_lines:
            if page == "":
                # single 2k line, split smaller
                words = line.split(" ")
                for word in words:
                    if len(page) + len(word) > max_chars:
                        pages.append(f"{prefix}{page}{suffix}")
                        page = f"{word} "
                    else:
                        page += f"{word} "
            else:
                pages.append(f"{prefix}{page}{suffix}")
                page = line
                count = 1
        else:
            page += line
            count += 1
    pages.append(f"{prefix}{page}{suffix}")
    return pages


def paginate_fields(input):
    pages = []
    for page in input:
        page_fields = dict()
        for name, content in page.items():
            page_fields[name] = paginate(content, max_chars=1024)
        pages.append(page_fields)
    real_pages = []
    for page in pages:
        page_count = 0
        page_fields = dict()
        for name, parts in page.items():
            base_name = name
            if len(parts) == 1:  # compare ints with ==, not "is"
                if page_count + len(name) + len(parts[0]) > 4000:
                    real_pages.append(page_fields)
                    page_fields = dict()
                    page_count = 0
                page_fields[name] = parts[0]
                page_count += len(name) + len(parts[0])
            else:
                for i in range(len(parts)):
                    part = parts[i]
                    name = f"{base_name} ({i+1}/{len(parts)})"
                    if page_count + len(name) + len(part) > 3000:
                        real_pages.append(page_fields)
                        page_fields = dict()
                        page_count = 0
                    page_fields[name] = part
                    page_count += len(name) + len(part)
        real_pages.append(page_fields)
    return real_pages


def save_to_disc():
    Utils.saveToDisk("known_messages", known_messages)


def load_from_disc():
    global known_messages
    known_messages = Utils.fetch_from_disk("known_messages")
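Editor's illustrative sketch (not part of the original file): what paginate() does with a long block of text. The import path, sample text, and code-fence prefix/suffix are assumptions for demonstration only.

from Util.Pages import paginate  # assumed import path within the GearBot tree

# a made-up multi-line report that is far longer than one Discord message
long_report = "\n".join(f"line {i}: some diagnostic output" for i in range(200))

chunks = paginate(long_report, max_lines=20, max_chars=1900, prefix="```", suffix="```")
for chunk in chunks:
    # each chunk, including prefix/suffix, stays within the 1900-character
    # budget (barring a single oversized word)
    print(len(chunk))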
33.299363
213
0.55394
633
5,228
4.388626
0.203791
0.065515
0.028798
0.024478
0.192585
0.147588
0.11951
0.078474
0.078474
0.078474
0
0.011758
0.333015
5,228
156
214
33.512821
0.784915
0.005547
0
0.176923
0
0.007692
0.084472
0.048105
0
0
0
0
0
1
0.061538
false
0
0.015385
0
0.115385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c86dba44c4d72104ae5760fa8ff0a89daa4441
5,793
py
Python
src/mazes.py
tim-fi/pyxel_games
3df9d7e1f3d5436d2051db3f5783bdeab916c054
[ "Unlicense" ]
2
2021-04-03T09:49:46.000Z
2021-12-27T19:32:32.000Z
src/mazes.py
tim-fi/pyxel_games
3df9d7e1f3d5436d2051db3f5783bdeab916c054
[ "Unlicense" ]
null
null
null
src/mazes.py
tim-fi/pyxel_games
3df9d7e1f3d5436d2051db3f5783bdeab916c054
[ "Unlicense" ]
null
null
null
from __future__ import annotations from dataclasses import dataclass, field, InitVar from typing import List, Tuple, Iterator, Iterable, Optional from random import choice import pyxel # ------------------------------------------------------- # Types # ------------------------------------------------------- Maze = Tuple[int, ...] # ------------------------------------------------------- # Constants # ------------------------------------------------------- SCALE = 3 BOARD_WIDTH = 32 BOARD_HEIGHT = 32 CELL_SIZE = 6 CELL_COLOR = 15 WALL_SIZE = 1 WALL_COLOR = 5 # Flags UP = 1 << 0 LEFT = 1 << 1 DOWN = 1 << 2 RIGHT = 1 << 3 VISTED = 1 << 4 # Calculated N_CELLS = BOARD_WIDTH * BOARD_HEIGHT BLOCK_SIZE = CELL_SIZE + WALL_SIZE * 2 WINDOW_WIDTH = BOARD_WIDTH * BLOCK_SIZE WINDOW_HEIGHT = BOARD_HEIGHT * BLOCK_SIZE NEIGHBORS = ((0, -1), (-1, 0), (0, 1), (1, 0)) # ------------------------------------------------------- # Maze # ------------------------------------------------------- @dataclass class Generator: width: int height: int start_pos: InitVar[Optional[Tuple[int, int]]] = None _visited_cells: int = field(init=False, default=0) _stack: List[Tuple[int, int]] = field(init=False, default_factory=list) _maze: List[int] = field(init=False) def __post_init__(self, start_pos: Optional[Tuple[int, int]]): x, y = start_pos = start_pos or (0, 0) self._stack.append(start_pos) self._visited_cells = 1 self._maze = [0 for _ in range(self.width * self.height)] self._maze[y * self.width + x] |= VISTED def _get_neighbors(self, x: int, y: int) -> List[int]: return [ (i, dx, dy) for i, (dx, dy) in enumerate(NEIGHBORS) if ( 0 <= x + dx < self.width and 0 <= y + dy < self.height and self._maze[(y + dy) * self.width + (x + dx)] & VISTED == 0 ) ] def step(self) -> Tuple[Maze, Tuple[int, int], bool]: if self._visited_cells < self.width * self.height: x, y = self._stack[-1] neighbors = self._get_neighbors(x, y) if neighbors: d, dx, dy = choice(neighbors) self._maze[y * self.width + x] |= 1 << d x_, y_ = x + dx, y + dy self._maze[y_ * self.width + x_] |= 1 << ((d + 2) % 4) | VISTED self._stack.append((x_, y_)) self._visited_cells += 1 else: del self._stack[-1] return tuple(self._maze), self._stack[-1], False else: return tuple(self._maze), (0, 0), True # ------------------------------------------------------- # Application # ------------------------------------------------------- @dataclass class App: maze: Maze = field(init=False, default=tuple(0 for _ in range(N_CELLS))) generator: Optional[Generator] = field(init=False, default=None) running: bool = field(init=False, default=False) pos: Tuple[int, int] = field(init=False, default=(0, 0)) def run(self): pyxel.init( WINDOW_WIDTH, WINDOW_HEIGHT, scale=SCALE, caption="Mazes", border_width=SCALE, border_color=pyxel.DEFAULT_PALETTE[5], fps=100 ) pyxel.mouse(True) pyxel.run(self.update, self.draw) def draw(self): pyxel.cls(0) for i, cell in enumerate(self.maze): x, y = i % BOARD_WIDTH, i // BOARD_WIDTH scr_x, scr_y = x * BLOCK_SIZE, y * BLOCK_SIZE pyxel.rect( scr_x, scr_y, BLOCK_SIZE, BLOCK_SIZE, WALL_COLOR ) if cell & VISTED: pyxel.rect( scr_x + WALL_SIZE, scr_y + WALL_SIZE, CELL_SIZE, CELL_SIZE, CELL_COLOR ) if cell & UP: pyxel.rect( scr_x + WALL_SIZE, scr_y, CELL_SIZE, WALL_SIZE, CELL_COLOR ) if cell & LEFT: pyxel.rect( scr_x, scr_y + WALL_SIZE, WALL_SIZE, CELL_SIZE, CELL_COLOR ) if cell & DOWN: pyxel.rect( scr_x + WALL_SIZE, scr_y + WALL_SIZE + CELL_SIZE, CELL_SIZE, WALL_SIZE, CELL_COLOR ) if cell & RIGHT: pyxel.rect( scr_x + WALL_SIZE + CELL_SIZE, scr_y + WALL_SIZE, 
WALL_SIZE, CELL_SIZE, CELL_COLOR ) x, y = self.pos pyxel.rectb( x * BLOCK_SIZE + WALL_SIZE, y * BLOCK_SIZE + WALL_SIZE, CELL_SIZE, CELL_SIZE, 2 if self.running else 1 ) def update(self): if pyxel.btnp(pyxel.KEY_SPACE) or pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON): self.running = not self.running if self.running and self.generator is None: self.generator = Generator(BOARD_WIDTH, BOARD_HEIGHT, self.pos) if self.running: next_maze, pos, done = self.generator.step() if done: self.running = False self.generator = None self.maze = next_maze self.pos = pos else: self.pos = ( max(0, min(BOARD_WIDTH-1, pyxel.mouse_x // BLOCK_SIZE)), max(0, min(BOARD_HEIGHT-1, pyxel.mouse_y // BLOCK_SIZE)) ) if __name__ == '__main__': App().run()
32.544944
79
0.468324
652
5,793
3.937117
0.174847
0.049864
0.046747
0.049085
0.204908
0.195559
0.147254
0.114141
0.067004
0.067004
0
0.016442
0.359572
5,793
178
80
32.544944
0.675472
0.085793
0
0.153846
0
0
0.002461
0
0
0
0
0
0
1
0.041958
false
0
0.034965
0.006993
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c9a5ddd1d3e1f33c18bfd269bc6097b27aa5a2
2,281
py
Python
dvc/__init__.py
zjj2wry/dvc
c9df567938eefd7b1f5b094c15f04e5ce704aa36
[ "Apache-2.0" ]
null
null
null
dvc/__init__.py
zjj2wry/dvc
c9df567938eefd7b1f5b094c15f04e5ce704aa36
[ "Apache-2.0" ]
null
null
null
dvc/__init__.py
zjj2wry/dvc
c9df567938eefd7b1f5b094c15f04e5ce704aa36
[ "Apache-2.0" ]
null
null
null
""" DVC ---- Make your data science projects reproducible and shareable. """ import os import warnings VERSION_BASE = '0.23.2' __version__ = VERSION_BASE PACKAGEPATH = os.path.abspath(os.path.dirname(__file__)) HOMEPATH = os.path.dirname(PACKAGEPATH) VERSIONPATH = os.path.join(PACKAGEPATH, 'version.py') def _update_version_file(): """Dynamically update version file.""" from git import Repo from git.exc import InvalidGitRepositoryError try: repo = Repo(HOMEPATH) except InvalidGitRepositoryError: return __version__ sha = repo.head.object.hexsha short_sha = repo.git.rev_parse(sha, short=6) dirty = '.mod' if repo.is_dirty() else '' ver = '{}+{}{}'.format(__version__, short_sha, dirty) # Write a helper file, that will be installed with the package # and will provide a true version of the installed dvc with open(VERSIONPATH, 'w+') as fobj: fobj.write('# AUTOGENERATED by dvc/__init__.py\n') fobj.write('version = "{}"\n'.format(ver)) return ver def _remove_version_file(): """Remove version.py so that it doesn't get into the release.""" if os.path.exists(VERSIONPATH): os.unlink(VERSIONPATH) if os.path.exists(os.path.join(HOMEPATH, 'setup.py')): # dvc is run directly from source without installation or # __version__ is called from setup.py if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' \ and os.getenv('TRAVIS_TAG', '') == '': __version__ = _update_version_file() else: # pragma: no cover _remove_version_file() else: # pragma: no cover # dvc was installed with pip or something. Hopefully we have our # auto-generated version.py to help us provide a true version from dvc.version import version __version__ = version VERSION = __version__ # Ignore numpy's runtime warnings: https://github.com/numpy/numpy/pull/432. # We don't directly import numpy, but our dependency networkx does, causing # these warnings in some environments. Luckily these warnings are benign and # we can simply ignore them so that they don't show up when you are using dvc. warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
32.126761
78
0.702762
312
2,281
4.955128
0.474359
0.027167
0.032988
0.02458
0.087969
0.036223
0
0
0
0
0
0.00432
0.188075
2,281
70
79
32.585714
0.830454
0.361245
0
0.054054
0
0
0.12605
0
0
0
0
0
0
1
0.054054
false
0
0.135135
0
0.243243
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16c9bf4375ba49f6aaa19ea289549cfbf3ed1092
9,092
py
Python
pkg_dir/src/utils/notion_utils.py
robperch/robase_datalysis
343cb59b16630ca776bd941897ab8da63f20bfe1
[ "MIT" ]
2
2022-01-09T19:18:57.000Z
2022-01-09T19:19:04.000Z
pkg_dir/src/utils/notion_utils.py
robperch/robasecode
343cb59b16630ca776bd941897ab8da63f20bfe1
[ "MIT" ]
4
2022-01-17T02:46:24.000Z
2022-02-20T23:04:05.000Z
pkg_dir/src/utils/notion_utils.py
robperch/robasecode
343cb59b16630ca776bd941897ab8da63f20bfe1
[ "MIT" ]
null
null
null
## MODULE WITH UTIL FUNCTIONS - NOTION "----------------------------------------------------------------------------------------------------------------------" ####################################################### Imports ######################################################## "----------------------------------------------------------------------------------------------------------------------" ## Standard library imports import requests ## Third party imports import pandas as pd ## Local application imports from pkg_dir.config.config import ( creds_file_path as crds_loc, ) from pkg_dir.src.utils.general_utils import ( read_yaml, ) "----------------------------------------------------------------------------------------------------------------------" ####################################################### Functions ###################################################### "----------------------------------------------------------------------------------------------------------------------" ## Read notion database with api def notion_api_call(db_api_url, db_id, headers): """ Read notion database with api :param db_api_url (string): base url provided by Notion to make api calls :param db_id (string): unique id of the database that will be read :param headers (dictionary): dict with authorization and version info :return req (?): response after calling notions api """ ## Configuring reading URL read_url = db_api_url + db_id + "/query" ## Requesting info via the API req = requests.request( "POST", read_url, headers=headers ) ## Verifying API call status print("API interaction status code: ", req.status_code) return req ## Calling a Notion database as a json via Notion's API def get_notion_db_json(db_id): """ Calling a Notion database as a json via Notion's API :param db_id (string): unique id of the database that will be called :return db_json (json): json with the notion's db contents """ ## Reading credentials from yaml file yaml_file = read_yaml(crds_loc) notion_version = yaml_file["notion_api"]["notion_version"] db_api_url = yaml_file["notion_api"]["db_api_url"] api_key = yaml_file["notion_api"]["api_key"] ## Building headers for the API call headers = { "Authorization": "Bearer " + api_key, "Notion-Version": notion_version } ## Calling notion's api req = notion_api_call(db_api_url, db_id, headers) ## Converting the api response to a json db_json = req.json() return db_json ## Crating a schema of the notion database that was read def create_notion_db_schema(db_json, relevant_properties): """ Crating a schema of the notion database that was read :param db_json (json): json object obtained by calling notion's api :param relevant_properties (list): list of string with the names of the relevant properties :return db_schema (dictionary): schema of the table that includes the properties' data type """ ## Selecting a sample entry to go over all of it's properties sample_entry = db_json["results"][0]["properties"] ## Bulding dictionary (schema) of the relevant properties and their datatypes db_schema = { prop: { "data_type": sample_entry[prop]["type"] } for prop in sample_entry if prop in relevant_properties } # print(db_schema) return db_schema ## Building a the blueprint dictionary for the dataframe (orient=index) def notion_db_blueprint_df(db_json, db_schema, index_prop): """ Building a the blueprint dictionary for the dataframe (orient=index) :param db_json (json): json object obtained by calling notion's api :return db_schema (dictionary): schema of the table that includes the properties' 
data type :param index_prop (string): name of the property that will serve as the df's index :return df_dict (dict): dictionary that will be used to create a dataframe with the json contents """ ## Empty dictionary that will store all the results df_dict = {} ## Iterating over every row in the dataframe for row in db_json["results"]: ## Defining the table's base attributes #### All properties contained in the notion db row_props = row["properties"] #### Name of the index; key attribute in the notion db row_name = row_props[index_prop]["title"][0]["plain_text"] #### Empty list to store all the row contents row_contents = [] ## Iterating over every relevant property in the table for col in db_schema: ## Identifying the datatype of the property data_type = db_schema[col]["data_type"] ## Set of conditions to determine how the row will be treated #### Skipping the index row if data_type == "title": continue #### Searching for data in specific locations for special data types (1) elif data_type in ["select", "person", "created_by"]: try: row_contents.append(row_props[col][data_type]["name"]) except: row_contents.append("No_data") #### Searching for data in specific locations for special data types (2) elif data_type in ["rich_text"]: try: row_contents.append(row_props[col][data_type][0]["text"]["content"]) except: row_contents.append("No_data") #### Searching for data in specific locations for special data types (2) elif data_type in ["formula"]: try: #### Applying conditions based on the type of formula result if row_props[col][data_type]["type"] == "string": row_contents.append(row_props[col][data_type]["string"]) elif row_props[col][data_type]["type"] == "number": row_contents.append(row_props[col][data_type]["number"]) except: row_contents.append("No_data") #### General procedure to find data else: row_contents.append(row_props[col][db_schema[col]["data_type"]]) ## Saving the row contents gathered df_dict[row_name] = row_contents return df_dict ## Obtaining a dataframe from a notion database def notion_json_to_df(db_json, relevant_properties): """ Obtaining a dataframe from a notion database :param db_json (json): json object obtained by calling notion's api :param relevant_properties (list): list of string with the names of the relevant properties :return df_n (dataframe): resulting dataframe crated based on the blueprint generated """ ## General parameters needed to build the dataframe #### Database schema db_schema = create_notion_db_schema(db_json, relevant_properties) #### Property that will be used as the dataframe's index index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0] ## Building a the blueprint dictionary for the dataframe (orient=index) df_dict = notion_db_blueprint_df(db_json, db_schema, index_prop) ## Creating dataframe with the resulting blueprint dictionary #### Crating dataframe df_n = pd.DataFrame.from_dict(df_dict, orient="index") #### Inserting the table's index as a column at the end of the df df_n.insert( df_n.shape[1], index_prop, df_n.index ) #### Resetting index df_n.reset_index(inplace=True, drop=True) #### Adjusting column names df_n.columns = [col_n for col_n in db_schema] return df_n ## Obtaining a Notion database as dataframe with the selected columns def notion_db_to_df(db_id, relevant_properties): """ Obtaining a Notion database as dataframe with the selected columns :param db_id (string): unique id to identify the notion database :param relevant_properties (list): list of string with the names of the relevant properties 
:return df_n (dataframe): resulting dataframe crated based on the blueprint generated """ ## Calling a Notion database as a json via Notion's API db_json = get_notion_db_json(db_id) ## Obtaining a dataframe from a notion database df_n = notion_json_to_df(db_json, relevant_properties) return df_n "----------------------------------------------------------------------------------------------------------------------" "----------------------------------------------------------------------------------------------------------------------" ## END OF FILE ## "----------------------------------------------------------------------------------------------------------------------" "----------------------------------------------------------------------------------------------------------------------"
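Editor's illustrative sketch (not part of the original file): pulling a Notion database into a dataframe with the helpers above. The database id and property names are placeholders, and a credentials YAML with the notion_api keys is assumed to exist at crds_loc.

from pkg_dir.src.utils.notion_utils import notion_db_to_df

df = notion_db_to_df(
    db_id="00000000-0000-0000-0000-000000000000",     # hypothetical database id
    relevant_properties=["Name", "Status", "Owner"],  # hypothetical property names;
                                                      # one must be the db's title property
)
print(df.head())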
30.006601
120
0.569182
1,082
9,092
4.609982
0.186691
0.020449
0.024058
0.018043
0.486367
0.45008
0.417201
0.394346
0.347033
0.318565
0
0.001142
0.229652
9,092
303
121
30.006601
0.711022
0.434558
0
0.197917
0
0
0.271329
0.198862
0
0
0
0
0
1
0.0625
false
0
0.041667
0
0.166667
0.03125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16caf6d3ac2e6621185a4d16c03069163552a572
8,371
py
Python
libpermian/issueanalyzer/test_baseissue.py
velezd/permian
b52189f44c3112ad933a6b1e303a6b30c272651a
[ "MIT" ]
null
null
null
libpermian/issueanalyzer/test_baseissue.py
velezd/permian
b52189f44c3112ad933a6b1e303a6b30c272651a
[ "MIT" ]
9
2022-02-07T14:14:10.000Z
2022-03-22T09:17:16.000Z
libpermian/issueanalyzer/test_baseissue.py
velezd/permian
b52189f44c3112ad933a6b1e303a6b30c272651a
[ "MIT" ]
3
2022-01-20T09:17:39.000Z
2022-03-08T00:35:58.000Z
import unittest import logging import contextlib from libpermian.settings import Settings from .proxy import IssueAnalyzerProxy from .base import BaseAnalyzer, BaseIssue from .issueset import IssueSet LOGGER = logging.getLogger('test') class NewIssue(BaseIssue): def submit(self): LOGGER.info('submit was called') return super().submit() def make(self): LOGGER.info('make was called') return 'http://issuetracker.example.com/new_issue' def update(self): LOGGER.info('update was called') def _lookup(self): LOGGER.info('lookup was called') return None @property def resolved(self): return False @property def report_url(self): return 'http://issuetracker.example.com/new/foo' class TrackedUnresolvedIssue(NewIssue): def _lookup(self): LOGGER.info('lookup was called') return 'http://issuetracker.example.com/123' @property def resolved(self): return False @property def report_url(self): return 'http://issuetracker.example.com/new/bar' class TrackedResolvedIssue(TrackedUnresolvedIssue): @property def resolved(self): return True class TestNewIssue(unittest.TestCase): def setUp(self): self.settings = Settings({}, {}, []) self.issue = NewIssue(self.settings) def test_properties(self): self.assertTrue(self.issue.new) self.assertFalse(self.issue.tracked) self.assertEqual(self.issue.uri, None) def test_sync(self): # test lookup was called with self.assertLogs('test', level='INFO') as cm: self.issue.sync() self.assertEqual(cm.output, ['INFO:test:lookup was called']) self.test_properties() def test_str(self): self.assertEqual(str(self.issue), self.issue.report_url) class TestTrackedUnresolvedIssue(TestNewIssue): def setUp(self): self.settings = Settings({}, {}, []) self.issue = TrackedUnresolvedIssue(self.settings) def test_properties(self): self.assertFalse(self.issue.new) self.assertTrue(self.issue.tracked) self.assertEqual(self.issue.uri, 'http://issuetracker.example.com/123') def test_str(self): self.assertEqual(str(self.issue), self.issue.uri) # TrackedResolvedIssue should behave the same way as TrackedUnresolvedIssue # so just inherit the whole test case to run the very same test class TestTrackedResolvedIssue(TestTrackedUnresolvedIssue): def setUp(self): self.settings = Settings({}, {}, []) self.issue = TrackedResolvedIssue(self.settings) class TestSubmitDisabled(unittest.TestCase): settings = Settings( { 'issueAnalyzer' : { 'create_issues': False, 'update_issues': False, 'create_issues_instead_of_update': False, } }, {}, [] ) def setUp(self): self.new = NewIssue(self.settings) self.unresolved = TrackedUnresolvedIssue(self.settings) self.resolved = TrackedResolvedIssue(self.settings) # sync the issues so that lookup is not called => logged during submit self.new.sync() self.unresolved.sync() self.resolved.sync() @contextlib.contextmanager def assertUnchanged(self, issue): old_uri = issue.uri old_new = issue.new old_tracked = issue.tracked yield issue self.assertEqual(issue.uri, old_uri) self.assertEqual(issue.new, old_new) self.assertEqual(issue.tracked, old_tracked) def assertSubmitNoop(self, issue): with self.assertUnchanged(issue): with self.assertLogs('test', level='INFO') as cm: issue.submit() issue.submit() self.assertEqual(cm.output, [ "INFO:test:submit was called", "INFO:test:submit was called", ]) def assertSubmitCreate(self, issue): with self.assertLogs('test', level='INFO') as cm: result1 = issue.submit() result2 = issue.submit() self.assertEqual(cm.output, [ "INFO:test:submit was called", "INFO:test:make was called", "INFO:test:submit was called", ]) self.assertEqual(result1, result2) return 
result1 def assertSubmitUpdate(self, issue): with self.assertUnchanged(issue): with self.assertLogs('test', level='INFO') as cm: result1 = issue.submit() result2 = issue.submit() self.assertEqual(cm.output, [ "INFO:test:submit was called", "INFO:test:update was called", "INFO:test:submit was called", ]) self.assertEqual(result1, result2) return result1 def testNew(self): self.assertSubmitNoop(self.new) def testUnresolved(self): self.assertSubmitNoop(self.unresolved) def testResolved(self): self.assertSubmitNoop(self.resolved) class TestSubmitCreateUpdate(TestSubmitDisabled): settings = Settings( { 'issueAnalyzer' : { 'create_issues': True, 'update_issues': True, 'create_issues_instead_of_update': False, } }, {}, [] ) def testNew(self): result = self.assertSubmitCreate(self.new) self.assertTrue(self.new.new) self.assertTrue(self.new.tracked) self.assertEqual(result, 'http://issuetracker.example.com/new_issue') self.assertEqual(result, self.new.uri) # repeated submit doesn't do anything with self.assertUnchanged(self.new): with self.assertLogs('test', level='INFO') as cm: result = self.new.submit() self.assertEqual(cm.output, [ "INFO:test:submit was called", ]) def testUnresolved(self): self.assertSubmitUpdate(self.unresolved) def testResolved(self): self.assertSubmitUpdate(self.resolved) class TestSubmitCreateOnlyNew(TestSubmitCreateUpdate): settings = Settings( { 'issueAnalyzer' : { 'create_issues': True, 'update_issues': False, 'create_issues_instead_of_update': False, } }, {}, [] ) def testUnresolved(self): self.assertSubmitNoop(self.unresolved) def testResolved(self): self.assertSubmitNoop(self.resolved) class TestSubmitUpdateOnlyTracked(TestSubmitCreateUpdate): settings = Settings( { 'issueAnalyzer' : { 'create_issues': False, 'update_issues': True, 'create_issues_instead_of_update': False, } }, {}, [] ) def testNew(self): self.assertSubmitNoop(self.new) class TestSubmitCreateAlwaysWithUpdateOff(TestSubmitCreateUpdate): settings = Settings( { 'issueAnalyzer' : { 'create_issues': True, 'update_issues': False, # This should have no effect 'create_issues_instead_of_update': True, } }, {}, [] ) def testUnresolved(self): old_uri = self.unresolved.uri result = self.assertSubmitCreate(self.unresolved) self.assertEqual(result, 'http://issuetracker.example.com/new_issue') self.assertEqual(self.unresolved.uri, result) self.assertNotEqual(result, old_uri) def testResolved(self): old_uri = self.resolved.uri result = self.assertSubmitCreate(self.resolved) self.assertEqual(result, 'http://issuetracker.example.com/new_issue') self.assertEqual(self.resolved.uri, result) self.assertNotEqual(result, old_uri) # The update_issue should have no effect when create_issues_instead_of_update # is set to True. class TestSubmitCreateAlwaysWithUpdateOn(TestSubmitCreateAlwaysWithUpdateOff): settings = Settings( { 'issueAnalyzer' : { 'create_issues': True, 'update_issues': True, # This should have no effect 'create_issues_instead_of_update': True, } }, {}, [] )
29.896429
79
0.611158
825
8,371
6.121212
0.146667
0.059406
0.023762
0.041188
0.606139
0.556634
0.53604
0.476832
0.403366
0.364951
0
0.002669
0.283957
8,371
279
80
30.003584
0.83984
0.048859
0
0.537118
0
0
0.141725
0.02339
0
0
0
0
0.222707
1
0.148472
false
0
0.030568
0.021834
0.305677
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16cb0577b93ac4b27ff6f443a2d517ea18cbf9f7
6,421
py
Python
naplib/alignment/prosodylab_aligner/__main__.py
gavinmischler/naplib-python
8cd7a0fc700f1c07243169ec42fc087955885adc
[ "MIT" ]
1
2022-03-02T20:54:23.000Z
2022-03-02T20:54:23.000Z
naplib/alignment/prosodylab_aligner/__main__.py
gavinmischler/gavlib
cacf9180b1442e4aed98b6182d586747a6d6ef90
[ "MIT" ]
null
null
null
naplib/alignment/prosodylab_aligner/__main__.py
gavinmischler/gavlib
cacf9180b1442e4aed98b6182d586747a6d6ef90
[ "MIT" ]
null
null
null
# Copyright (c) 2011-2014 Kyle Gorman and Michael Wagner # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Command-line driver for the module """ import logging import os import sys import yaml from bisect import bisect from shutil import copyfile from textgrid import MLF from corpus import Corpus from aligner import Aligner from archive import Archive from utilities import splitname, resolve_opts, \ ALIGNED, CONFIG, HMMDEFS, MACROS, SCORES from argparse import ArgumentParser DICTIONARY = "eng.dict" MODEL = "eng.zip" LOGGING_FMT = "%(message)s" # parse arguments argparser = ArgumentParser(prog="{} -m aligner".format(sys.executable), description="Prosodylab-Aligner") argparser.add_argument("-c", "--configuration", help="config file") argparser.add_argument("-d", "--dictionary", metavar="DICT", action="append", help="dictionary file (default: {}) (can specify multiple)".format(DICTIONARY)) argparser.add_argument("-s", "--samplerate", type=int, help="analysis samplerate (in Hz)") argparser.add_argument("-e", "--epochs", type=int, help="# of epochs of training per round") input_group = argparser.add_argument_group() input_group.add_argument("-r", "--read", help="source for a precomputed acoustic model") input_group.add_argument("-t", "--train", help="directory containing data for training") output_group = argparser.add_mutually_exclusive_group(required=True) output_group.add_argument("-a", "--align", help="directory containing data to align") output_group.add_argument("-w", "--write", help="destination for computed acoustic model") verbosity_group = argparser.add_mutually_exclusive_group() verbosity_group.add_argument("-v", "--verbose", action="store_true", help="Verbose output") verbosity_group.add_argument("-V", "--extra-verbose", action="store_true", help="Even more verbose output") args = argparser.parse_args() # hack to allow proper override of default dictionary if not args.dictionary: args.dictionary = [DICTIONARY] # set up logging loglevel = logging.WARNING if args.extra_verbose: loglevel = logging.DEBUG elif args.verbose: loglevel = logging.INFO logging.basicConfig(format=LOGGING_FMT, level=loglevel) # input: pick one if args.train: if args.read: logging.error("Cannot train on persistent model.") exit(1) logging.info("Preparing corpus '{}'.".format(args.train)) opts = resolve_opts(args) corpus = Corpus(args.train, opts) logging.info("Preparing aligner.") aligner = Aligner(opts) logging.info("Training aligner on corpus '{}'.".format(args.train)) aligner.HTKbook_training_regime(corpus, 
opts["epochs"], flatstart=(args.read is None)) else: if not args.read: args.read = MODEL logging.info("Reading aligner from '{}'.".format(args.read)) # warn about irrelevant flags if args.configuration: logging.warning("Ignoring config flag (-c/--configuration).") args.configuration = None if args.epochs: logging.warning("Ignoring epochs flag (-e/--epochs).") if args.samplerate: logging.warning("Ignoring samplerate flag (-s/--samplerate).") args.samplerate = None # create archive from -r argument archive = Archive(args.read) # read configuration file therefrom, and resolve options with it args.configuration = os.path.join(archive.dirname, CONFIG) opts = resolve_opts(args) # initialize aligner and set it to point to the archive data aligner = Aligner(opts) aligner.curdir = archive.dirname # output: pick one if args.align: # check to make sure we're not aligning on the training data if (not args.train) or (os.path.realpath(args.train) != os.path.realpath(args.align)): logging.info("Preparing corpus '{}'.".format(args.align)) corpus = Corpus(args.align, opts) logging.info("Aligning corpus '{}'.".format(args.align)) aligned = os.path.join(args.align, ALIGNED) scores = os.path.join(args.align, SCORES) aligner.align_and_score(corpus, aligned, scores) logging.debug("Wrote MLF file to '{}'.".format(aligned)) logging.debug("Wrote likelihood scores to '{}'.".format(scores)) logging.info("Writing TextGrids.") size = MLF(aligned).write(args.align) if not size: logging.error("No paths found!") exit(1) logging.debug("Wrote {} TextGrids.".format(size)) elif args.write: # create and populate archive (_, basename, _) = splitname(args.write) archive = Archive.empty(basename) archive.add(os.path.join(aligner.curdir, HMMDEFS)) archive.add(os.path.join(aligner.curdir, MACROS)) # whatever this is, it's not going to work once you move the data if "dictionary" in opts: del opts["dictionary"] with open(os.path.join(archive.dirname, CONFIG), "w") as sink: yaml.dump(opts, sink) (basename, _) = os.path.splitext(args.write) archive_path = os.path.relpath(archive.dump(basename)) logging.info("Wrote aligner to '{}'.".format(archive_path)) # else unreachable logging.info("Success!")
40.13125
102
0.68167
818
6,421
5.295844
0.332518
0.027932
0.022161
0.009695
0.096491
0.063712
0.015235
0
0
0
0
0.001961
0.205731
6,421
159
103
40.383648
0.847451
0.2456
0
0.055046
0
0
0.202748
0.004371
0
0
0
0
0
1
0
false
0
0.110092
0
0.110092
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16cc459343115a5e0d636bad4bf667af5c4f5d6d
4,021
py
Python
init/build_statements.py
andgein/sis-2017-winter-olymp
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
[ "MIT" ]
null
null
null
init/build_statements.py
andgein/sis-2017-winter-olymp
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
[ "MIT" ]
null
null
null
init/build_statements.py
andgein/sis-2017-winter-olymp
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import codecs import os import os.path import shutil import subprocess import logging import glob import json CONTEST_DIR = 'polygon-contest' INIT_FILE = 'init.txt' BUILD_DIR = 'build' LANGUAGE = 'russian' FILES_DIR = 'files-' + LANGUAGE def time_limit_from_int(tl): tl //= 1000 return str(tl) + ' секунд' + ('a' if tl == 1 else 'ы') def memory_limit_from_int(ml): return str(ml // (1024 ** 2)) + ' мегабайт' def build_with_text(text, replace_data, result, section='', problem_name=''): text = text.replace('%TEXT%', section + '\n' + replace_data) with codecs.open(os.path.join(BUILD_DIR, 'data.tex'), 'w', 'utf-8') as data_file: data_file.write(text) cwd = os.getcwd() os.chdir(BUILD_DIR) logging.info('Compile problem %s' % problem_name) for _ in range(2): subprocess.check_output(['pdflatex', 'compile.tex']) os.chdir(cwd) shutil.copy(os.path.join(BUILD_DIR, 'compile.pdf'), os.path.join(FILES_DIR, result)) def main(): id_by_name = {} with open(INIT_FILE, 'r', encoding='utf-8') as init: for line in init: if not line.strip(): continue line = line.strip().split('\t') id_by_name[line[11]] = line[2] logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(levelname)s] %(message)s') if not os.path.exists(FILES_DIR): logging.info('Create folder for output files: %s' % FILES_DIR) os.mkdir(FILES_DIR) if not os.path.exists(BUILD_DIR): logging.info('Create folder for build files: %s' % BUILD_DIR) os.mkdir(BUILD_DIR) problems_dir = os.path.join(CONTEST_DIR, 'problems') for problem_counter, problem_dir in enumerate(glob.glob(os.path.join(problems_dir, '*')), start=1): statement_dir = os.path.join(problem_dir, 'statements', LANGUAGE) properties_file_name = os.path.join(statement_dir, 'problem-properties.json') logging.info('Read problem properties file %s' % properties_file_name) with codecs.open(properties_file_name, 'r', 'utf-8') as properties_file: properties = json.load(properties_file) name = properties['name'] legend = properties['legend'] input_file = properties['inputFile'] output_file = properties['outputFile'] time_limit = time_limit_from_int(properties['timeLimit']) memory_limit = memory_limit_from_int(properties['memoryLimit']) input_format = properties['input'] output_format = properties['output'] samples = "".join(["\exmp{%s}{%s}%%\n" % (sample['input'], sample['output']) for sample in properties['sampleTests']]) notes = '' if len(properties.get('notes','')) > 0: notes = '\\Note\n' + properties['notes'] shutil.copy('template.tex', os.path.join(BUILD_DIR, 'compile.tex')) shutil.copy('olymp.sty', os.path.join(BUILD_DIR, 'olymp.sty')) with codecs.open('data.tex', 'r', 'utf-8') as data_file: data = data_file.read() problem_name = os.path.basename(problem_dir) problem_id = id_by_name[problem_name] data = data.replace('%NAME%', name).replace('%INPUT_FILE%', input_file).replace('%OUTPUT_FILE%', output_file).\ replace('%TIME_LIMIT%', time_limit).replace('%MEMORY_LIMIT%', memory_limit).\ replace('%ID%', problem_id).\ replace('%PROBLEM_COUNTER%', str(problem_counter)).\ replace('%STATEMENT_DIR%', os.path.join('..', statement_dir).replace('\\', '/') + '/') build_with_text(data, legend + '\n\\InputFile\n' + input_format + '\n\\OutputFile\n' + output_format + "\\begin{example}" + samples +"\\end{example}\n" + notes, problem_name + '.pdf', problem_name=problem_name) if __name__ == '__main__': main()
39.038835
123
0.607063
503
4,021
4.652087
0.254473
0.035897
0.042735
0.025641
0.124786
0.061538
0
0
0
0
0
0.006861
0.238747
4,021
102
124
39.421569
0.757596
0.005223
0
0
0
0
0.167042
0.005751
0
0
0
0
0
1
0.05
false
0
0.1
0.0125
0.175
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16cef8471ab7389079cb6001c00f1c83826a7643
1,546
py
Python
pyvips/error.py
kleisauke/pyvips
ae3b0c09669cfb662e773e8ae69cf589ac15e320
[ "MIT" ]
null
null
null
pyvips/error.py
kleisauke/pyvips
ae3b0c09669cfb662e773e8ae69cf589ac15e320
[ "MIT" ]
null
null
null
pyvips/error.py
kleisauke/pyvips
ae3b0c09669cfb662e773e8ae69cf589ac15e320
[ "MIT" ]
null
null
null
# errors from libvips

import sys
import logging

from pyvips import ffi, vips_lib

logger = logging.getLogger(__name__)

_is_PY3 = sys.version_info[0] == 3

if _is_PY3:
    text_type = str
else:
    text_type = unicode

ffi.cdef('''
    const char* vips_error_buffer (void);
    void vips_error_clear (void);
''')


def _to_bytes(x):
    """Convert to a byte string.

    Convert a Python unicode string to a utf-8-encoded byte string. You must
    call this on strings you pass to libvips.
    """
    if isinstance(x, text_type):
        x = x.encode()
    return x


def _to_string(x):
    """Convert to a unicode string.

    If x is a byte string, assume it is utf-8 and decode to a Python unicode
    string. You must call this on text strings you get back from libvips.
    """
    if _is_PY3 and isinstance(x, bytes):
        x = x.decode('utf-8')
    return x


class Error(Exception):
    """An error from vips.

    Attributes:
        message (str): a high-level description of the error
        detail (str): a string with some detailed diagnostics
    """

    def __init__(self, message, detail=None):
        self.message = message
        if detail is None or detail == "":
            detail = _to_string(ffi.string(vips_lib.vips_error_buffer()))
            vips_lib.vips_error_clear()
        self.detail = detail

        logger.debug('Error %s %s', self.message, self.detail)

    def __str__(self):
        return '{0}\n {1}'.format(self.message, self.detail)


__all__ = [
    '_to_bytes', '_to_string',
    'Error',
]
20.891892
76
0.638422
226
1,546
4.159292
0.376106
0.038298
0.014894
0.023404
0.048936
0.048936
0
0
0
0
0
0.008696
0.256145
1,546
73
77
21.178082
0.808696
0.312419
0
0.058824
0
0
0.127872
0
0
0
0
0
0
1
0.117647
false
0
0.088235
0.029412
0.323529
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d13aced6b20979dea691425018aa9f0ea80fb3
3,168
py
Python
test/examples/integrated/codec/vip/vip_agent.py
rodrigomelo9/uvm-python
e3127eba2cc1519a61dc6f736d862a8dcd6fce20
[ "Apache-2.0" ]
140
2020-01-18T00:14:17.000Z
2022-03-29T10:57:24.000Z
test/examples/integrated/codec/vip/vip_agent.py
Mohsannaeem/uvm-python
1b8768a1358d133465ede9cadddae651664b1d53
[ "Apache-2.0" ]
24
2020-01-18T18:40:58.000Z
2021-03-25T17:39:07.000Z
test/examples/integrated/codec/vip/vip_agent.py
Mohsannaeem/uvm-python
1b8768a1358d133465ede9cadddae651664b1d53
[ "Apache-2.0" ]
34
2020-01-18T12:22:59.000Z
2022-02-11T07:03:11.000Z
#// #// ------------------------------------------------------------- #// Copyright 2011 Synopsys, Inc. #// Copyright 2019-2020 Tuomas Poikela (tpoikela) #// All Rights Reserved Worldwide #// #// Licensed under the Apache License, Version 2.0 (the #// "License"); you may not use this file except in #// compliance with the License. You may obtain a copy of #// the License at #// #// http://www.apache.org/licenses/LICENSE-2.0 #// #// Unless required by applicable law or agreed to in #// writing, software distributed under the License is #// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR #// CONDITIONS OF ANY KIND, either express or implied. See #// the License for the specific language governing #// permissions and limitations under the License. #// ------------------------------------------------------------- #// from uvm import * from .vip_sequencer import vip_sequencer from .vip_driver import vip_driver from .vip_monitor import vip_monitor class vip_agent(UVMAgent): def __init__(self, name, parent=None): super().__init__(name, parent) self.hier_objection = False def build_phase(self, phase): self.sqr = vip_sequencer.type_id.create("sqr", self) self.drv = vip_driver.type_id.create("drv", self) self.tx_mon = vip_monitor.type_id.create("tx_mon", self) self.rx_mon = vip_monitor.type_id.create("rx_mon", self) self.rx_mon.hier_objection = self.hier_objection self.tx_mon.hier_objection = self.hier_objection self.drv.hier_objection = self.hier_objection vif = [] if not UVMConfigDb.get(self, "", "vif", vif): uvm_fatal("VIP/AGT/NOVIF", "No virtual interface specified for self agent instance") self.vif = vif[0] UVMConfigDb.set(self, "tx_mon", "vif", self.vif.tx_mon) UVMConfigDb.set(self, "rx_mon", "vif", self.vif.rx) def connect_phase(self, phase): self.drv.seq_item_port.connect(self.sqr.seq_item_export) async def pre_reset_phase(self, phase): if self.hier_objection: phase.raise_objection(self, "Resetting agent") await self.reset_and_suspend() if self.hier_objection: print("vip_agent dropping objection") phase.drop_objection(self) async def reset_and_suspend(self): #fork await sv.fork_join([ cocotb.fork(self.drv.reset_and_suspend()), cocotb.fork(self.tx_mon.reset_and_suspend()), cocotb.fork(self.rx_mon.reset_and_suspend()) ]) #join self.sqr.stop_sequences() async def suspend(self): await sv.fork_join([ # fork cocotb.fork(self.drv.suspend()), cocotb.fork(self.tx_mon.suspend()), cocotb.fork(self.rx_mon.suspend()), ]) # join async def resume(self): # fork await sv.fork_join([ cocotb.fork(self.drv.resume()), cocotb.fork(self.tx_mon.resume()), cocotb.fork(self.rx_mon.resume()), ]) # join uvm_component_utils(vip_agent)
33
96
0.606376
400
3,168
4.6125
0.3475
0.063415
0.068293
0.045528
0.226558
0.17561
0.083469
0.04336
0.04336
0.04336
0
0.00708
0.242109
3,168
95
97
33.347368
0.761349
0.278093
0
0.156863
0
0
0.065929
0
0
0
0
0
0
1
0.058824
false
0
0.078431
0
0.156863
0.019608
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d2ceeba676dbb491a1206466347e8ee17c6418
2,485
py
Python
source/code/build-instance-scheduler-template.py
liangruibupt/aws-instance-scheduler
a4e46eec9f39c2e3b95c5bcbe32c036e239d6066
[ "Apache-2.0" ]
null
null
null
source/code/build-instance-scheduler-template.py
liangruibupt/aws-instance-scheduler
a4e46eec9f39c2e3b95c5bcbe32c036e239d6066
[ "Apache-2.0" ]
null
null
null
source/code/build-instance-scheduler-template.py
liangruibupt/aws-instance-scheduler
a4e46eec9f39c2e3b95c5bcbe32c036e239d6066
[ "Apache-2.0" ]
1
2021-04-09T15:01:49.000Z
2021-04-09T15:01:49.000Z
######################################################################################################################
#  Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
#  Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance
#  with the License. A copy of the License is located at
#
#      http://www.apache.org/licenses/
#
#  or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
#  OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
#  and limitations under the License.
######################################################################################################################

import json
import sys
from collections import OrderedDict


def get_versioned_template(template_filename, bucket, solution, version, region):
    with open(template_filename, "rt") as f:
        template_text = "".join(f.readlines())
        template_text = template_text.replace("%bucket%", bucket)
        template_text = template_text.replace("%solution%", solution)
        template_text = template_text.replace("%version%", version)
        if region == 'cn-north-1' or region == 'cn-northwest-1':
            arn_prefix = "arn:aws-cn"
        else:
            arn_prefix = "arn:aws"
        template_text = template_text.replace("%arn_prefix%", arn_prefix)
        return json.loads(template_text, object_pairs_hook=OrderedDict)


def main(template_file, bucket, solution, version, region):
    template = get_versioned_template(template_file, bucket, solution, version, region)
    print(json.dumps(template, indent=4))


main(template_file=sys.argv[1], bucket=sys.argv[2], solution=sys.argv[3], version=sys.argv[4], region=sys.argv[5])
exit(0)
59.166667
118
0.464789
224
2,485
5.044643
0.464286
0.106195
0.070796
0.084956
0.178761
0.069027
0
0
0
0
0
0.00953
0.3666
2,485
41
119
60.609756
0.708386
0.433803
0
0
0
0
0.076707
0
0
0
0
0
0
1
0.1
false
0
0.15
0
0.3
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d306bdfaed88804b418d267e2c9f7fdd6fab73
7,965
py
Python
src/parse.py
StanfordAHA/Configuration
a5d404433d32b0ac20544d5bafa9422c979afc16
[ "BSD-3-Clause" ]
null
null
null
src/parse.py
StanfordAHA/Configuration
a5d404433d32b0ac20544d5bafa9422c979afc16
[ "BSD-3-Clause" ]
null
null
null
src/parse.py
StanfordAHA/Configuration
a5d404433d32b0ac20544d5bafa9422c979afc16
[ "BSD-3-Clause" ]
null
null
null
############################################################################### # file -- parse.py -- # Top contributors (to current version): # Nestan Tsiskaridze # This file is part of the configuration finder for the Stanford AHA project. # Copyright (c) 2021 by the authors listed in the file AUTHORS # in the top-level source directory) and their institutional affiliations. # All rights reserved. See the file LICENSE in the top-level source # directory for licensing information. # # Handles parsing of all input files. ############################################################################### import smt_switch as ss import smt_switch.primops as po import smt_switch.sortkinds as sk import argparse import pono as c import sys import re import time import copy import io #import timeit class stream: set0 = [] set1 = [] seq_in = [] seq_out = [] vars = {} var_array_inds = {} constr2terms = [] data_in_size = [] data_out_size = [] clk_name = None rst_n_name = None config_names = [] def read_stream(self, args, fout, agg_set, tb_set, sram_set, symbols, solver): global dim_names # open an annotation file if args.annotation == None: if agg_set: annot_file = args.hwpath+"agg_lake_top_annotation.txt" elif tb_set: annot_file = args.hwpath+"tb_lake_top_annotation.txt" elif sram_set: annot_file = args.hwpath+"sram_lake_top_annotation.txt" else: annot_file = args.hwpath+"lake_top_annotation.txt" else: annot_file = args.annotation cfo = open(annot_file, "r+") clines = cfo.readlines() # Close opend file cfo.close() # Collect the Set0, Set1, I/O sequence, and config_name variables as they appear in btor2 for cln in clines: cln = cln.strip() cln = cln.replace(',', '') cvars = cln.split() if 'var' == cvars[0]: self.vars[cvars[1]] = cvars[3] elif 'input' != cvars[0] and 'output' != cvars[0] and 'var' != cvars[0] and 'if' != cvars[0] and 'SOLVE' != cvars[1]: self.constr2terms.append(cvars) elif 'if' == cvars[0]: self.constr2terms.append(cvars) elif 'SOLVE' == cvars[1]:#specific bits are set only to be solved. Others can be anything, e.g. 
0 signal = cvars[0] if ':' in signal: signal_name = signal[:signal.find('[')] ind_start = signal[signal.find('[')+1:signal.find(':')] symb_start = False if ind_start in self.vars: ind_start = int(self.vars[ind_start],0) elif ind_start.isdigit(): ind_start = int(ind_start,0) else: symb_start = True ind_end = signal[signal.find(':')+1:signal.find(']')] symb_end = False if ind_end in self.vars: ind_end = int(self.vars[ind_end],0) elif ind_end.isdigit(): ind_end = int(ind_end,0) else: #case of symbolic symb_end = True if not symb_start and not symb_end: if signal[:signal.find('[')] not in self.var_array_inds: self.var_array_inds[signal[:signal.find('[')]] = [] for i in range(ind_start,ind_end+1): self.var_array_inds[signal_name].append(i) else: #implement later when suport for universal quantifiers is added self.constr2terms.append(cvars) else: if signal[:signal.find('[')] not in self.var_array_inds: self.var_array_inds[signal[:signal.find('[')]] = [] self.var_array_inds[signal[:signal.find('[')]].append(signal[signal.find('[')+1:signal.find(']')]) elif 'SET' == cvars[-1][:-1]: if len(cvars) == 6: rem_dims = cvars[2] dims = [] while (rem_dims != ''): dims.append(int(rem_dims[1:rem_dims.find(':')],0)) rem_dims = rem_dims[rem_dims.find(']')+1:] gen = [0]*len(dims) j = len(dims)-1 while j >= 0: if gen[j] <= dims[j]: build_dims = cvars[-2] for i in gen: build_dims = build_dims + '['+str(i)+']' if cvars[-1][-1:] == '0': self.set0.append(build_dims) else: self.set1.append(build_dims) while (j < len(dims)-1 and gen[j+1] == 0): j += 1 else: gen[j] = 0 j -= 1 gen[j] += 1 else: if cvars[-1][-1:] == '0': self.set0.append(cvars[-2]) else: self.set1.append(cvars[-2]) elif 'SEQUENCE' == cvars[-1]: if len(cvars) == 6: rem_dims = cvars[2] dims = [] while (rem_dims != ''): dims.append(int(rem_dims[1:rem_dims.find(':')],0)) rem_dims = rem_dims[rem_dims.find(']')+1:] if cvars[0] == 'input': self.data_in_size = dims else: self.data_out_size = dims assert len(self.data_in_size) <= 3 assert len(self.data_out_size) <= 3 gen = [0]*len(dims) j = len(dims)-1 while j >= 0: if gen[j] <= dims[j]: build_dims = cvars[-2] for i in gen: build_dims = build_dims + '['+str(i)+']' if cvars[0] == 'input': self.seq_in.append(build_dims) else: self.seq_out.append(build_dims) while (j < len(dims)-1 and gen[j+1] == 0): j += 1 else: gen[j] = 0 j -= 1 gen[j] += 1 else: if cvars[0] == 'input': self.seq_in.append(cvars[-2]) else: self.seq_out.append(cvars[-2]) elif 'SOLVE' == cvars[-1] and ('input' == cvars[0] or 'output' == cvars[0]): #if cvars[3] == 'strg_ub_pre_fetch_0_input_latency': # continue if len(cvars) == 6: dim = int(cvars[2][1:cvars[2].find(':')],0) for i in range(dim+1): self.config_names.append(cvars[-2]+'['+str(i)+']') else: self.config_names.append(cvars[-2]) elif 'CLK' == cvars[-1]: self.clk_name = cvars[-2] elif 'RSTN' == cvars[-1]: self.rst_n_name = cvars[-2] else: assert 'X' == cvars[-1]
40.431472
129
0.417075
836
7,965
3.825359
0.2189
0.030644
0.045028
0.030019
0.417136
0.324891
0.282051
0.252658
0.205754
0.205754
0
0.024121
0.453484
7,965
196
130
40.637755
0.710544
0.099309
0
0.422078
0
0
0.030593
0.014868
0
0
0
0
0.019481
1
0.006494
false
0
0.064935
0
0.155844
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d53c81f0a6c59b031bb33f8b48778a56657258
7,180
py
Python
aqt/installer.py
pylipp/aqtinstall
e08667cb5c9ced27994c4cde16d0c1b4a4386455
[ "MIT" ]
null
null
null
aqt/installer.py
pylipp/aqtinstall
e08667cb5c9ced27994c4cde16d0c1b4a4386455
[ "MIT" ]
null
null
null
aqt/installer.py
pylipp/aqtinstall
e08667cb5c9ced27994c4cde16d0c1b4a4386455
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # # Copyright (C) 2018 Linus Jahn <lnj@kaidan.im> # Copyright (C) 2019,2020 Hiroshi Miura <miurahr@linux.com> # Copyright (C) 2020, Aurélien Gâteau # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import concurrent.futures import os import pathlib import subprocess import sys import time from logging import getLogger import py7zr import requests from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry from aqt.archives import QtPackage from aqt.helper import altlink, versiontuple from aqt.qtpatch import Updater from aqt.settings import Settings class ExtractionError(Exception): pass class QtInstaller: """ Installer class to download packages and extract it. """ def __init__(self, qt_archives, logging=None, command=None, target_dir=None): self.qt_archives = qt_archives if logging: self.logger = logging else: self.logger = getLogger('aqt') self.command = command if target_dir is None: self.base_dir = os.getcwd() else: self.base_dir = target_dir self.settings = Settings() def retrieve_archive(self, package: QtPackage): archive = package.archive url = package.url name = package.name start_time = time.perf_counter() self.logger.info("Downloading {}...".format(name)) self.logger.debug("Download URL: {}".format(url)) session = requests.Session() retry = Retry(connect=5, backoff_factor=0.5) adapter = HTTPAdapter(max_retries=retry) session.mount('http://', adapter) session.mount('https://', adapter) try: r = session.get(url, allow_redirects=False, stream=True) if r.status_code == 302: newurl = altlink(r.url, r.headers['Location'], logger=self.logger) self.logger.info('Redirected URL: {}'.format(newurl)) r = session.get(newurl, stream=True) except requests.exceptions.ConnectionError as e: self.logger.error("Connection error: %s" % e.args) raise e else: try: with open(archive, 'wb') as fd: for chunk in r.iter_content(chunk_size=8196): fd.write(chunk) fd.flush() if self.command is None: with open(archive, 'rb') as fd: self.extract_archive(fd) except Exception as e: exc = sys.exc_info() self.logger.error("Download error: %s" % exc[1]) raise e else: if self.command is not None: self.extract_archive_ext(archive) os.unlink(archive) self.logger.info("Finish installation of {} in {}".format(archive, time.perf_counter() - start_time)) def extract_archive(self, archive): szf = py7zr.SevenZipFile(archive) szf.extractall(path=self.base_dir) szf.close() def extract_archive_ext(self, archive): if self.base_dir is not None: command_args = [self.command, 'x', '-aoa', '-bd', '-y', 
'-o{}'.format(self.base_dir), archive] else: command_args = [self.command, 'x', '-aoa', '-bd', '-y', archive] try: proc = subprocess.run(command_args, stdout=subprocess.PIPE, check=True) self.logger.debug(proc.stdout) except subprocess.CalledProcessError as cpe: self.logger.error("Extraction error: %d" % cpe.returncode) if cpe.stdout is not None: self.logger.error(cpe.stdout) if cpe.stderr is not None: self.logger.error(cpe.stderr) raise cpe def get_arch_dir(self, arch): if arch.startswith('win64_mingw'): arch_dir = arch[6:] + '_64' elif arch.startswith('win32_mingw'): arch_dir = arch[6:] + '_32' elif arch.startswith('win'): arch_dir = arch[6:] else: arch_dir = arch return arch_dir def make_conf_files(self, qt_version, arch): """Make Qt configuration files, qt.conf and qtconfig.pri""" arch_dir = self.get_arch_dir(arch) try: # prepare qt.conf with open(os.path.join(self.base_dir, qt_version, arch_dir, 'bin', 'qt.conf'), 'w') as f: f.write("[Paths]\n") f.write("Prefix=..\n") # update qtconfig.pri only as OpenSource with open(os.path.join(self.base_dir, qt_version, arch_dir, 'mkspecs', 'qconfig.pri'), 'r+') as f: lines = f.readlines() f.seek(0) f.truncate() for line in lines: if line.startswith('QT_EDITION ='): line = 'QT_EDITION = OpenSource\n' if line.startswith('QT_LICHECK ='): line = 'QT_LICHECK =\n' f.write(line) except IOError as e: self.logger.error("Configuration file generation error: %s\n", e.args, exc_info=True) raise e def install(self): with concurrent.futures.ThreadPoolExecutor(self.settings.concurrency) as executor: futures = [executor.submit(self.retrieve_archive, ar) for ar in self.qt_archives.get_archives()] done, not_done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_EXCEPTION) if len(not_done) > 0: self.logger.error("Installation error detected.") exit(1) try: for feature in done: feature.result() except Exception: exit(1) def finalize(self): target = self.qt_archives.get_target_config() self.make_conf_files(target.version, target.arch) prefix = pathlib.Path(self.base_dir) / target.version / target.arch updater = Updater(prefix, self.logger) if versiontuple(target.version) < (5, 14, 2): updater.patch_qt(target)
40.111732
110
0.608635
888
7,180
4.833333
0.342342
0.037279
0.020503
0.009087
0.063374
0.047064
0.047064
0.034483
0.020969
0.020969
0
0.009827
0.291365
7,180
178
111
40.337079
0.833726
0.187187
0
0.117647
0
0
0.070442
0
0
0
0
0
0
1
0.058824
false
0.007353
0.110294
0
0.191176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d68949a023a20451569c4bd42476cab180bd99
5,398
py
Python
pax/_src/core/utility_modules.py
NTT123/pax
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
[ "MIT" ]
11
2021-08-28T17:45:38.000Z
2022-01-26T17:50:03.000Z
pax/_src/core/utility_modules.py
NTT123/pax
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
[ "MIT" ]
1
2021-09-13T17:29:33.000Z
2021-09-13T21:50:34.000Z
pax/_src/core/utility_modules.py
NTT123/pax
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
[ "MIT" ]
null
null
null
"""Utility Modules.""" from typing import Any, Callable, Dict, List, Optional, Sequence, TypeVar, Union import jax import jax.numpy as jnp from .module import Module, parameters_method T = TypeVar("T", bound=Module) O = TypeVar("O") class ParameterModule(Module): """A PAX module that registers attributes as parameters by default.""" def parameters(self): return self.apply_submodules(lambda x: x.parameters()) class StateModule(Module): """A PAX module that registers attributes as states by default.""" parameters = parameters_method() class LazyModule(Module): """A lazy module is a module that only creates submodules when needed. Example: >>> from dataclasses import dataclass >>> @dataclass ... class MLP(pax.experimental.LazyModule): ... features: list ... ... def __call__(self, x): ... sizes = zip(self.features[:-1], self.features[1:]) ... for i, (in_dim, out_dim) in enumerate(sizes): ... fc = self.get_or_create(f"fc_{i}", lambda: pax.Linear(in_dim, out_dim)) ... x = jax.nn.relu(fc(x)) ... return x ... ... >>> mlp, _ = MLP([1, 2, 3]) % jnp.ones((1, 1)) >>> print(mlp.summary()) MLP(features=[1, 2, 3]) ├── Linear(in_dim=1, out_dim=2, with_bias=True) └── Linear(in_dim=2, out_dim=3, with_bias=True) """ def get_or_create(self, name, create_fn: Callable[[], T]) -> T: """Create and register a new attribute when it is not exist. Return the attribute. """ if hasattr(self, name): value = getattr(self, name) else: assert callable(create_fn), "Expect a callable function" value = create_fn() setattr(self, name, value) return value class Lambda(Module): """Convert a function to a module. Example: >>> net = pax.Lambda(jax.nn.relu) >>> print(net.summary()) x => relu(x) >>> y = net(jnp.array(-1)) >>> y DeviceArray(0, dtype=int32, weak_type=True) """ func: Callable def __init__(self, func: Callable, name: Optional[str] = None): super().__init__(name=name) self.func = func def __call__(self, *args, **kwargs): return self.func(*args, **kwargs) def __repr__(self) -> str: if self.name is not None: return super().__repr__() else: return f"{self.__class__.__qualname__}({self.func.__name__})" def summary(self, return_list: bool = False) -> Union[str, List[str]]: if self.name is not None: name = self.name elif isinstance(self.func, jax.custom_jvp) and hasattr(self.func, "fun"): if hasattr(self.func.fun, "__name__"): name = self.func.fun.__name__ else: name = f"{self.func.fun}" elif hasattr(self.func, "__name__"): name = self.func.__name__ else: name = f"{self.func}" output = f"x => {name}(x)" return [output] if return_list else output class Flattener(Module): """Flatten PAX modules for better performance. 
Example: >>> net = pax.Linear(3, 3) >>> opt = opax.adam(1e-3)(net.parameters()) >>> flat_mods = pax.experimental.Flattener(model=net, optimizer=opt) >>> net, opt = flat_mods.model, flat_mods.optimizer >>> print(net.summary()) Linear(in_dim=3, out_dim=3, with_bias=True) >>> print(opt.summary()) chain.<locals>.Chain ├── scale_by_adam.<locals>.ScaleByAdam │ ├── Linear(in_dim=3, out_dim=3, with_bias=True) │ └── Linear(in_dim=3, out_dim=3, with_bias=True) └── scale.<locals>.Scale """ treedef_dict: Dict[str, Any] leaves_dict: Dict[str, Sequence[jnp.ndarray]] def __init__(self, **kwargs): """Create a new flattener.""" super().__init__() self.treedef_dict = {} self.leaves_dict = {} for name, value in kwargs.items(): leaves, treedef = jax.tree_flatten(value) self.treedef_dict[name] = treedef self.leaves_dict[name] = leaves def __getattr__(self, name: str) -> Any: if name in self.treedef_dict: treedef = self.treedef_dict[name] leaves = self.leaves_dict[name] value = jax.tree_unflatten(treedef, leaves) return value else: raise AttributeError() def update(self: T, **kwargs) -> T: """Update the flattener. Example: >>> net = pax.Linear(3, 3) >>> flats = pax.experimental.Flattener(net=net) >>> flats = flats.update(net=pax.Linear(4, 4)) >>> print(flats.net.summary()) Linear(in_dim=4, out_dim=4, with_bias=True) """ new_self = self.copy() for name, value in kwargs.items(): leaves, treedef = jax.tree_flatten(value) new_self.treedef_dict[name] = treedef new_self.leaves_dict[name] = leaves return new_self def parameters(self: T) -> T: """Raise an error. Need to reconstruct the original module before getting parameters. """ raise ValueError( "A flattener only stores ndarray leaves as non-trainable states.\n" "Reconstruct the original module before getting parameters." )
29.659341
91
0.582994
682
5,398
4.460411
0.249267
0.031558
0.025312
0.014464
0.219921
0.176857
0.142998
0.095003
0.068047
0.068047
0
0.008729
0.278436
5,398
181
92
29.823204
0.767137
0.368099
0
0.168831
0
0
0.084248
0.016462
0
0
0
0
0.012987
1
0.12987
false
0
0.051948
0.025974
0.402597
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d80d08df5b20660db28d091611ed67b6dfa076
2,026
py
Python
NoiseFiltersPy/Injector.py
TVect/NoiseFiltersPy
fff1f3113cf9b3e7b8de65421ab9951fd3cb11e5
[ "MIT" ]
6
2019-11-20T19:32:41.000Z
2021-06-25T19:47:26.000Z
NoiseFiltersPy/Injector.py
TVect/NoiseFiltersPy
fff1f3113cf9b3e7b8de65421ab9951fd3cb11e5
[ "MIT" ]
null
null
null
NoiseFiltersPy/Injector.py
TVect/NoiseFiltersPy
fff1f3113cf9b3e7b8de65421ab9951fd3cb11e5
[ "MIT" ]
1
2021-06-25T19:47:34.000Z
2021-06-25T19:47:34.000Z
import numpy as np
import pandas as pd
from abc import ABC


class Injector(ABC):
    """Base class for the injectors of artificial noise.

    Attributes
    ----------
    rem_indx : :obj:`List`
        Removed indexes (rows) from the dataset after the filtering.
    parameters : :obj:`Dict`
        Parameters used to define the behaviour of the filter.
    clean_data : :obj:`Sequence`
        Filtered independent attributes(X) of the dataset.
    clean_classes : :obj:`Sequence`
        Filtered target attributes(y) of the dataset.
    """

    def __init__(self, attributes, labels, rate: float = 0.1) -> None:
        self._new_noise = []
        if not isinstance(attributes, pd.DataFrame):
            self._attrs = pd.DataFrame(attributes)
        else:
            self._attrs = attributes
        if not isinstance(labels, pd.DataFrame):
            self._labels = pd.DataFrame(labels)
        else:
            self._labels = labels
        self._rate = rate
        self.verify()
        self._num_noise = int(self._rate * self._attrs.shape[0])
        self._label_types = set(self.labels[0].unique())

    @property
    def labels(self):
        return self._labels

    @property
    def noise_indx(self):
        return self._new_noise

    def verify(self) -> None:
        if min(self._labels.value_counts()) < 2:
            raise ValueError("Number of examples in the minority class must be >= 2.")
        if self._attrs.shape[0] != self.labels.shape[0]:
            raise ValueError("Attributes and classes must have the same size.")
        if self._rate < 0 or self._rate > 1:
            raise ValueError("Noise rate must be between 0 and 1.")

    def _gen_random(self, seed: int = None):
        """Randomly reassign the labels of the examples marked as noise.

        Args:
            seed (int, optional): seed for the random generator. Defaults to None.
        """
        rng = np.random.default_rng(seed)
        for example in self._new_noise:
            self._labels.iloc[example] = rng.choice(list(self._label_types - set(self._labels.iloc[example])))
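A minimal usage sketch of the base class above, assuming it is importable as NoiseFiltersPy.Injector.Injector; the toy data below is made up and only exercises the base-class validation (concrete injectors fill in the noise indexes):

import pandas as pd
# from NoiseFiltersPy.Injector import Injector  # assumed import path

# Hypothetical toy dataset: 6 examples, 3 classes with 2 examples each,
# so the "minority class >= 2" check in verify() passes.
X = pd.DataFrame({"f1": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]})
y = [0, 0, 1, 1, 2, 2]

inj = Injector(X, y, rate=0.5)   # verify() runs inside __init__
print(inj.noise_indx)            # [] -- subclasses populate this
print(inj._num_noise)            # 3 == int(0.5 * 6)

# An out-of-range rate is rejected by verify():
try:
    Injector(X, y, rate=1.5)
except ValueError as err:
    print("rejected:", err)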
30.238806
110
0.600197
248
2,026
4.75
0.407258
0.067912
0.03056
0.025467
0.074703
0.04584
0
0
0
0
0
0.009059
0.291708
2,026
67
111
30.238806
0.811847
0.239882
0
0.114286
0
0
0.069226
0
0
0
0
0
0
1
0.142857
false
0
0.085714
0.057143
0.314286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d86786252483bb0df3775ba6255b1dd3edd2a1
2,181
py
Python
src/app.py
gh640/coding-challenge
3be31d643ac081bfec3495cb8f705c400be82553
[ "MIT" ]
null
null
null
src/app.py
gh640/coding-challenge
3be31d643ac081bfec3495cb8f705c400be82553
[ "MIT" ]
2
2017-11-17T03:14:45.000Z
2019-10-19T07:17:22.000Z
src/app.py
gh640/coding-challenge
3be31d643ac081bfec3495cb8f705c400be82553
[ "MIT" ]
1
2017-11-16T09:33:38.000Z
2017-11-16T09:33:38.000Z
# coding: utf-8

'''Provides the front controller.
'''

from math import ceil
import os

from flask import json
from flask import Flask
from flask import request
from flask import send_from_directory
from flask import render_template

# from json_loader import load_locations
# from json_loader import prepare_locations
from models import Location

# Number of filming locations displayed per page
LOCATION_ITEMS_PER_PAGE = 20

app = Flask(__name__)
app.config['GOOGLE_API_KEY'] = os.environ['GOOGLE_API_KEY']
app.config['ROOT'] = (app.config['APPLICATION_ROOT']
                      if app.config['APPLICATION_ROOT'] else '')


@app.route('/static/<path:path>')
def send_js(path):
    return send_from_directory('static', path)


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/location')
def location():
    req_title = request.args.get('title', None)
    try:
        req_page = int(request.args.get('page', 1))
    except ValueError:
        req_page = 1

    query = Location.selectbase()
    if req_title:
        query = query.where(Location.title ** '%{}%'.format(req_title))

    total_items = query.count()
    total_pages = ceil(total_items / LOCATION_ITEMS_PER_PAGE)
    current_page = req_page if req_page <= total_pages else total_pages
    query = query.paginate(current_page, LOCATION_ITEMS_PER_PAGE)
    locations = [l.as_dict() for l in query]

    return json.jsonify({
        'meta': {
            'pager_data': {
                'totalItems': total_items,
                'totalPages': total_pages,
                'currentItems': len(locations),
                'currentPage': current_page,
                'itemsPerPage': LOCATION_ITEMS_PER_PAGE,
            },
        },
        'entities': {
            'locations': locations,
        },
    })


@app.route('/movie')
def movie():
    req_title = request.args.get('title', None)
    if not req_title:
        return json.jsonify([])

    query = (Location.select(Location.title)
             .distinct()
             .where(Location.title ** '%{}%'.format(req_title)))
    movies = [{'id': index, 'title': l.title} for index, l in enumerate(query)]

    return json.jsonify(movies)
23.706522
79
0.629069
262
2,181
5.026718
0.339695
0.036446
0.056948
0.060744
0.095672
0.095672
0.047077
0
0
0
0
0.003041
0.246217
2,181
91
80
23.967033
0.798054
0.056855
0
0.034483
0
0
0.11236
0
0
0
0
0
0
1
0.068966
false
0
0.137931
0.034483
0.293103
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16d86a94620baf9944e6bd338662eefcd3ab573e
2,180
py
Python
corkus/objects/dungeon.py
MrBartusek/corkus.py
031c11e3e251f0bddbcb67415564357460fe7fea
[ "MIT" ]
5
2021-09-10T14:20:15.000Z
2022-01-09T11:27:49.000Z
corkus/objects/dungeon.py
MrBartusek/corkus.py
031c11e3e251f0bddbcb67415564357460fe7fea
[ "MIT" ]
11
2021-08-15T09:39:09.000Z
2022-01-12T14:11:24.000Z
corkus/objects/dungeon.py
MrBartusek/corkus.py
031c11e3e251f0bddbcb67415564357460fe7fea
[ "MIT" ]
2
2021-12-01T23:33:14.000Z
2022-01-12T11:08:18.000Z
from __future__ import annotations
from .base import CorkusBase
from enum import Enum


class DungeonType(Enum):
    REMOVED = "REMOVED"
    """Dungeons that were removed from the game in version ``1.14.1`` like ``Skeleton`` or ``Spider``"""

    REMOVED_MINI = "REMOVED_MINI"
    """Minidungeons that were reworked in version ``1.17`` like ``Ice`` or ``Ocean``"""

    STANDARD = "STANDARD"
    """Generic dungeons like ``Galleon's Graveyard`` or ``Fallen Factory``"""

    CORRUPTED = "CORRUPTED"
    """Harder variant of standard dungeons like ``Corrupted Decrepit Sewers`` or ``Corrupted Sand-Swept Tomb``"""


class Dungeon(CorkusBase):
    """Represents a `Dungeon <https://wynncraft.fandom.com/wiki/Dungeons>`_ completed by a :py:class:`Player`"""

    @property
    def name(self) -> str:
        """Name of the dungeon like ``Decrepit Sewers``, ``Galleon's Graveyard`` or ``Fallen Factory``."""
        return self._attributes.get("name", "")

    @property
    def type(self) -> DungeonType:
        """Type of the dungeon."""
        if self.name.startswith("Corrupted"):
            return DungeonType.CORRUPTED
        elif self.name in (
                "Zombie",
                "Animal",
                "Skeleton",
                "Spider",
                "Silverfish",):
            return DungeonType.REMOVED
        elif self.name in (
                "Jungle",
                "Ice",
                "Ocean"):
            return DungeonType.REMOVED_MINI
        elif self.name in (
                "Decrepit Sewers",
                "Infested Pit",
                "Ice Barrows",
                "Lost Sanctuary",
                "Sand-Swept Tomb",
                "Underworld Crypt",
                "Undergrowth Ruins",
                "Eldritch Outlook",
                "Galleon's Graveyard",
                "Fallen Factory"):
            return DungeonType.STANDARD
        else:
            raise ValueError(f"Invalid dungeon: {self.name}")

    @property
    def completed(self) -> int:
        """Total runs completed by the player. Failed runs are not counted."""
        return self._attributes.get("completed", 0)

    def __repr__(self) -> str:
        return f"<Dungeon name={self.name!r} completed={self.completed}>"
34.0625
113
0.580734
230
2,180
5.443478
0.430435
0.038339
0.040735
0.033546
0.051118
0.051118
0
0
0
0
0
0.005161
0.288991
2,180
63
114
34.603175
0.802581
0.129358
0
0.12766
0
0
0.224868
0.017857
0
0
0
0
0
1
0.085106
false
0
0.06383
0.021277
0.425532
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16dc5aa7f7c7413a9e340c8bb600ebd849d60e67
2,897
py
Python
hale_hub/outlet_interface.py
tantinlala/hale-hub
da2e6d24e3869ee533d2e272ce87b9e7eede9a79
[ "MIT" ]
null
null
null
hale_hub/outlet_interface.py
tantinlala/hale-hub
da2e6d24e3869ee533d2e272ce87b9e7eede9a79
[ "MIT" ]
null
null
null
hale_hub/outlet_interface.py
tantinlala/hale-hub
da2e6d24e3869ee533d2e272ce87b9e7eede9a79
[ "MIT" ]
null
null
null
import serial
import serial.tools.list_ports

from hale_hub.constants import STARTING_OUTLET_COMMAND, SERIAL_BAUD_RATE, SERIAL_TIMEOUT
from hale_hub.ifttt_logger import send_ifttt_log


class _Outlet:
    def __init__(self, name):
        self.state = 0
        self.name = name


class _OutletInterface:
    def __init__(self):
        self.outlets = [_Outlet('Outlet 0'), _Outlet('Outlet 1'), _Outlet('Outlet 2')]
        self.serial_interface = None
        self.serial_interface_string = None

    def set_outlet_name(self, name, outlet_id):
        if outlet_id < len(self.outlets):
            self.outlets[outlet_id].name = name

    def set_serial_interface(self, serial_interface_string):
        try:
            print('Setting serial interface with description: {}'.format(serial_interface_string))
            self.serial_interface_string = serial_interface_string
            ports = [p.device for p in serial.tools.list_ports.comports() if self.serial_interface_string in p.description]
            self.serial_interface = serial.Serial(ports[0], SERIAL_BAUD_RATE, timeout=SERIAL_TIMEOUT)
        except IndexError:
            send_ifttt_log(__name__, 'No serial ports could be opened!')

    def _send_outlet_command(self, outlet_id, outlet_state):
        try:
            print('Changing outlet {0} to {1} state'.format(outlet_id, outlet_state))
            command = bytearray([STARTING_OUTLET_COMMAND + (outlet_id << 1) + outlet_state])
            print('Writing {0} to serial'.format(command))
            self.serial_interface.write(command)
        except (serial.SerialException, AttributeError):
            send_ifttt_log(__name__, 'No serial bytes could be written')
            # is_open is a property on pyserial's Serial, not a method
            if self.serial_interface is not None and self.serial_interface.is_open:
                self.serial_interface.close()
            self.set_serial_interface(self.serial_interface_string)

    def toggle_outlet(self, outlet_id):
        if outlet_id < len(self.outlets):
            self.outlets[outlet_id].state ^= 1
            self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)

    def turn_on_outlet(self, outlet_id):
        if outlet_id < len(self.outlets):
            self.outlets[outlet_id].state = 1
            self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)

    def turn_off_outlet(self, outlet_id):
        if outlet_id < len(self.outlets):
            self.outlets[outlet_id].state = 0
            self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)

    def get_outlets(self):
        return self.outlets


_outlet_interface = _OutletInterface()
set_outlet_serial_interface = _outlet_interface.set_serial_interface
toggle_outlet = _outlet_interface.toggle_outlet
turn_on_outlet = _outlet_interface.turn_on_outlet
turn_off_outlet = _outlet_interface.turn_off_outlet
get_outlets = _outlet_interface.get_outlets
set_outlet_name = _outlet_interface.set_outlet_name
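For illustration only, the command byte written by _send_outlet_command packs the outlet id and state on top of a base opcode. The real STARTING_OUTLET_COMMAND value lives in hale_hub.constants and is not shown in this dump, so 0x40 below is a made-up placeholder:

# Sketch of the command-byte layout, with a placeholder base opcode.
STARTING_OUTLET_COMMAND = 0x40  # hypothetical value, not the project's constant

def encode_outlet_command(outlet_id: int, outlet_state: int) -> bytearray:
    # The outlet id sits one bit above the on/off state bit: (id << 1) + state.
    return bytearray([STARTING_OUTLET_COMMAND + (outlet_id << 1) + outlet_state])

print(encode_outlet_command(2, 1))  # bytearray(b'E') with the 0x40 placeholder (0x45)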
41.385714
123
0.706593
376
2,897
5.055851
0.191489
0.088375
0.099947
0.069963
0.295108
0.295108
0.269858
0.224619
0.224619
0.224619
0
0.00522
0.20642
2,897
69
124
41.985507
0.821662
0
0
0.160714
0
0
0.063514
0
0
0
0
0
0
1
0.160714
false
0
0.071429
0.017857
0.285714
0.053571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16dcdf8ea3ba055a8650580e31092f4149c84a27
3,233
py
Python
helix/core.py
carbonscott/helix
e2ee6e1293cae4f0bd1220ed5a41268d20a095db
[ "MIT" ]
null
null
null
helix/core.py
carbonscott/helix
e2ee6e1293cae4f0bd1220ed5a41268d20a095db
[ "MIT" ]
null
null
null
helix/core.py
carbonscott/helix
e2ee6e1293cae4f0bd1220ed5a41268d20a095db
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np


def remove_nan(xyzs):
    return xyzs[~np.isnan(xyzs).any(axis = 1)]


def measure_twocores(core_xyz_ref, core_xyz_tar):
    ''' Measure the following aspects of two helical cores.
        - Interhelical distance vector between the centers.
        - Interhelical angle (0-90 degree)
    '''
    # Obtain the centers...
    center_ref = np.nanmean(core_xyz_ref, axis = 0)
    center_tar = np.nanmean(core_xyz_tar, axis = 0)

    # Construct the interhelical distance vector...
    ih_dvec = center_tar - center_ref

    # Calculate the length of interhelical distance vector...
    norm_ih_dvec = np.linalg.norm(ih_dvec)

    # Obtain the helical core vectors...
    core_xyz_ref_nonan = remove_nan(core_xyz_ref)
    core_xyz_tar_nonan = remove_nan(core_xyz_tar)
    core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
    core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]

    # Calculate the interhelical angle...
    core_vec_ref_unit = core_vec_ref / np.linalg.norm(core_vec_ref)
    core_vec_tar_unit = core_vec_tar / np.linalg.norm(core_vec_tar)
    ih_ang = np.arccos( np.dot(core_vec_ref_unit, core_vec_tar_unit) )

    return ih_dvec, norm_ih_dvec, core_vec_ref_unit, core_vec_tar_unit, ih_ang


def calc_interangle(core_xyz_ref, core_xyz_tar):
    ''' Measure the following aspects of two helical cores.
        - Interhelical angle (0-90 degree)
    '''
    # Obtain the helical core vectors...
    core_xyz_ref_nonan = remove_nan(core_xyz_ref)
    core_xyz_tar_nonan = remove_nan(core_xyz_tar)
    core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
    core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]

    # Calculate the interhelical angle...
    core_vec_ref_unit = core_vec_ref / np.linalg.norm(core_vec_ref)
    core_vec_tar_unit = core_vec_tar / np.linalg.norm(core_vec_tar)
    inter_angle = np.arccos( np.dot(core_vec_ref_unit, core_vec_tar_unit) )
    if inter_angle > np.pi / 2.0:
        inter_angle = np.pi - inter_angle

    return inter_angle


def calc_interdist(core_xyz_ref, core_xyz_tar):
    ''' Measure the following aspects of two helical cores.
        - Interhelical distance vector between the centers.

        Refers to http://geomalgorithms.com/a07-_distance.html for the method.
        Q is ref, P is tar.
    '''
    # Obtain the helical core vectors...
    core_xyz_ref_nonan = remove_nan(core_xyz_ref)
    core_xyz_tar_nonan = remove_nan(core_xyz_tar)
    core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
    core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]

    # Obtain the starting point...
    q0 = core_xyz_ref_nonan[0]
    p0 = core_xyz_tar_nonan[0]
    w0 = p0 - q0

    # Obtain the directional vector with magnitude...
    v = core_vec_ref
    u = core_vec_tar

    # Math part...
    a = np.dot(u, u)
    b = np.dot(u, v)
    c = np.dot(v, v)
    d = np.dot(u, w0)
    e = np.dot(v, w0)
    de = a * c - b * b    # Denominator

    if de == 0:
        sc, tc = 0, d / b
    else:
        sc, tc = (b * e - c * d) / de, (a * e - b * d) / de

    # Calculate distance...
    wc = w0 + sc * u - tc * v
    inter_dist = np.linalg.norm(wc)

    return inter_dist
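A quick numerical sanity check of the helpers above, assuming the module is importable as helix.core (the file lives at helix/core.py): two straight "cores" at right angles, one shifted by 1 along z, should give an interhelical angle of pi/2 and an interhelical distance of 1.

import numpy as np
from helix.core import calc_interangle, calc_interdist  # assumed import path

core_ref = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])  # runs along x
core_tar = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 2.0, 1.0]])  # runs along y at z = 1

print(calc_interangle(core_ref, core_tar))  # ~1.5708 (pi/2)
print(calc_interdist(core_ref, core_tar))   # ~1.0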
33.677083
78
0.683266
531
3,233
3.819209
0.19209
0.117357
0.083826
0.073965
0.619822
0.604043
0.604043
0.579882
0.566075
0.566075
0
0.015336
0.213424
3,233
95
79
34.031579
0.782147
0.275905
0
0.347826
0
0
0
0
0
0
0
0
0
1
0.086957
false
0
0.021739
0.021739
0.173913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16dd18d4c9d6b529392f25ddf3a0704445995def
675
py
Python
matury/2011/6.py
bartekpacia/informatyka-frycz
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
[ "MIT" ]
2
2021-03-06T22:09:44.000Z
2021-03-14T14:41:03.000Z
matury/2011/6.py
bartekpacia/informatyka-frycz
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
[ "MIT" ]
1
2020-03-25T15:42:47.000Z
2020-10-06T21:41:14.000Z
matury/2011/6.py
bartekpacia/informatyka-frycz
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
[ "MIT" ]
null
null
null
from typing import List

with open("dane/liczby.txt") as f:
    nums: List[int] = []
    nums_9_chars: List[int] = []
    for line in f:
        sline = line.strip()
        num = int(sline, 2)
        if len(sline) == 9:
            nums_9_chars.append(num)
        nums.append(num)

count_even = 0
max_num = 0
for num in nums:
    if num % 2 == 0:
        count_even += 1
    if num > max_num:
        max_num = num

print(f"{count_even=}")
print(f"max_num(10): {max_num}, max_num(2): {bin(max_num)[2:]}")

sum_9_chars = 0
for num in nums_9_chars:
    sum_9_chars += num

print(f"count of numbers with 9 digits: {len(nums_9_chars)}, their sum: {bin(sum_9_chars)[2:]}")
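The script hinges on Python's base-2 parsing and formatting; a tiny self-contained illustration, not tied to the dane/liczby.txt input used above:

# int(s, 2) parses a binary string; bin(n)[2:] converts an int back to binary digits.
s = "101011001"
n = int(s, 2)
print(n)           # 345
print(len(s))      # 9 -- a line like this would be counted among nums_9_chars
print(bin(n)[2:])  # '101011001'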
20.454545
96
0.58963
117
675
3.196581
0.324786
0.112299
0.106952
0.048128
0.069519
0
0
0
0
0
0
0.042169
0.262222
675
32
97
21.09375
0.708835
0
0
0
0
0.043478
0.248889
0.032593
0
0
0
0
0
1
0
false
0
0.043478
0
0.043478
0.130435
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16de052924f6b7a0503a267b4aaeda1587303cff
3,681
py
Python
src/model/ParseInput.py
slavi010/polyhash-2020
a11aa694fbf901be4f4db565cb09800f8f57eae7
[ "MIT" ]
null
null
null
src/model/ParseInput.py
slavi010/polyhash-2020
a11aa694fbf901be4f4db565cb09800f8f57eae7
[ "MIT" ]
null
null
null
src/model/ParseInput.py
slavi010/polyhash-2020
a11aa694fbf901be4f4db565cb09800f8f57eae7
[ "MIT" ]
null
null
null
import os
from typing import List

from src.model.Etape import Etape
from src.model.Grille import Grille
from src.model.ItemCase import ItemCase
from src.model.PointMontage import PointMontage
from src.model.Robot import Robot
from src.model.Tache import Tache


class ParseInput:
    """Parser that reads the input text file provided by Google.

    Turns that file into data and classes we can work with.
    """
    grille: Grille

    def __init__(self):
        pass

    def parse(self, file_path: str) -> Grille:
        """Parse the Google file and return the corresponding Grille.

        :rtype: Grille
        """
        # check that file_path is a file
        assert os.path.isfile(file_path)

        with open(file_path, 'r') as file:
            index: int = 0

            # read all the lines of the file
            lines: List = file.readlines()

            # turn the lines into lists of integers
            for index_line in range(len(lines)):
                lines[index_line] = lines[index_line].split(' ')
                for index_val in range(len(lines[index_line])):
                    lines[index_line][index_val] = int(lines[index_line][index_val])

            # create a Grille instance
            grille = Grille(lines[0][0], lines[0][1])

            # create the requested number of robots in the grid
            for idx_robot in range(lines[0][2]):
                grille.robots.append(Robot())

            # create the mount points and place them in the grid
            for idx_point_montage in range(lines[0][3]):
                index += 1
                grille.add_point_montage(PointMontage(lines[index][0], lines[index][1]))

            # read the number of allowed clock steps
            grille.step_simulation = lines[0][5]

            # Read the information for each task and instantiate the corresponding
            # Tache objects in the grid. If an Etape (assembly point) does not yet
            # exist in the grid at the corresponding coordinates, instantiate it,
            # put it in the grid, and attach it to the task.
            # Create the Tache and Etape instances.
            for index_tache in range(lines[0][4]):
                index += 1
                tache_tampon: Tache = Tache(lines[index][0], index_tache)

                index += 1
                g_x = 0
                g_y = 0
                for index_etape in range(lines[index-1][1]):
                    # add the steps
                    etape = Etape(lines[index][index_etape*2+0], lines[index][index_etape*2+1])
                    tache_tampon.add_etape(etape)
                    g_x += (etape.x - g_x)/len(tache_tampon.etapes)
                    g_y += (etape.y - g_y)/len(tache_tampon.etapes)

                # store the derived parameters on the task
                tache_tampon.centre_gravite = ItemCase(int(g_x), int(g_y))
                tache_tampon.distance_centre_gravite = max(tache_tampon.etapes,
                                                           key=lambda etape: tache_tampon.centre_gravite.distance(etape)) \
                    .distance(tache_tampon.centre_gravite)

                grille.add_tache(tache_tampon)

                # approximate the distance and surface between consecutive steps
                for etape_from, etape_to in zip(tache_tampon.etapes[0::1], tache_tampon.etapes[1::1]):
                    tache_tampon.distance += etape_from.distance(etape_to)
                    tache_tampon.surface += etape_from.distance(etape_to)

        return grille
40.9
123
0.594947
470
3,681
4.525532
0.323404
0.072402
0.03385
0.018336
0.082746
0.026328
0
0
0
0
0
0.01247
0.32464
3,681
90
124
40.9
0.843121
0.265145
0
0.0625
0
0
0.000753
0
0
0
0
0
0.020833
1
0.041667
false
0.020833
0.166667
0
0.270833
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e1f96bcb5b1ba1faf14b289b7309040c63b043
1,619
py
Python
homework_1/tests/test_3.py
mag-id/epam_python_autumn_2020
2488817ba039f5722030a23edc97abe9f70a9a30
[ "MIT" ]
null
null
null
homework_1/tests/test_3.py
mag-id/epam_python_autumn_2020
2488817ba039f5722030a23edc97abe9f70a9a30
[ "MIT" ]
null
null
null
homework_1/tests/test_3.py
mag-id/epam_python_autumn_2020
2488817ba039f5722030a23edc97abe9f70a9a30
[ "MIT" ]
null
null
null
""" Unit tests for module `homework_1.tasks.task_3`. """ from tempfile import NamedTemporaryFile from typing import Tuple import pytest from homework_1.tasks.task_3 import find_maximum_and_minimum @pytest.mark.parametrize( ["file_content", "expected_result"], [ pytest.param( "0\n", (0, 0), id="'0\n', result is (0, 0).", ), pytest.param( "1\n2\n3\n4\n5\n", (1, 5), id="'1\n2\n3\n4\n5\n', result is (1, 5).", ), pytest.param( "1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n", (-12, 11), id="'1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n', result: (11,-12).", ), pytest.param( "11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n", (-12, 11), id="'11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n', result: (11,-12).", ), pytest.param( "\n".join(str(num) for num in range(0, 667000)), (0, 666999), id="Integers from 0 to 666999 delimited by '\n'.", ), ], ) def test_find_maximum_and_minimum(file_content: str, expected_result: Tuple[int, int]): """ Mocks file using `NamedTemporaryFile` instance with writed `file_content` inside, where `file_name` == `file.name`. Passes test if `find_maximum_and_minimum`(`file.name`) is equal to `expected_result`. """ with NamedTemporaryFile(mode="wt") as file: file.write(file_content) file.seek(0) assert find_maximum_and_minimum(file.name) == expected_result
29.436364
87
0.542928
251
1,619
3.398406
0.310757
0.021102
0.065651
0.098476
0.341149
0.267292
0.126612
0.126612
0.126612
0.126612
0
0.104363
0.277949
1,619
54
88
29.981481
0.625321
0.155034
0
0.307692
0
0.102564
0.28132
0.139535
0
0
0
0
0.025641
1
0.025641
false
0
0.102564
0
0.128205
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e747bd6febb0a03dbe8fb17268efc47ff0c0ee
7,999
py
Python
transitfeed/transfer.py
cclauss/transitfeed
54a4081b59bfa015d5f0405b68203e61762d4a52
[ "Apache-2.0" ]
9
2015-07-21T17:41:25.000Z
2020-08-26T13:37:08.000Z
transitfeed/transfer.py
cclauss/transitfeed
54a4081b59bfa015d5f0405b68203e61762d4a52
[ "Apache-2.0" ]
4
2015-06-11T18:40:16.000Z
2020-04-03T20:31:40.000Z
transitfeed/transfer.py
cclauss/transitfeed
54a4081b59bfa015d5f0405b68203e61762d4a52
[ "Apache-2.0" ]
4
2016-02-09T21:45:50.000Z
2020-07-30T21:52:50.000Z
#!/usr/bin/python2.5 # Copyright (C) 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from gtfsobjectbase import GtfsObjectBase import problems as problems_module import util class Transfer(GtfsObjectBase): """Represents a transfer in a schedule""" _REQUIRED_FIELD_NAMES = ['from_stop_id', 'to_stop_id', 'transfer_type'] _FIELD_NAMES = _REQUIRED_FIELD_NAMES + ['min_transfer_time'] _TABLE_NAME = 'transfers' _ID_COLUMNS = ['from_stop_id', 'to_stop_id'] def __init__(self, schedule=None, from_stop_id=None, to_stop_id=None, transfer_type=None, min_transfer_time=None, field_dict=None): self._schedule = None if field_dict: self.__dict__.update(field_dict) else: self.from_stop_id = from_stop_id self.to_stop_id = to_stop_id self.transfer_type = transfer_type self.min_transfer_time = min_transfer_time if getattr(self, 'transfer_type', None) in ("", None): # Use the default, recommended transfer, if attribute is not set or blank self.transfer_type = 0 else: try: self.transfer_type = util.NonNegIntStringToInt(self.transfer_type) except (TypeError, ValueError): pass if hasattr(self, 'min_transfer_time'): try: self.min_transfer_time = util.NonNegIntStringToInt(self.min_transfer_time) except (TypeError, ValueError): pass else: self.min_transfer_time = None if schedule is not None: # Note from Tom, Nov 25, 2009: Maybe calling __init__ with a schedule # should output a DeprecationWarning. A schedule factory probably won't # use it and other GenericGTFSObject subclasses don't support it. schedule.AddTransferObject(self) def ValidateFromStopIdIsPresent(self, problems): if util.IsEmpty(self.from_stop_id): problems.MissingValue('from_stop_id') return False return True def ValidateToStopIdIsPresent(self, problems): if util.IsEmpty(self.to_stop_id): problems.MissingValue('to_stop_id') return False return True def ValidateTransferType(self, problems): if not util.IsEmpty(self.transfer_type): if (not isinstance(self.transfer_type, int)) or \ (self.transfer_type not in range(0, 4)): problems.InvalidValue('transfer_type', self.transfer_type) return False return True def ValidateMinimumTransferTime(self, problems): if not util.IsEmpty(self.min_transfer_time): if self.transfer_type != 2: problems.MinimumTransferTimeSetWithInvalidTransferType( self.transfer_type) # If min_transfer_time is negative, equal to or bigger than 24h, issue # an error. If smaller than 24h but bigger than 3h issue a warning. # These errors are not blocking, and should not prevent the transfer # from being added to the schedule. 
if (isinstance(self.min_transfer_time, int)): if self.min_transfer_time < 0: problems.InvalidValue('min_transfer_time', self.min_transfer_time, reason="This field cannot contain a negative " \ "value.") elif self.min_transfer_time >= 24*3600: problems.InvalidValue('min_transfer_time', self.min_transfer_time, reason="The value is very large for a " \ "transfer time and most likely " \ "indicates an error.") elif self.min_transfer_time >= 3*3600: problems.InvalidValue('min_transfer_time', self.min_transfer_time, type=problems_module.TYPE_WARNING, reason="The value is large for a transfer " \ "time and most likely indicates " \ "an error.") else: # It has a value, but it is not an integer problems.InvalidValue('min_transfer_time', self.min_transfer_time, reason="If present, this field should contain " \ "an integer value.") return False return True def GetTransferDistance(self): from_stop = self._schedule.stops[self.from_stop_id] to_stop = self._schedule.stops[self.to_stop_id] distance = util.ApproximateDistanceBetweenStops(from_stop, to_stop) return distance def ValidateFromStopIdIsValid(self, problems): if self.from_stop_id not in self._schedule.stops.keys(): problems.InvalidValue('from_stop_id', self.from_stop_id) return False return True def ValidateToStopIdIsValid(self, problems): if self.to_stop_id not in self._schedule.stops.keys(): problems.InvalidValue('to_stop_id', self.to_stop_id) return False return True def ValidateTransferDistance(self, problems): distance = self.GetTransferDistance() if distance > 10000: problems.TransferDistanceTooBig(self.from_stop_id, self.to_stop_id, distance) elif distance > 2000: problems.TransferDistanceTooBig(self.from_stop_id, self.to_stop_id, distance, type=problems_module.TYPE_WARNING) def ValidateTransferWalkingTime(self, problems): if util.IsEmpty(self.min_transfer_time): return if self.min_transfer_time < 0: # Error has already been reported, and it does not make sense # to calculate walking speed with negative times. return distance = self.GetTransferDistance() # If min_transfer_time + 120s isn't enough for someone walking very fast # (2m/s) then issue a warning. # # Stops that are close together (less than 240m appart) never trigger this # warning, regardless of min_transfer_time. FAST_WALKING_SPEED= 2 # 2m/s if self.min_transfer_time + 120 < distance / FAST_WALKING_SPEED: problems.TransferWalkingSpeedTooFast(from_stop_id=self.from_stop_id, to_stop_id=self.to_stop_id, transfer_time=self.min_transfer_time, distance=distance) def ValidateBeforeAdd(self, problems): result = True result = self.ValidateFromStopIdIsPresent(problems) and result result = self.ValidateToStopIdIsPresent(problems) and result result = self.ValidateTransferType(problems) and result result = self.ValidateMinimumTransferTime(problems) and result return result def ValidateAfterAdd(self, problems): valid_stop_ids = True valid_stop_ids = self.ValidateFromStopIdIsValid(problems) and valid_stop_ids valid_stop_ids = self.ValidateToStopIdIsValid(problems) and valid_stop_ids # We need both stop IDs to be valid to able to validate their distance and # the walking time between them if valid_stop_ids: self.ValidateTransferDistance(problems) self.ValidateTransferWalkingTime(problems) def Validate(self, problems=problems_module.default_problem_reporter): if self.ValidateBeforeAdd(problems) and self._schedule: self.ValidateAfterAdd(problems) def _ID(self): return tuple(self[i] for i in self._ID_COLUMNS) def AddToSchedule(self, schedule, problems): schedule.AddTransferObject(self, problems)
40.811224
91
0.665833
960
7,999
5.34375
0.248958
0.072515
0.081871
0.066667
0.288499
0.218713
0.168031
0.137622
0.111891
0.111891
0
0.010007
0.262908
7,999
195
92
41.020513
0.860075
0.1989
0
0.26087
0
0
0.076766
0
0
0
0
0
0
1
0.108696
false
0.014493
0.021739
0.007246
0.289855
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e7d64f5a23705a73ced1fae75f2e7697ae34b2
2,067
py
Python
social/urls.py
Kizito-Alberrt/insta-social
c632e901cd81b0b139f88ad55236efd6c7ddbef1
[ "MIT" ]
null
null
null
social/urls.py
Kizito-Alberrt/insta-social
c632e901cd81b0b139f88ad55236efd6c7ddbef1
[ "MIT" ]
null
null
null
social/urls.py
Kizito-Alberrt/insta-social
c632e901cd81b0b139f88ad55236efd6c7ddbef1
[ "MIT" ]
null
null
null
from django.urls import path
from . import views
from .views import UserPostListView, PostDetailView, PostDeleteview, PostCreateView, PostUpdateView, CommentUpdateView, VideoCreateView, video_update

urlpatterns = [
    path('', views.base, name='base'),
    path('login', views.login, name='login'),
    path('register', views.register, name='register'),
    path('index', views.index, name='index'),
    path('logout', views.logout, name='logout'),
    path('like_post', views.like_post, name='like_post'),
    path('find_friends', views.find_friends, name='find_friends'),
    path('profile', views.profile, name='profile'),
    path('profile_update', views.profile_update, name='profile_update'),
    path('user/<str:username>', UserPostListView.as_view(), name='user_posts'),
    path('post/<int:pk>/', PostDetailView.as_view(), name='post_details'),
    path('post/<int:pk>/delete/', PostDeleteview.as_view(), name='post_delete'),
    path('profile_posts', views.profile_posts, name='profile_posts'),
    path('results', views.results, name='results'),
    path('post/new/', PostCreateView.as_view(), name='post-create'),
    path('post_update', views.post_update, name='post_update'),
    path('post/<int:pk>/update', PostUpdateView.as_view(), name='post-update'),
    path('profile_photos', views.profile_photos, name='profile_photos'),
    path('comment_update/<int:id>', views.comment_update, name='comment_update'),
    path('comment/<int:pk>/update', CommentUpdateView.as_view(), name='comment-update'),
    path('delete/<int:id>', views.delete, name='delete'),
    path('favourite', views.favourite, name='favourite'),
    path('favourite_posts', views.favourite_posts, name='favourite_posts'),
    path('video/new/', VideoCreateView.as_view(), name='video-create'),
    path('post/<int:pk>/video', video_update.as_view(), name='video_update'),
    # path('<str:username>', views.userprofile, name='userprofile'),
    path('video_posts', views.video_posts, name='video_posts'),
    path('user_videos', views.user_videos, name='user_videos'),
]
43.0625
149
0.701016
258
2,067
5.445736
0.178295
0.034164
0.05694
0.037011
0
0
0
0
0
0
0
0
0.111756
2,067
47
150
43.978723
0.765251
0.029511
0
0
0
0
0.301548
0.03345
0
0
0
0
0
1
0
false
0
0.09375
0
0.09375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e8783047883ecc17068c1f63c87b161a271a5f
1,054
py
Python
vtkplotter_examples/other/dolfin/collisions.py
ismarou/vtkplotter-examples
1eefcc026be169ab7a77a5bce6dec8044c33b554
[ "MIT" ]
4
2020-07-30T02:38:29.000Z
2021-09-12T14:30:18.000Z
vtkplotter_examples/other/dolfin/collisions.py
ismarou/vtkplotter-examples
1eefcc026be169ab7a77a5bce6dec8044c33b554
[ "MIT" ]
null
null
null
vtkplotter_examples/other/dolfin/collisions.py
ismarou/vtkplotter-examples
1eefcc026be169ab7a77a5bce6dec8044c33b554
[ "MIT" ]
null
null
null
'''
compute_collision() will compute the collision of all the entities with
a Point while compute_first_collision() will always return its first entry.
Especially if a point is on an element edge this can be tricky.
You may also want to compare with the Cell.contains(Point) tool.
'''
# Script by Rudy at https://fenicsproject.discourse.group/t/
# any-function-to-determine-if-the-point-is-in-the-mesh/275/3
import dolfin
from vtkplotter.dolfin import shapes, plot, printc

n = 4
Px = 0.5
Py = 0.5

mesh = dolfin.UnitSquareMesh(n, n)
bbt = mesh.bounding_box_tree()

collisions = bbt.compute_collisions(dolfin.Point(Px, Py))
collisions1st = bbt.compute_first_entity_collision(dolfin.Point(Px, Py))
printc("collisions : ", collisions)
printc("collisions 1st: ", collisions1st)

for cell in dolfin.cells(mesh):
    contains = cell.contains(dolfin.Point(Px, Py))
    printc("Cell", cell.index(), "contains P:", contains, c=contains)

###########################################
pt = shapes.Point([Px, Py], c='blue')

plot(mesh, pt, text=__doc__)
35.133333
75
0.705882
156
1,054
4.685897
0.532051
0.038304
0.049248
0.06156
0.057456
0
0
0
0
0
0
0.013158
0.134725
1,054
29
76
36.344828
0.788377
0.385199
0
0
0
0
0.08557
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e8943240219eac91364d8b6c27599e32680763
622
py
Python
alice_check_train/__main__.py
AsciiShell/Alice-Check-Train
49d5804d28a237756a7cf27e451ff56166fbee5c
[ "MIT" ]
null
null
null
alice_check_train/__main__.py
AsciiShell/Alice-Check-Train
49d5804d28a237756a7cf27e451ff56166fbee5c
[ "MIT" ]
null
null
null
alice_check_train/__main__.py
AsciiShell/Alice-Check-Train
49d5804d28a237756a7cf27e451ff56166fbee5c
[ "MIT" ]
null
null
null
import datetime
import os

from alice_check_train.main import rasp_to_text
from alice_check_train.rasp_api import get_rasp, filter_rasp


def main():
    key = os.getenv('RASP_KEY')
    station_from = os.getenv('STATION_FROM')
    station_to = os.getenv('STATION_TO')
    date = datetime.date.today().strftime('%Y-%m-%d')

    js = get_rasp(key, station_from, station_to, date)
    filtered = filter_rasp(js['segments'], 300)
    message = rasp_to_text(filtered, 1000)
    if len(message) > 1024:
        print('Too long message: {} > 1024'.format(len(message)))
    print(message)


if __name__ == '__main__':
    main()
25.916667
65
0.688103
90
622
4.444444
0.422222
0.06
0.07
0.095
0
0
0
0
0
0
0
0.029354
0.178457
622
23
66
27.043478
0.753425
0
0
0
0
0
0.130225
0
0
0
0
0
0
1
0.058824
false
0
0.235294
0
0.294118
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
16e89821c774aa40fe5b74ea387488fc99280078
7,309
py
Python
aws-KNN-RESTful.py
cakebytheoceanLuo/k-NN
52c66b5e38490431b3079c2baaad38785802f4e5
[ "Apache-2.0" ]
1
2021-11-16T13:22:09.000Z
2021-11-16T13:22:09.000Z
aws-KNN-RESTful.py
cakebytheoceanLuo/k-NN
52c66b5e38490431b3079c2baaad38785802f4e5
[ "Apache-2.0" ]
null
null
null
aws-KNN-RESTful.py
cakebytheoceanLuo/k-NN
52c66b5e38490431b3079c2baaad38785802f4e5
[ "Apache-2.0" ]
null
null
null
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
# https://docs.aws.amazon.com/opensearch-service/latest/developerguide/knn.html

import sys
import requests
import h5py
import numpy as np
import json
import aiohttp
import asyncio
import time
import httpx
from requests.auth import HTTPBasicAuth
from statistics import mean

# if len(sys.argv) != 2:
#     print("Type in the efSearch!")
#     sys.exit()

# path = '/tmp/sift-128-euclidean.hdf5.1M'  # float dataset
# path = '/tmp/sift-128-euclidean.hdf5'  # float dataset
path = '/home/ubuntu/sift-128-euclidean.hdf5'  # float dataset
output_csv = '/tmp/sift-es.csv'

# url = 'http://127.0.0.1:9200/sift-index/'
host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/'  # single node
# host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/'  # two nodes
url = host + 'sift-index/'

# https://stackoverflow.com/questions/51378099/content-type-header-not-supported
requestHeaders = {'content-type': 'application/json'}
auth = HTTPBasicAuth('admin', 'I#vu7bTAHB')

# Build an index
# https://stackoverflow.com/questions/17301938/making-a-request-to-a-restful-api-using-python
# PUT sift-index
data = '''{
  "settings": {
    "index": {
      "knn": true,
      "knn.space_type": "l2",
      "knn.algo_param.m": 6,
      "knn.algo_param.ef_construction": 50,
      "knn.algo_param.ef_search": 50,
      "refresh_interval": -1,
      "translog.flush_threshold_size": "10gb",
      "number_of_replicas": 0
    }
  },
  "mappings": {
    "properties": {
      "sift_vector": {
        "type": "knn_vector",
        "dimension": 128
      }
    }
  }
}'''
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
response = requests.put(url, data=data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
# response = requests.put(url, data=data, verify=False, headers=requestHeaders, auth=auth)
assert response.status_code == requests.codes.ok

# cluster_url = 'http://127.0.0.1:9200/_cluster/settings'
cluster_url = host + '_cluster/settings'
cluster_data = '''{
  "persistent" : {
    "knn.algo_param.index_thread_qty": 16
  }
}
'''
response = requests.put(cluster_url, data=cluster_data, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'), headers=requestHeaders)
assert response.status_code == requests.codes.ok

# Bulkload into index
bulk_template = '{ "index": { "_index": "sift-index", "_id": "%s" } }\n{ "sift_vector": [%s] }\n'

hf = h5py.File(path, 'r')
for key in hf.keys():
    print("A key of hf is %s" % key)  # Names of the groups in HDF5 file.

vectors = np.array(hf["train"][:])
num_vectors, dim = vectors.shape
print("num_vectors: %d" % num_vectors)
print("dim: %d" % dim)

bulk_data = ""
start = time.time()
for (id, vector) in enumerate(vectors):
    assert len(vector) == dim
    vector_str = ""
    for num in vector:
        vector_str += str(num) + ','
    vector_str = vector_str[:-1]
    id_str = str(id)
    single_bulk_done = bulk_template % (id_str, vector_str)
    bulk_data += single_bulk_done
    if (id + 1) % 100000 == 0:
        print(str(id + 1))
        # POST _bulk
        response = requests.put(url + '_bulk', data=bulk_data, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'), headers=requestHeaders)
        assert response.status_code == requests.codes.ok
        bulk_data = ""
end = time.time()
print("Insert Time: %d mins" % ((end - start) / 60.0))  # Unit: min

# refresh_url = 'http://127.0.0.1:9200/sift-index/_settings'
refresh_url = host + 'sift-index/_settings'
refresh_data = '''{
  "index" : {
    "refresh_interval": "1s"
  }
}
'''
response = requests.put(refresh_url, data=refresh_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
assert response.status_code == requests.codes.ok

# response = requests.post('http://127.0.0.1:9200/sift-index/_refresh', verify=False, headers=requestHeaders)
# assert response.status_code == requests.codes.ok

# merge_url = 'http://127.0.0.1:9200/sift-index/_forcemerge?max_num_segments=1'
merge_url = host + 'sift-index/_forcemerge?max_num_segments=1'
merge_response = requests.post(merge_url, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'), timeout=600)
assert merge_response.status_code == requests.codes.ok

# warmup_url = 'http://127.0.0.1:9200/_opendistro/_knn/warmup/sift-index'
warmup_url = host + '_opendistro/_knn/warmup/sift-index'
warmup_response = requests.get(warmup_url, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
assert warmup_response.status_code == requests.codes.ok

# Send queries
total_time = 0  # in ms
hits = 0  # for recall calculation
query_template = '''
{
  "size": 50,
  "query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}
}
'''

queries = np.array(hf["test"][:])
nq = len(queries)
neighbors = np.array(hf["neighbors"][:])
# distances = np.array(hf["distances"][:])
num_queries, q_dim = queries.shape
print("num_queries: %d" % num_queries)
print("q_dim: %d" % q_dim)
assert q_dim == dim

ef_search_list = [50, 100, 150, 200, 250, 300]
for ef_search in ef_search_list:
    ef_data = '''{
      "index": {
        "knn.algo_param.ef_search": %d
      }
    }'''
    ef_data = ef_data % ef_search
    ### Update Index Setting: efSearch
    response = requests.put(url + '_settings', data=ef_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
    assert response.status_code == requests.codes.ok

    total_time_list = []
    hits_list = []
    for count in range(5):
        total_time = 0  # in ms
        hits = 0  # for recall calculation
        query_template = ''' '''
        single_query = '''{}\n{"size": 50, "query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}}\n'''
        for (id, query) in enumerate(queries):
            assert len(query) == dim
            query_str = ""
            for num in query:
                query_str += str(num) + ','
            query_str = query_str[:-1]
            # GET sift-index/_search
            single_query_done = single_query % (query_str)
            query_template += single_query_done
        query_data = query_template
        # print(query_data)
        response = requests.get(url + '_msearch', data=query_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'), stream=True)
        assert response.status_code == requests.codes.ok
        # print(response.text)
        result = json.loads(response.text)

        # QPS
        total_time = result['took']
        # tooks = []
        # for i in range(len(queries)):
        #     for ele in result['responses']:
        #         tooks.append(int(ele['took']))

        for id in range(len(queries)):
            # Recall
            neighbor_id_from_result = []
            for ele in result['responses'][id]['hits']['hits']:
                neighbor_id_from_result.append(int(ele['_id']))
            assert len(neighbor_id_from_result) == 50
            # print("neighbor_id_from_result: ")
            # print(neighbor_id_from_result)
            neighbor_id_gt = neighbors[id][0:50]  # topK=50
            # print("neighbor_id_gt")
            # print(neighbor_id_gt)
            hits_q = len(list(set(neighbor_id_from_result) & set(neighbor_id_gt)))
            # print("# hits of this query with topk=50: %d" % hits_q)
            hits += hits_q

        total_time_list.append(total_time)
        hits_list.append(hits)

    print(total_time_list)
    total_time_avg = mean(total_time_list[2:-1])
    hits_avg = mean(hits_list)
    QPS = 1.0 * nq / (total_time_avg / 1000.0)
    recall = 1.0 * hits_avg / (nq * 50)
    print(ef_search, QPS, recall)
33.374429
142
0.675332
1,021
7,309
4.657199
0.225269
0.022713
0.04164
0.043533
0.3796
0.34448
0.283912
0.260358
0.19306
0.148055
0
0.03303
0.159119
7,309
218
143
33.527523
0.740644
0.255028
0
0.150327
0
0.013072
0.261644
0.052886
0
0
0
0
0.078431
1
0
false
0
0.071895
0
0.071895
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0