text string | size int64 | token_count int64 |
|---|---|---|
# Written by Shitao Tang
# --------------------------------------------------------
# Configure the stdlib logging package from a "logging.config" file expected
# in the current working directory (fileConfig reads handler/formatter setup
# from that INI-style file).
import logging.config
logging.config.fileConfig("logging.config")
| 149 | 34 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import utils
from botocore.exceptions import ClientError
import logger
import os
from aws_lambda_powertools import Tracer
# Single Tracer instance shared by every handler in this module.
tracer = Tracer()
# Name of the DynamoDB table mapping each tenant to its stack, injected via
# the Lambda environment.
tenant_stack_mapping_table_name = os.environ['TENANT_STACK_MAPPING_TABLE_NAME']
# Module-level AWS clients so they are reused across warm Lambda invocations.
dynamodb = boto3.resource('dynamodb')
codepipeline = boto3.client('codepipeline')
cloudformation = boto3.client('cloudformation')
table_tenant_stack_mapping = dynamodb.Table(tenant_stack_mapping_table_name)
# Per-tenant CloudFormation stack name template, filled with the tenantId.
stack_name = 'stack-{0}'
@tracer.capture_lambda_handler
def provision_tenant(event, context):
    """Lambda handler: record a new tenant-to-stack mapping and start the
    provisioning pipeline.

    :param event: API Gateway proxy event; ``event['body']`` is a JSON string
        containing at least ``tenantId``.
    :param context: Lambda context object (unused).
    :return: success response once provisioning has been started
    :raise Exception: re-raises any DynamoDB/CodePipeline error so the caller
        surfaces a failure.
    """
    logger.info(event)
    tenant_details = json.loads(event['body'])
    try:
        response_ddb = table_tenant_stack_mapping.put_item(
            Item={
                'tenantId': tenant_details['tenantId'],
                'stackName': stack_name.format(tenant_details['tenantId']),
                'applyLatestRelease': True,
                'codeCommitId': ''
            }
        )
        logger.info(response_ddb)
        response_codepipeline = codepipeline.start_pipeline_execution(
            name='serverless-saas-pipeline'
        )
        # Bug fix: this previously logged response_ddb a second time; log the
        # pipeline response so the execution id is captured.
        logger.info(response_codepipeline)
    except Exception:
        # Nothing to clean up; propagate so the platform reports the failure.
        raise
    else:
        return utils.create_success_response("Tenant Provisioning Started")
# This method uses IAM Authorization and is protected by a resource policy.
# It is also invoked asynchronously.
@tracer.capture_lambda_handler
def deprovision_tenant(event, context):
    """Lambda handler: remove a tenant's stack mapping and delete its
    CloudFormation stack.

    :param event: event dict containing the ``tenantId`` to deprovision
    :param context: Lambda context object (unused)
    :return: success response once deprovisioning has been started
    """
    logger.info("Request received to deprovision a tenant")
    logger.info(event)
    tenant_id = event['tenantId']
    try:
        ddb_response = table_tenant_stack_mapping.delete_item(
            Key={'tenantId': tenant_id}
        )
        logger.info(ddb_response)
        cfn_response = cloudformation.delete_stack(
            StackName=stack_name.format(tenant_id)
        )
        logger.info(cfn_response)
    except Exception:
        raise
    else:
        return utils.create_success_response("Tenant Deprovisioning Started")
| 2,324 | 697 |
#!/usr/bin/env python
# encoding:utf-8
# file: homework1.py

# Exercise 1: turn "1,2,3" into ["1", "2", "3"]

# Method 1: str.split does it directly.
a = '1,2,3'
print(a.split(','))

# Method 2: walk the characters, keeping everything except the separator.
a = '1,2,3'
sep = ','
result = [ch for ch in a if ch != sep]
print(result)

# Review note (translated from Chinese): method 1 is trivial. Method 2's
# per-character walk is only correct for this exact example -- for inputs
# like '1,23,45,678' or '1, 23, 4, 5' the characters must be grouped into
# multi-character tokens instead.

# Improved version: handles spaces/tabs as separators too, single pass,
# accumulates multi-character tokens.
a = ',1,23, 456 , ab c,,,'
sep = '\t ,'
tmp_str = ''
result = []
for ch in a:
    if ch in sep:
        # Separator ends the current token: flush it if non-empty
        # (consecutive separators therefore produce no empty tokens).
        if tmp_str:
            result.append(tmp_str)
            tmp_str = ''
    else:
        tmp_str += ch
print(result)
| 643 | 408 |
"""
Selection sort algorithm implementation.
Time Complexity:
Best O(n^2)
Worst O(n^2)
Average O(n^2)
Space Complexity: O(1)
"""
def selection_sort(array):
    """Sort ``array`` in place using selection sort and return it.

    Repeatedly selects the smallest remaining element and swaps it to the
    end of the sorted prefix.  O(n^2) comparisons, O(1) extra space.

    :param array: mutable sequence of mutually comparable items
    :return: the same sequence object, sorted ascending
    """
    n = len(array)
    for boundary in range(n):
        # First index of the minimum of array[boundary:]; min() returns the
        # first minimal position, matching the strict `<` scan it replaces.
        smallest = min(range(boundary, n), key=array.__getitem__)
        array[boundary], array[smallest] = array[smallest], array[boundary]
    return array
if __name__ == '__main__':
    # Quick demo: sort a sample list (with negatives and zero) and print it.
    sample = [5, 3, 6, 2, 10, -23, 0]
    print(selection_sort(sample))
| 575 | 213 |
class Order(object):
    """Wrapper holding a single order value, exposed read-only.

    The value is stored name-mangled (``__value``) and published only through
    a getter property, so it cannot be reassigned via the public attribute.
    """

    def __init__(self, value):
        """:param value: the value to wrap"""
        self.__value = value

    @property
    def value(self):
        """The wrapped value (read-only: no setter is defined)."""
        return self.__value

    def __repr__(self):
        # Debug-friendly representation; best practice for small value types.
        return "{}(value={!r})".format(type(self).__name__, self.__value)
| 154 | 44 |
import argparse
from .pycritty import subparsers, formatter
# Sub-command: `pycritty install` -- fetch a config file or theme from a URL.
install_parser = subparsers.add_parser(
    'install',
    formatter_class=formatter(),
    help="Install a config file or theme from a url",
    # SUPPRESS keeps unspecified options out of the parsed namespace entirely
    # instead of inserting them with default values.
    argument_default=argparse.SUPPRESS,
)
# Positional: the source URL.
install_parser.add_argument(
    'url',
    help='URL where the config is located',
)
install_parser.add_argument(
    '-n', '--name',
    metavar='NAME',
    default='',
    help='Name of the config/theme once installed',
)
install_parser.add_argument(
    '-o', '--override',
    action='store_true',
    help='Override existing config',
)
# --theme and --config are mutually exclusive destinations for the download;
# per the help text, config is the behavior when neither flag is given.
group = install_parser.add_mutually_exclusive_group()
group.add_argument(
    '-t', '--theme',
    action='store_true',
    help='Install as theme',
)
group.add_argument(
    '-c', '--config',
    action='store_true',
    help='Install as a config file in your saves directory (default)',
)
| 885 | 288 |
"""
sonde3
-------
PySonde is a module for reading water quality data from various sensor
formats.
"""
from setuptools import Command, setup, find_packages
# note: the minimum version numbers are just what I know will work,
# but they could probably be a few versions lower
# Package metadata; executed at install/build time by setuptools.
setup(
    name='sonde3',
    version='0.2',
    license='BSD',
    author='Evan Turner',
    author_email='evan.turner@twdb.state.tx.us',
    description='A utility library for reading various water quality '
                'data formats',
    # Reuse the module docstring above as the long description.
    long_description=__doc__,
    keywords='sonde water quality format environment ysi',
    packages=find_packages(),
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    # Legacy nose-based test configuration; kept as-is for compatibility.
    tests_require=[
        'nose>=1.3.7',
        'configobj>=4.7.2',
    ],
    test_suite='nose.collector',
)
| 1,209 | 347 |
def divide_elementos_en_lista(lista, divisor):
    """Divide every element of ``lista`` by ``divisor``.

    Defensive programming: a division by zero is caught rather than allowed
    to propagate.  Note that with an empty list no division ever happens, so
    ``[]`` is returned even when ``divisor == 0``.

    :param lista: iterable of numbers
    :param divisor: number to divide each element by
    :return: list of quotients, or an error string when dividing by zero
    """
    try:
        return [elemento / divisor for elemento in lista]
    except ZeroDivisionError as error:
        print(error)
        return 'No se puede dividir entre 0'
# Sample input, defined at module level so it is importable.
lista = range(10)
if __name__ == "__main__":
    # Interactive demo: read the divisor from stdin and print the result.
    divisor = int(input('Escribe el divisor: '))
    print(divide_elementos_en_lista(lista, divisor))
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
from openomics_web.utils.str_utils import longest_common_prefix
def DataTableColumnSelect(columns):
    """Build the pair of dropdowns used to configure a data-table import.

    Args:
        columns: sequence of column names; the first is preselected as the
            gene id/name index column, and their longest common prefixes
            populate the multi-select prefix dropdown.
    """
    index_dropdown = dcc.Dropdown(
        id='data-table-genes-col-name',
        options=[{'label': name, 'value': name} for name in columns],
        style={'width': '100%'},
        value=columns[0],
    )
    prefix_dropdown = dcc.Dropdown(
        id='data-table-columns-select',
        options=[{'label': prefix, 'value': prefix}
                 for prefix in longest_common_prefix(columns)],
        style={'width': '100%'},
        multi=True,
    )
    return html.Div([
        html.Div(['Select the gene id/name column to index by:']),
        index_dropdown,
        html.Div(['Select the column prefixes to import:']),
        prefix_dropdown,
    ])
def ExpressionDataTable(df):
    """Render an expression DataFrame as a paged, filterable Dash DataTable.

    Args:
        df: DataFrame-like object whose columns become the table columns
            (only ``df.columns`` is read here; data is supplied by callbacks).
    """
    return html.Div(
        className="row",
        children=[
            html.Div(
                dt.DataTable(
                    id='expression-datatable',
                    columns=[{"name": i, "id": i} for i in df.columns],
                    # 'custom' actions mean paging/filtering/sorting are
                    # server-side: this component only emits state, and
                    # callbacks elsewhere are expected to provide the rows.
                    page_current=0,
                    page_size=20,
                    page_action='custom',
                    filter_action='custom',
                    filter_query='',
                    sort_action='custom',
                    sort_mode='multi',
                    sort_by=[],
                    style_as_list_view=True,
                    style_cell={
                        'overflow': 'hidden',
                        'textOverflow': 'clip',
                        'whiteSpace': 'normal'
                    },
                    style_data={'width': '30px'},
                    # Zebra striping on odd rows.
                    style_data_conditional=[
                        {'if': {'row_index': 'odd'},
                         'backgroundColor': 'rgb(248, 248, 248)'
                         },
                    ],
                    style_table={"maxHeight": '800px',
                                 'width': '800px',
                                 'marginTop': '5px',
                                 'marginBottom': '10px',
                                 'overflowX': 'scroll'
                                 },
                    style_header={
                        'backgroundColor': 'white',
                        'fontWeight': 'bold'
                    },
                    row_selectable="multi",
                    selected_rows=[],
                    # virtualization=True,
                ),
                style={'height': 750, 'overflowY': 'scroll'},
                className='six columns'
            ),
            # Companion container; presumably filled with a linked graph by a
            # callback -- confirm against the callback definitions.
            html.Div(
                id='table-paging-with-graph-container',
                className="five columns"
            )
        ]
    )
# Operator tokens accepted in dash DataTable `filter_query` strings, mapped
# to aliases.  Word operators carry a trailing space so e.g. 'ge ' cannot
# match inside an identifier; the first entry of each row is the canonical
# name returned by split_filter_part.
operators = [['ge ', '>='],
             ['le ', '<='],
             ['lt ', '<'],
             ['gt ', '>'],
             ['ne ', '!='],
             ['eq ', '='],
             ['contains '],
             ['datestartswith ']]


def split_filter_part(filter_part):
    """Split one dash DataTable filter term into (column, operator, value).

    Args:
        filter_part: one term of a ``filter_query``, e.g. '{price} ge 10'
            or '{name} contains "foo"'.

    Returns:
        ``(name, operator, value)`` on success, where the value is unquoted
        and converted to float when possible; ``[None, None, None]`` when no
        known operator is present.
    """
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                # The column name is wrapped in braces: '{col}'.
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
                value_part = value_part.strip()
                # Bug fix: value_part[0] raised IndexError for an empty value
                # (e.g. '{col} eq '); [:1] yields '' instead so an empty
                # string value is returned.
                v0 = value_part[:1]
                if v0 and v0 == value_part[-1] and v0 in ("'", '"', '`'):
                    # Quoted string: strip the quotes and unescape embedded ones.
                    value = value_part[1: -1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part
                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value
    return [None] * 3
def expression_data_view():
    """Build the dark-themed expression DataTable container.

    Returns an html.Div wrapping an empty dash DataTable; data and columns
    are presumably supplied later by callbacks targeting id 'data-table'
    (TODO confirm against the callback definitions).
    """
    return html.Div(id='table-container', children=[dt.DataTable(
        id="data-table",
        row_selectable='multi',
        # sorting=True,
        # filtering=True,
        # Let cell contents flow/scroll rather than being clipped.
        css=[{
            "selector": ".dash-cell div.dash-cell-value",
            "rule": "display: inline; "
                    "white-space: inherit; "
                    "overflow: auto; "
                    "text-overflow: inherit;"
        }],
        style_cell={
            "whiteSpace": "no-wrap",
            "overflow": "hidden",
            "textOverflow": "ellipsis",
            "maxWidth": 100,
            'fontWeight': 100,
            'fontSize': '11pt',
            'fontFamily': 'Courier New',
            'backgroundColor': '#1F2132'
        },
        style_header={
            'backgroundColor': '#1F2132',
            'textAlign': 'center'
        },
        style_table={
            "maxHeight": "310px",
            'width': '320px',
            'marginTop': '5px',
            'marginBottom': '10px',
        },
        # n_fixed_rows=1,
        # n_fixed_columns=1
    )])
| 5,259 | 1,429 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import datetime
import pandas as pd
from bokeh.models.widgets import Tabs
from warehouse import CassandraStorage
import util.util as util
from util.util import pandas_factory,symbol_list
from bokeh.models import ColumnDataSource, Panel
from bokeh.layouts import column, row, gridplot,layout
from bokeh.plotting import curdoc, figure, show
from bokeh.models import LinearAxis, Range1d
from bokeh.models import HoverTool
from bokeh.palettes import all_palettes
from random import choice
from visualization.compare_tab import compare_plot
from visualization.single_tab import candlestick,stream_price
from visualization.economy_tab import geomap,economy_plot,stream_news
from pytz import timezone
"""TAB 1"""
# layout
p1,stock_select,summaryText,financialText,s=candlestick()
p2,update=stream_price()
l1=column(row(stock_select),
gridplot([[p1],[p2]], toolbar_location="right", plot_width=1300),
row(summaryText,financialText,s))
tab1 = Panel(child = l1, title = 'Stock: Streaming & Fundamental')
"""TAB 2"""
p,p2,widget,stats,corr=compare_plot()
l2=column(row(widget,stats,corr),gridplot([[p],[p2]], toolbar_location="right", plot_width=1300))
tab2=Panel(child = l2, title = 'Stock: Comparison')
"""TAB 3"""
div,update2=stream_news()
p,bea_select=geomap()
e1,e2,e3,e4,economy_select1,economy_select2,economy_select3,economy_select4=economy_plot()
l3=column(row(p,column(bea_select,div)),
gridplot([[column(economy_select1,e1),column(economy_select2,e2)],
[column(economy_select3,e3),column(economy_select4,e4)]],
toolbar_location="right", plot_width=1300))
tab3 = Panel(child = l3, title = 'Economy')
"""document"""
dashboard = Tabs(tabs = [tab1,tab2,tab3])
curdoc().add_root(dashboard)
curdoc().add_periodic_callback(update, 600)
curdoc().add_periodic_callback(update2, 600)
curdoc().title = "Financial Market Visualization & Analysis"
| 1,982 | 729 |
import os
OPTIMIZE = os.environ.get("OPTIMIZE", "1") == "1"
# basic (non-accuracy loss) optimization
REMOVE_REDUNDANT_OPERATOR = os.environ.get("REMOVE_REDUNDANT_OPERATOR", "1") == "1"
SIMPLIFY_ELEMENTWISE = os.environ.get("SIMPLIFY_ELEMENTWISE", "1") == "1"
REPLACE_SCALAR_OPERATOR = os.environ.get("REPLACE_SCALAR_OPERATOR", "1") == "1"
REMOVE_NO_EFFECT_OPERATOR = os.environ.get("REMOVE_NO_EFFECT_OPERATOR", "1") == "1"
ELEMENTWISE_KERNEL_FUSION = os.environ.get("ELEMENTWISE_KERNEL_FUSION", "1") == "1"
SIMPLIFY_ELEMENTWISE_SEQUENTIAL = os.environ.get("SIMPLIFY_ELEMENTWISE_SEQUENTIAL", "1") == "1"
SIMPLIFY_ASSOCIATIVE_OPERATOR = os.environ.get("SIMPLIFY_ASSOCIATIVE_OPERATOR", "1") == "1"
SIMPLIFY_COMMUTATIVE_OPERATOR = os.environ.get("SIMPLIFY_COMMUTATIVE_OPERATOR", "1") == "1"
MERGE_SGEMM_AND_ELEMENTWISE_MUL = os.environ.get("MERGE_SGEMM_AND_ELEMENTWISE_MUL", "1") == "1"
OPTIMIZE_CHANNEL_MODE = os.environ.get("OPTIMIZE_CHANNEL_MODE", "1") == "1"
EXTRACT_UNIFORM_LITERAL = os.environ.get("EXTRACT_UNIFORM_LITERAL", "0") == "1"
CONSTANT_FOLDING = os.environ.get("CONSTANT_FOLDING", "1") == "1"
# compression
CONV_FILTER_PRUNING = os.environ.get("CONV_FILTER_PRUNING", "0") == "1"
CONV_SVD_COMPRESSION = os.environ.get("CONV_SVD_COMPRESSION", "0") == "1"
# memory allocation
VALIDATE_GENERATED_SOURCE = os.environ.get("VALIDATE_GENERATED_SOURCE", "1") == "1"
OPTIMIZE_INPLACE_OPERATION = os.environ.get("OPTIMIZE_INPLACE_OPERATION", "1") == "1"
OPTIMIZE_MEMORY_ALLOCATION = os.environ.get("OPTIMIZE_MEMORY_ALLOCATION", "1") == "1"
# webgl backend
WEBGL_OPTIMIZE_TEXTURE_SIZE = os.environ.get("WEBGL_OPTIMIZE_TEXTURE_SIZE", "1") == "1"
| 1,650 | 799 |
import sys
from src import Bot, Client, Collector, \
Predictor, PredictorLearnThread, Scribe, \
Trader, TraderThreadCleaner, GarbageCleanerThread
if __name__ == '__main__':
    # '-p' as the first CLI argument routes client/bot traffic through a proxy.
    use_proxy = len(sys.argv) > 1 and sys.argv[1] == '-p'

    # Every entity receives a reference to the whole pool so they can reach
    # each other without a global registry.
    pool = {
        'client': Client(use_proxy),
        'bot': Bot(use_proxy),
        'collector': Collector(),
        'predictor': Predictor(),
        'scribe': Scribe(),
        'trader': Trader(),
    }
    for entity in pool.values():
        entity.set_pool(pool)

    # Background threads are daemons so they never block interpreter shutdown.
    # Thread.setDaemon() is deprecated since Python 3.10; assign the `daemon`
    # attribute before start() instead.
    garbage_cleaning_thread = GarbageCleanerThread(pool['bot'])
    garbage_cleaning_thread.daemon = True
    garbage_cleaning_thread.start()

    predictor_learn_thread = PredictorLearnThread(pool['predictor'], pool['client'], pool['bot'])
    predictor_learn_thread.daemon = True
    predictor_learn_thread.start()

    trader_thread_cleaner = TraderThreadCleaner(pool['trader'], pool['bot'])
    trader_thread_cleaner.daemon = True
    trader_thread_cleaner.start()
| 994 | 337 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/campaign_draft.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.enums import campaign_draft_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_campaign__draft__status__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/campaign_draft.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\022CampaignDraftProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n6google/ads/googleads/v6/resources/campaign_draft.proto\x12!google.ads.googleads.v6.resources\x1a\x39google/ads/googleads/v6/enums/campaign_draft_status.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xad\x05\n\rCampaignDraft\x12\x45\n\rresource_name\x18\x01 \x01(\tB.\xe0\x41\x05\xfa\x41(\n&googleads.googleapis.com/CampaignDraft\x12\x1a\n\x08\x64raft_id\x18\t \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12\x45\n\rbase_campaign\x18\n \x01(\tB)\xe0\x41\x05\xfa\x41#\n!googleads.googleapis.com/CampaignH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x0b \x01(\tH\x02\x88\x01\x01\x12\x46\n\x0e\x64raft_campaign\x18\x0c \x01(\tB)\xe0\x41\x03\xfa\x41#\n!googleads.googleapis.com/CampaignH\x03\x88\x01\x01\x12_\n\x06status\x18\x06 \x01(\x0e\x32J.google.ads.googleads.v6.enums.CampaignDraftStatusEnum.CampaignDraftStatusB\x03\xe0\x41\x03\x12(\n\x16has_experiment_running\x18\r \x01(\x08\x42\x03\xe0\x41\x03H\x04\x88\x01\x01\x12(\n\x16long_running_operation\x18\x0e \x01(\tB\x03\xe0\x41\x03H\x05\x88\x01\x01:q\xea\x41n\n&googleads.googleapis.com/CampaignDraft\x12\x44\x63ustomers/{customer_id}/campaignDrafts/{base_campaign_id}~{draft_id}B\x0b\n\t_draft_idB\x10\n\x0e_base_campaignB\x07\n\x05_nameB\x11\n\x0f_draft_campaignB\x19\n\x17_has_experiment_runningB\x19\n\x17_long_running_operationB\xff\x01\n%com.google.ads.googleads.v6.resourcesB\x12\x43\x61mpaignDraftProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_campaign__draft__status__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CAMPAIGNDRAFT = _descriptor.Descriptor(
name='CampaignDraft',
full_name='google.ads.googleads.v6.resources.CampaignDraft',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.CampaignDraft.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A(\n&googleads.googleapis.com/CampaignDraft', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='draft_id', full_name='google.ads.googleads.v6.resources.CampaignDraft.draft_id', index=1,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='base_campaign', full_name='google.ads.googleads.v6.resources.CampaignDraft.base_campaign', index=2,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A#\n!googleads.googleapis.com/Campaign', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.resources.CampaignDraft.name', index=3,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='draft_campaign', full_name='google.ads.googleads.v6.resources.CampaignDraft.draft_campaign', index=4,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A#\n!googleads.googleapis.com/Campaign', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v6.resources.CampaignDraft.status', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_experiment_running', full_name='google.ads.googleads.v6.resources.CampaignDraft.has_experiment_running', index=6,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='long_running_operation', full_name='google.ads.googleads.v6.resources.CampaignDraft.long_running_operation', index=7,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352An\n&googleads.googleapis.com/CampaignDraft\022Dcustomers/{customer_id}/campaignDrafts/{base_campaign_id}~{draft_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_draft_id', full_name='google.ads.googleads.v6.resources.CampaignDraft._draft_id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_base_campaign', full_name='google.ads.googleads.v6.resources.CampaignDraft._base_campaign',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.resources.CampaignDraft._name',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_draft_campaign', full_name='google.ads.googleads.v6.resources.CampaignDraft._draft_campaign',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_has_experiment_running', full_name='google.ads.googleads.v6.resources.CampaignDraft._has_experiment_running',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_long_running_operation', full_name='google.ads.googleads.v6.resources.CampaignDraft._long_running_operation',
index=5, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=243,
serialized_end=928,
)
_CAMPAIGNDRAFT.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_campaign__draft__status__pb2._CAMPAIGNDRAFTSTATUSENUM_CAMPAIGNDRAFTSTATUS
_CAMPAIGNDRAFT.oneofs_by_name['_draft_id'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['draft_id'])
_CAMPAIGNDRAFT.fields_by_name['draft_id'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_draft_id']
_CAMPAIGNDRAFT.oneofs_by_name['_base_campaign'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['base_campaign'])
_CAMPAIGNDRAFT.fields_by_name['base_campaign'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_base_campaign']
_CAMPAIGNDRAFT.oneofs_by_name['_name'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['name'])
_CAMPAIGNDRAFT.fields_by_name['name'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_name']
_CAMPAIGNDRAFT.oneofs_by_name['_draft_campaign'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['draft_campaign'])
_CAMPAIGNDRAFT.fields_by_name['draft_campaign'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_draft_campaign']
_CAMPAIGNDRAFT.oneofs_by_name['_has_experiment_running'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['has_experiment_running'])
_CAMPAIGNDRAFT.fields_by_name['has_experiment_running'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_has_experiment_running']
_CAMPAIGNDRAFT.oneofs_by_name['_long_running_operation'].fields.append(
_CAMPAIGNDRAFT.fields_by_name['long_running_operation'])
_CAMPAIGNDRAFT.fields_by_name['long_running_operation'].containing_oneof = _CAMPAIGNDRAFT.oneofs_by_name['_long_running_operation']
# ---------------------------------------------------------------------------
# protoc-generated wiring. DO NOT hand-edit: regenerate from the .proto file;
# any manual change (including these comments) is lost on regeneration.
# ---------------------------------------------------------------------------
DESCRIPTOR.message_types_by_name['CampaignDraft'] = _CAMPAIGNDRAFT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class, built at import time from the descriptor above.
CampaignDraft = _reflection.GeneratedProtocolMessageType('CampaignDraft', (_message.Message,), {
  'DESCRIPTOR' : _CAMPAIGNDRAFT,
  '__module__' : 'google.ads.googleads.v6.resources.campaign_draft_pb2'
  # @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.CampaignDraft)
  })
_sym_db.RegisterMessage(CampaignDraft)
# Standard protoc output: presumably resets the Python-level options so the
# serialized options embedded in serialized_pb are authoritative -- generated
# code, do not modify.
DESCRIPTOR._options = None
_CAMPAIGNDRAFT.fields_by_name['resource_name']._options = None
_CAMPAIGNDRAFT.fields_by_name['draft_id']._options = None
_CAMPAIGNDRAFT.fields_by_name['base_campaign']._options = None
_CAMPAIGNDRAFT.fields_by_name['draft_campaign']._options = None
_CAMPAIGNDRAFT.fields_by_name['status']._options = None
_CAMPAIGNDRAFT.fields_by_name['has_experiment_running']._options = None
_CAMPAIGNDRAFT.fields_by_name['long_running_operation']._options = None
_CAMPAIGNDRAFT._options = None
# @@protoc_insertion_point(module_scope)
| 11,800 | 4,781 |
# -*- coding: utf-8 -*-
"""
Created on 09 Jul 2020 23:36:50
@author: jiahuei
"""
| 81 | 49 |
from __future__ import annotations
from typing import Dict, Union
from discord import Guild, TextChannel
import traceback
from .. import botState, lib
from ..baseClasses import serializable
from ..cfg import cfg
from ..game import sdbGame, sdbDeck
from ..reactionMenus import SDBSignupMenu
class BasedGuild(serializable.Serializable):
    """A class representing a guild in discord, and storing extra bot-specific information about it.

    :var id: The ID of the guild, directly corresponding to a discord guild's ID.
    :vartype id: int
    :var dcGuild: This guild's corresponding discord.Guild object
    :vartype dcGuild: discord.Guild
    """

    def __init__(self, id: int, dcGuild: Guild, commandPrefix: str = cfg.defaultCommandPrefix,
                 runningGames: Dict[TextChannel, Union[sdbGame.SDBGame, sdbGame.GameChannelReservation]] = None,
                 decks: Dict[str, dict] = None, modRoleID: int = -1,
                 scrapbookChannelId: int = -1, scrapbookMinCookies: int = 1):
        """
        :param int id: The ID of the guild, directly corresponding to a discord guild's ID.
        :param discord.Guild dcGuild: This guild's corresponding discord.Guild object
        :param str commandPrefix: Prefix messages must start with to be treated as commands
        :param dict runningGames: Mapping of channel -> running game (or reservation). A fresh dict is
            created when omitted.
        :param dict decks: Mapping of deck name -> deck metadata. A fresh dict is created when omitted.
        :param int modRoleID: ID of the guild's moderator role, or -1 when unset
        :param int scrapbookChannelId: ID of the scrapbook channel, or -1 when unset
        :param int scrapbookMinCookies: Minimum cookie count for scrapbook inclusion
        :raise lib.exceptions.NoneDCGuildObj: When dcGuild is not a discord.Guild
        :raise ValueError: When commandPrefix is empty
        """
        if not isinstance(dcGuild, Guild):
            raise lib.exceptions.NoneDCGuildObj("Given dcGuild of type '" + dcGuild.__class__.__name__ +
                                                "', expecting discord.Guild")
        self.id = id
        self.dcGuild = dcGuild
        if not commandPrefix:
            raise ValueError("Empty command prefix provided")
        self.commandPrefix = commandPrefix
        # Bug fix: `runningGames={}` / `decks={}` as literal defaults were shared
        # across every instance constructed without those arguments (classic
        # mutable-default-argument bug); default to None and build fresh dicts.
        self.runningGames = {} if runningGames is None else runningGames
        self.decks = {} if decks is None else decks
        self.activeDecks = {}  # deck name -> already-built SDBDeck cache
        self.modRoleID = modRoleID
        self.modRole = None  # resolved elsewhere from modRoleID -- TODO confirm
        self.scrapbookChannelId = scrapbookChannelId
        self.scrapbookMinCookies = scrapbookMinCookies

    async def startGameSignups(self, owner, channel, deckName, expansionNames, rounds):
        """Validate the request, build (or reuse) the deck, and open a signup menu in channel.

        :param owner: The user starting the game
        :param channel: The TextChannel in which to run the game; must belong to this guild
        :param str deckName: Name of a deck registered in self.decks
        :param expansionNames: Expansions of the deck to play with
        :param rounds: Number of rounds to play
        :raise NameError: When deckName is not a registered deck
        :raise RuntimeError: When channel does not belong to this guild
        :raise ValueError: When channel already hosts a running game
        """
        if deckName not in self.decks:
            raise NameError("Unknown deck name: " + deckName)
        if channel.guild.id != self.id:
            raise RuntimeError("Attempted to start a game in a channel not owned by this guild: "
                               + channel.name + "#" + str(channel.id))
        if channel in self.runningGames:
            # Fixed typo in the error message: "aleady" -> "already".
            raise ValueError("Attempted to start a game in a channel which already contains a running game: "
                             + channel.name + "#" + str(channel.id))
        # NOTE(review): activeDecks is read here but never written in this class;
        # presumably populated elsewhere -- confirm the cache is actually filled.
        if deckName in self.activeDecks:
            gameDeck = self.activeDecks[deckName]
        else:
            try:
                gameDeck = sdbDeck.SDBDeck(self.decks[deckName]["meta_path"])
            except RuntimeError as e:
                # Deck building is best-effort: report to the channel, log the
                # traceback, and skip game creation below.
                gameDeck = None
                await channel.send("An unexpected error occurred when building the deck, the error has been logged.\nPlease try playing with a different deck!")
                botState.logger.log("BasedGuild", "startGameSignups",
                                    "Exception occured when trying to build a deck before starting a game",
                                    eventType=type(e).__name__, trace=traceback.format_exception(type(e), e, e.__traceback__))
        if gameDeck is not None:
            self.runningGames[channel] = sdbGame.SDBGame(owner, gameDeck, expansionNames, channel, rounds, self)
            # NOTE(review): sending an empty message may be rejected by discord --
            # confirm; the menu immediately overwrites it via updateMessage().
            signupMsg = await channel.send("")
            signupMenu = SDBSignupMenu.SDBSignupMenu(signupMsg, self.runningGames[channel],
                                                     lib.timeUtil.timeDeltaFromDict(cfg.timeouts.gameJoinMenu))
            botState.reactionMenusDB[signupMsg.id] = signupMenu
            await signupMenu.updateMessage()
            self.decks[deckName]["plays"] += 1

    def toDict(self, **kwargs) -> dict:
        """Serialize this BasedGuild into dictionary format to be saved to file.

        :return: A dictionary containing all information needed to reconstruct this BasedGuild
        :rtype: dict
        """
        # NOTE(review): serializes from the resolved modRole object, not modRoleID;
        # if modRole was never resolved, a configured modRoleID is dropped on save.
        # Confirm this is intended before changing.
        return {"commandPrefix": self.commandPrefix, "decks": self.decks,
                "modRoleID": self.modRole.id if self.modRole is not None else -1,
                "scrapbookChannelId": self.scrapbookChannelId,
                "scrapbookMinCookies": self.scrapbookMinCookies}

    @classmethod
    def fromDict(cls, guildDict: dict, **kwargs) -> BasedGuild:
        """Factory function constructing a new BasedGuild object from the information
        in the provided guildDict - the opposite of BasedGuild.toDict

        :param int id: The discord ID of the guild (required kwarg)
        :param dict guildDict: A dictionary containing all information required to build the BasedGuild object
        :return: A BasedGuild according to the information in guildDict
        :rtype: BasedGuild
        :raise NameError: When the 'id' kwarg is missing
        :raise lib.exceptions.NoneDCGuildObj: When no discord guild exists for the given id
        """
        if "id" not in kwargs:
            raise NameError("Required kwarg missing: id")
        guildID = kwargs["id"]
        dcGuild = botState.client.get_guild(guildID)
        if not isinstance(dcGuild, Guild):
            raise lib.exceptions.NoneDCGuildObj("Could not get guild object for id " + str(guildID))
        # Collapse the previously duplicated construction paths into one:
        # commandPrefix is only forwarded when present, preserving the default.
        ctorKwargs = {"decks": guildDict.get("decks", {}),
                      "modRoleID": guildDict.get("modRoleID", -1),
                      "scrapbookChannelId": guildDict.get("scrapbookChannelId", -1),
                      "scrapbookMinCookies": guildDict.get("scrapbookMinCookies", 1)}
        if "commandPrefix" in guildDict:
            ctorKwargs["commandPrefix"] = guildDict["commandPrefix"]
        return cls(guildID, dcGuild, **ctorKwargs)
| 5,733 | 1,667 |
from dataspot.config.configurators.network_configurators.plot_height_configurator import PlotHeightConfigurator
from dataspot.config.configurators.network_configurators.plot_width_configurator import PlotWidthConfigurator
from dataspot.config.configurators.network_configurators.xrange_configurator import XRangeConfigurator
from dataspot.config.configurators.network_configurators.yrange_configurator import YRangeConfigurator
from dataspot.config.configurators.node_configurators.node_size_configurator import NodeSizeConfigurator
from dataspot.config.configurators.node_configurators.golden_sources_configurator import GoldenSourcesConfigurator
class NetworkConfiguratorBuilder(object):
    """
    The NetworkConfiguratorBuilder builds all of the items needed to set the basic conditions of the configurators.
    The following variables will be set:
        [*] Plot width
        [*] Plot height
        [*] X-range
        [*] Y-range
        [*] Node-size-config (Interval based configuration setting the possible sizes a node can take, score-based)
        [*] Golden Sources (Golden Sources are the absolute root of your configurators analysis. These objects are
            often the main starting points of conducting your analysis.)
    """

    def __init__(self, config):
        """
        :param config: Dictionary containing all of the Dataspot basic configurations. An example of the basic
            structure can be found in examples/dataspot_config_example.json
        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        self.__network_config = config
        self.__plot_width = None
        self.__plot_height = None
        self.__x_range = None
        self.__y_range = None
        self.__node_size_config = None
        self.__golden_sources = None

    @staticmethod
    def _validate_config(config):
        """Shared guard for every setter: raise TypeError unless config is a dict."""
        if not isinstance(config, dict):
            raise TypeError("The configuration that has been provided is not of a dictionary type")

    def set_network_config(self, config):
        """
        :param config: Dictionary containing all of the Dataspot basic configurations.
        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        self.__network_config = config

    def get_network_config(self):
        """
        :return: The stored Dataspot configuration dictionary.
        :rtype: dict
        """
        return self.__network_config

    def set_plot_width(self, config):
        """Build and store the plot width from the given Dataspot config dict.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = PlotWidthConfigurator(config=config)
        configurator.build()
        self.__plot_width = configurator.get_plot_width_config()

    def get_plot_width(self):
        """
        :return: Integer width of the plot the configurators analysis will be placed in (None until built).
        :rtype: int
        """
        return self.__plot_width

    def set_plot_height(self, config):
        """Build and store the plot height from the given Dataspot config dict.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = PlotHeightConfigurator(config=config)
        configurator.build()
        self.__plot_height = configurator.get_plot_height_config()

    def get_plot_height(self):
        """
        :return: Integer height of the plot the configurators analysis will be placed in (None until built).
        :rtype: int
        """
        return self.__plot_height

    def set_x_range(self, config):
        """Build and store the x-axis range from the given Dataspot config dict.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = XRangeConfigurator(config=config)
        configurator.build()
        self.__x_range = configurator.get_x_range_config()

    def get_x_range(self):
        """
        :return: List with the two extremes (int) for the x-axis of the configurators graph (None until built).
        :rtype: list
        """
        return self.__x_range

    def set_y_range(self, config):
        """Build and store the y-axis range from the given Dataspot config dict.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = YRangeConfigurator(config=config)
        configurator.build()
        self.__y_range = configurator.get_y_range_config()

    def get_y_range(self):
        """
        :return: List with the two extremes (int) for the y-axis of the configurators graph (None until built).
        :rtype: list
        """
        return self.__y_range

    def set_node_size_config(self, config):
        """Build and store the score-interval based node size configuration.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = NodeSizeConfigurator(config=config)
        configurator.build()
        self.__node_size_config = configurator.get_node_size_config()

    def get_node_size_config(self):
        """
        :return: Dictionary with an interval-based configuration on which node sizes are determined; Dataspot
            matches the calculated root score with one of the interval levels (None until built).
        :rtype: dict
        """
        return self.__node_size_config

    def set_golden_sources(self, config):
        """Build and store the golden sources from the given Dataspot config dict.

        :type config: dict
        :raises TypeError: if config is not a dict
        """
        self._validate_config(config)
        configurator = GoldenSourcesConfigurator(config=config)
        configurator.build()
        self.__golden_sources = configurator.get_golden_sources_config()

    def get_golden_sources(self):
        """
        :return: List with all of the golden sources of the configurators graph (None until built).
        :rtype: list
        """
        return self.__golden_sources

    def build(self):
        """
        The build function prepares all of the configurators configuration components at once.
        """
        config = self.get_network_config()
        self.set_plot_width(config=config)
        self.set_plot_height(config=config)
        self.set_x_range(config=config)
        self.set_y_range(config=config)
        self.set_node_size_config(config=config)
        self.set_golden_sources(config=config)
| 8,720 | 2,239 |
import sys
from subdomain_takeover_tools.confirm_agile_crm import is_valid as agile_crm_is_valid
from subdomain_takeover_tools.confirm_azure_app_service import is_valid as azure_app_service_is_valid
from subdomain_takeover_tools.confirm_azure_edge_cdn import is_valid as azure_edge_cdn_is_valid
from subdomain_takeover_tools.confirm_azure_traffic_manager import is_valid as azure_traffic_manager_is_valid
from subdomain_takeover_tools.confirm_bigcartel import is_valid as bigcartel_is_valid
from subdomain_takeover_tools.confirm_cargo import is_valid as cargo_is_valid
from subdomain_takeover_tools.confirm_elb import is_valid as elb_is_valid
from subdomain_takeover_tools.confirm_fastly import is_valid as fastly_is_valid
from subdomain_takeover_tools.confirm_github import is_valid as github_is_valid
from subdomain_takeover_tools.confirm_pantheon import is_valid as pantheon_is_valid
from subdomain_takeover_tools.confirm_s3 import is_valid as s3_is_valid
from subdomain_takeover_tools.confirm_shopify import is_valid as shopify_is_valid
from subdomain_takeover_tools.confirm_surge import is_valid as surge_is_valid
from subdomain_takeover_tools.confirm_tumblr import is_valid as tumblr_is_valid
from subdomain_takeover_tools.confirm_unclaimed import is_valid as unclaimed_is_valid
def main():
    """Read subtake output from stdin and print the (non-)vulnerable domains."""
    inverse = '--inverse' in sys.argv
    strict = '--strict' in sys.argv
    raw = sys.stdin.read()
    for entry in raw.strip().split('\n'):
        if not entry.strip():
            # Skip blank lines silently.
            continue
        if ']\t\t' not in entry:
            raise IOError("Unexpected input received, currently only subtake output is supported")
        service, target, domain = _process_line(entry)
        _process_subtake_output(service, target, domain, inverse, strict)
def _process_line(line):
(parts, domain) = line.split('\t\t')
if ': ]' in parts:
service = parts[1:-3]
target = ''
else:
(service, target) = parts[1:-2].split(': ')
return service, target, domain
def _process_subtake_output(service, target, domain, inverse, strict):
    """Print the domain when the confirmation result xor `inverse` is truthy.

    Unsupported services (None result) are skipped entirely.
    """
    confirmed = _perform_check(service, target, domain)
    if confirmed is None:
        return
    # `!=` on two booleans is xor: print confirmed domains, or with
    # --inverse print the unconfirmed ones.
    if confirmed != inverse:
        print(domain)
def _perform_check(service, target, domain):
    """Dispatch the takeover confirmation for one (service, target, domain).

    :return: True/False from the service-specific checker, or None when the
        service (or Azure sub-service) is not supported.
    """
    # Azure is special-cased: the checker depends on the CNAME target suffix.
    if service == 'azure':
        if target.endswith('azurewebsites.net'):
            return azure_app_service_is_valid(domain, target)
        elif target.endswith('azureedge.net'):
            return azure_edge_cdn_is_valid(domain, target)
        elif target.endswith('trafficmanager.net'):
            return azure_traffic_manager_is_valid(domain, target)
        # other Azure services are not yet supported
        return None
    # Dispatch table replaces the original if/elif chain, which also carried
    # a duplicated (dead) `elif service == 'github'` branch.
    checks = {
        'agilecrm': agile_crm_is_valid,
        'bigcartel': bigcartel_is_valid,
        'cargo': cargo_is_valid,
        'elasticbeanstalk': elb_is_valid,
        'fastly': fastly_is_valid,
        'github': github_is_valid,
        'pantheon': pantheon_is_valid,
        's3 bucket': s3_is_valid,
        'shopify': shopify_is_valid,
        'surge': surge_is_valid,
        'tumblr': tumblr_is_valid,
        'unclaimed': unclaimed_is_valid,
    }
    check = checks.get(service)
    return check(domain, target) if check is not None else None
# Entry point: stream subtake output through the confirmation pipeline.
if __name__ == "__main__":
    main()
| 3,842 | 1,182 |
from .app import MarathonApp, MarathonHealthCheck
from .base import MarathonResource, MarathonObject
from .constraint import MarathonConstraint
from .deployment import MarathonDeployment, MarathonDeploymentAction, MarathonDeploymentStep
from .endpoint import MarathonEndpoint
from .group import MarathonGroup
from .info import MarathonInfo, MarathonConfig, MarathonZooKeeperConfig
from .queue import MarathonQueueItem
from .task import MarathonTask
| 449 | 111 |
"""
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import unittest
import jsonrpc
import urllib
from StringIO import StringIO
class TestProxy(unittest.TestCase):
    # NOTE(review): Python 2 only -- relies on `StringIO.StringIO`,
    # `urllib.urlopen` and the `except Exc, e:` syntax; this module does not
    # parse under Python 3.
    def urlopen(self, url, data):
        # Stand-in for urllib.urlopen: records the POST body and returns the
        # canned response. `self.respdata` must be assigned by the test
        # before the proxy call triggers this.
        self.postdata = data
        return StringIO(self.respdata)
    def setUp(self):
        # Monkey-patch urllib.urlopen with the recorder above; the original
        # is stashed so tearDown can restore it.
        self.postdata=""
        self.urllib_openurl = urllib.urlopen
        urllib.urlopen = self.urlopen
    def tearDown(self):
        # Undo the monkey-patch so other test modules see the real urlopen.
        urllib.urlopen = self.urllib_openurl
    def test_ProvidesProxyMethod(self):
        # Attribute access on the proxy must yield a callable stub.
        s = jsonrpc.ServiceProxy("http://localhost/")
        self.assert_(callable(s.echo))
    def test_MethodCallCallsService(self):
        # A proxy call must POST a JSON-RPC request and unwrap the result.
        s = jsonrpc.ServiceProxy("http://localhost/")
        self.respdata='{"result":"foobar","error":null,"id":""}'
        echo = s.echo("foobar")
        self.assertEquals(self.postdata, jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':'jsonrpc'}))
        self.assertEquals(echo, 'foobar')
        # A non-null error field must surface as a JSONRPCException.
        self.respdata='{"result":null,"error":"MethodNotFound","id":""}'
        try:
            s.echo("foobar")
        except jsonrpc.JSONRPCException,e:
            self.assertEquals(e.error, "MethodNotFound")
| 1,953 | 624 |
# Generated by Django 2.2.13 on 2020-06-25 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Project.actual_expenditure and make Project.budget_amount optional."""

    dependencies = [
        ('deployments', '0028_auto_20200618_0904'),
    ]

    operations = [
        # New nullable integer column for the money actually spent.
        migrations.AddField(
            model_name='project',
            name='actual_expenditure',
            field=models.IntegerField(
                blank=True, null=True, verbose_name='actual expenditure'
            ),
        ),
        # Relax the existing budget column to allow empty values.
        migrations.AlterField(
            model_name='project',
            name='budget_amount',
            field=models.IntegerField(
                blank=True, null=True, verbose_name='budget amount'
            ),
        ),
    ]
| 650 | 211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import base64
import hmac
import hashlib
from requests.auth import AuthBase
def generate_auth_headers(message, api_key, secret_key, passphrase):
    """Build the CB-ACCESS-* authentication headers for a Coinbase-style API.

    :param message: pre-sign payload (method + path + body)
    :param api_key: API key string
    :param secret_key: base64-encoded HMAC secret
    :param passphrase: API passphrase
    :return: dict of HTTP headers; all values are str
    """
    timestamp = str(time.time())
    message = timestamp + message
    hmac_key = base64.b64decode(secret_key)
    # BUG FIX: hmac.new requires bytes on Python 3; the original passed the
    # str message and raised TypeError.
    signature = hmac.new(hmac_key, message.encode('utf-8'), hashlib.sha256)
    # Decode so the header value is str rather than bytes.
    signature_b64 = base64.b64encode(signature.digest()).decode('ascii')
    return {
        'Content-Type': 'Application/JSON',
        'CB-ACCESS-SIGN': signature_b64,
        'CB-ACCESS-TIMESTAMP': timestamp,
        'CB-ACCESS-KEY': api_key,
        'CB-ACCESS-PASSPHRASE': passphrase
    }
class CoinbaseAuth(AuthBase):
    """requests auth hook that signs every request with CB-ACCESS-* headers."""

    def __init__(self, api_key, api_secret, api_passphrase):
        """
        :param api_key: API key string
        :param api_secret: base64-encoded HMAC secret
        :param api_passphrase: API passphrase
        """
        self.api_key = api_key
        self.api_secret = api_secret
        self.api_passphrase = api_passphrase

    def __call__(self, r):
        # BUG FIX: r.body is None for body-less requests (e.g. GET) and may
        # be bytes for pre-encoded payloads; the original concatenation
        # raised TypeError in both cases.
        body = r.body or ''
        if isinstance(body, bytes):
            body = body.decode('utf-8')
        message = r.method + r.path_url + body
        r.headers.update(generate_auth_headers(message,
                                               self.api_key,
                                               self.api_secret,
                                               self.api_passphrase))
        return r
| 1,047 | 401 |
import numpy as np
def softmax(x):
    """Softmax over axis 0 (class axis), numerically stabilised.

    BUG FIX: the original exponentiated the raw logits, so large values
    overflowed np.exp to inf and produced NaNs; subtracting the per-column
    max leaves the result mathematically unchanged but safe.
    """
    exp_x = np.exp(x - np.max(x, axis=0))
    return exp_x / np.sum(exp_x, axis=0)
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def deriv_sigmoid(s):
    """Derivative of the sigmoid expressed via its output s: s * (1 - s)."""
    return s * (1.0 - s)
def relu(x):
    """Rectified linear unit: x where x > 0, else 0 (via boolean masking)."""
    mask = x > 0
    return mask * x
def heaviside_step(x):
    """Step function: 1.0 where x > 0, else 0.0, as a float array."""
    return np.where(x > 0, 1.0, 0.0)
class MultilayerPerceptronNN():
    """Fully-connected feed-forward network with a softmax output layer.

    Each weight matrix self.w[i] has shape (dim[i+1], dim[i] + 1): the extra
    column is the bias, fed by a row of ones appended to the layer input.
    """
    def __init__(self, dim=(784, 64, 10), activ='sigmoid'):
        """
        :param dim: layer sizes, input first and output last
        :param activ: hidden-layer activation, 'sigmoid' or 'relu'
        :raises Exception: for an unsupported activation name
        """
        self.dim = dim
        self.w = []
        for i in range(len(dim)-1):
            # Uniform init scaled by 1/sqrt(fan_in).
            lim = 1 / np.sqrt(dim[i])
            self.w.append(np.random.uniform(-lim, lim, (dim[i+1], dim[i]+1)))
        # BUG FIX: `self.best_w = self.w` aliased the SAME list, so saving
        # "best" weights in train() (best_w[i] = w[i].copy()) also replaced
        # the live weights, defeating best-weight tracking. Keep independent
        # copies instead.
        self.best_w = [w_i.copy() for w_i in self.w]
        if activ == 'sigmoid':
            self.activ = sigmoid
            self.d_activ = deriv_sigmoid
        elif activ == 'relu':
            self.activ = relu
            self.d_activ = heaviside_step
        else:
            raise Exception(f"Activation function '{activ}' not supported")
    def forward(self, x):
        """Forward pass for a batch x of shape (input_dim, batch).

        :return: (softmax output of shape (output_dim, batch), list of
            cached intermediates [input+bias, pre-activation, ...] used by
            backprop)
        """
        _, batch_size = x.shape
        ones_row = np.ones((1, batch_size))
        hidden = []
        for i in range(len(self.dim) - 2):
            # Append the bias row, cache layer input, then the pre-activation.
            x = np.concatenate((x, ones_row))
            hidden.append(x)
            x = self.w[i] @ x
            hidden.append(x)
            x = self.activ(x)
        x = np.concatenate((x, ones_row))
        hidden.append(x)
        x = self.w[-1] @ x
        y = softmax(x)
        return y, hidden
    def backprop(self, hidden, y, target):
        """Backpropagate the softmax/cross-entropy gradient.

        :param hidden: cache returned by forward()
        :param y: forward() output for the same batch
        :param target: one-hot labels, same shape as y
        :return: list of weight gradients, one per self.w[i]
        """
        # Softmax + cross-entropy gradient w.r.t. the logits.
        delta = y - target
        dw = []
        for i in range(len(self.dim) - 1):
            # hidden[2*len(dim)-4-2*i] is the (bias-augmented) input of the
            # layer currently being differentiated, walking backwards.
            deriv = delta @ hidden[2*len(self.dim)-4-2*i].T
            dw.append(deriv)
            if i != len(self.dim) - 2:
                d_activ = self.d_activ(hidden[2*len(self.dim)-4-2*i])
                delta = self.w[len(self.dim)-2-i].T @ delta * d_activ
                # Drop the gradient row belonging to the bias ones-row.
                delta = delta[:-1]
        dw.reverse()
        return dw
def one_hot(labels):
    """Return a (10, n) one-hot matrix: column j has a 1 at row labels[j]."""
    n = len(labels)
    encoded = np.zeros((10, n))
    encoded[labels, np.arange(n)] = 1
    return encoded
def accuracy(y, labels):
    """Fraction of columns of y whose argmax matches the corresponding label."""
    predicted = np.argmax(y, axis=0)
    hits = np.sum(predicted == labels)
    return hits / len(labels)
def ce_loss(y, target):
    """Mean cross-entropy between predicted probabilities y and one-hot target."""
    per_example = np.sum(np.log(y) * target, axis=0)
    return -np.mean(per_example)
def train(model, data, n_epoch, batch_size, lr0, decay_rate=0):
    """Train Neural Net model with mini-batch SGD.

    :param model: MultilayerPerceptronNN instance (updated in place)
    :param data: tuple (inputs, labels, inputs_val, labels_val); inputs are
        (features, examples) arrays
    :param n_epoch: number of passes over the training data
    :param batch_size: examples per SGD step (must be <= training set size)
    :param lr0: initial learning rate
    :param decay_rate: per-epoch decay; lr = lr0 / (1 + decay_rate * epoch)
    :return: dict with per-iteration 'loss', 'acc', 'vloss', 'vacc' lists
    """
    inp, labels, inp_val, labels_val = data
    n_examples = len(labels)
    assert batch_size <= n_examples
    n_itr = int(np.ceil(n_examples*n_epoch/batch_size))
    print_itr_step = 100
    print('Total iterations:', n_itr)
    # Pre-draw shuffled index rows, one row of batch_size indices per step.
    idx_permut = np.concatenate([np.random.permutation(n_examples)
                                 for _ in range(n_epoch+2)])
    idx_permut = idx_permut[:(n_itr+1)*batch_size].reshape((n_itr+1, -1))
    labels_one_hot = one_hot(labels)
    labels_val_one_hot = one_hot(labels_val)
    y, hidden = model.forward(inp[:, idx_permut[0]])
    y_val, _ = model.forward(inp_val)
    # Loss and accuracy of the training batch and validation set
    log = {'loss': [ce_loss(y, labels_one_hot[:, idx_permut[0]])],
           'acc': [accuracy(y, labels[idx_permut[0]])],
           'vloss': [ce_loss(y_val, labels_val_one_hot)],
           'vacc': [accuracy(y_val, labels_val)],}
    best_vloss = log['vloss'][0]
    for itr in range(n_itr):
        epoch = int(itr*batch_size/n_examples)
        dw = model.backprop(hidden, y, labels_one_hot[:, idx_permut[itr]])
        lr = lr0 / (1 + decay_rate*epoch)
        for i in range(len(model.dim) - 1):
            model.w[i] -= lr * dw[i]
        y, hidden = model.forward(inp[:, idx_permut[itr+1]])
        y_val, _ = model.forward(inp_val)
        log['loss'].append(ce_loss(y, labels_one_hot[:, idx_permut[itr+1]]))
        log['acc'].append(accuracy(y, labels[idx_permut[itr+1]]))
        log['vloss'].append(ce_loss(y_val, labels_val_one_hot))
        log['vacc'].append(accuracy(y_val, labels_val))
        # Store the weights yielding the best validation loss
        if log['vloss'][-1] < best_vloss:
            # BUG FIX: best_vloss was never updated here, so the snapshot was
            # overwritten by ANY loss better than the initial one, not by new
            # bests only.
            best_vloss = log['vloss'][-1]
            for i in range(len(model.dim) - 1):
                model.best_w[i] = model.w[i].copy()
        # Keep track of the loss
        if itr%print_itr_step == 0 or itr == n_itr-1:
            info = f"Iteration {itr}/{n_itr} (epoch {epoch})"
            info += f" ; loss={log['loss'][itr]} ; vloss={log['vloss'][itr]}"
            print(info)
    # Restore the best-validation weights before returning.
    for i in range(len(model.dim) - 1):
        model.w[i] = model.best_w[i].copy()
    return log
def normalize(data):
    """Min-max scale each row of data into [0, 1]; constant rows map to 0."""
    row_min = data.min(axis=1).reshape((-1, 1))
    row_max = data.max(axis=1).reshape((-1, 1))
    # Guard against division by zero: constant rows get a span of 1
    # (max == min contributes the 1), so they normalise to all zeros.
    span = (row_max - row_min) + (row_max == row_min)
    return (data - row_min) / span
def prepare(images, labels, p_validation=10):
    """Normalize images and split into train/validation sets.

    :param images: array of shape (n, ...) flattened to (n, features)
    :param labels: array of n labels
    :param p_validation: validation share as a PERCENTAGE (default 10%)
    :return: (inputs_train, labels_train, inputs_valid, labels_valid) with
        inputs transposed to (features, examples)
    """
    n_examples = len(images)
    inputs = images.reshape((n_examples, -1))
    normalized_inputs = normalize(inputs)
    permutations = np.random.permutation(n_examples)
    # BUG FIX: p_validation is a percentage, but the original computed
    # round(p_validation * n_examples) -- with the default 10 that requested
    # 10x the dataset, making validation the whole set and train empty.
    n_validation = round(p_validation / 100 * n_examples)
    validation_ids = permutations[:n_validation]
    train_ids = permutations[n_validation:]
    inputs_valid = normalized_inputs[validation_ids]
    labels_valid = labels[validation_ids]
    inputs_train = normalized_inputs[train_ids]
    labels_train = labels[train_ids]
    return inputs_train.T, labels_train, inputs_valid.T, labels_valid
from pyDatalog import pyDatalog
import action
import match
from reg import *
from logicalview import *
from flow_common import TABLE_LSP_EGRESS_FIRST, TABLE_LRP_INGRESS_IP_ROUTE, \
TABLE_EMBED2_METADATA, TABLE_DROP_PACKET, TABLE_OUTPUT_PKT
# Declare the pyDatalog logic terms (variables and predicates) used by the
# flow rules below.
pyDatalog.create_terms('Table, Priority, Match, Action')
pyDatalog.create_terms('Action1, Action2, Action3, Action4, Action5')
pyDatalog.create_terms('Match1, Match2, Match3, Match4, Match5')
pyDatalog.create_terms('embed_metadata, extract_metadata, pipeline_forward')
pyDatalog.create_terms('redirect_other_chassis')
pyDatalog.create_terms('_gateway_state_sum, gateway_ofport')
pyDatalog.create_terms('_gateway_ofport, _gateway_ofport_readd')
pyDatalog.create_terms('A, B, C, X, Y, Z, UUID_CHASSIS')
# NOTE(review): State, State1, State2, State_DEL, State_ADD, LR, UUID_LR,
# PHY_CHASSIS, OFPORT and the PCH_*/LR_*/REG_*/TUN_*/FLAG_*/NXM_Reg names are
# presumably provided by the star imports from reg/logicalview above --
# confirm there.
# Aggregate: sum of (chassis state + logical-router state) over all gateway
# chassis, keyed on the dummy key X == None.
# it does not count deleting-element in, because it was only consume by
# adding(_gateway_ofport) below
(_gateway_state_sum[X] == sum_(State, for_each=Z)) <= (
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (X == None) &
    (Z == PHY_CHASSIS[PCH_UUID])
)
# Tuple of ofports for live gateway chassis (state sum >= 0), ordered by
# chassis UUID and tagged ('adding', <state sum>).
(_gateway_ofport[X] == tuple_(Y, order_by=Z)) <= (
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (_gateway_state_sum[A] == B) &
    (X == ('adding', B)) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)
# Tuple of ofports for gateway chassis being removed (negative state sum),
# tagged ('deleting', State_DEL).
(_gateway_ofport[X] == tuple_(Y, order_by=Z)) <= (
    (X == ('deleting', State_DEL)) &
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State < 0) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)
# Re-add view: chassis that have a 'deleting' record but whose state sum is
# back to non-negative, tagged ('readding', State_ADD).
(_gateway_ofport_readd[X] == tuple_(Y, order_by=Z)) <= (
    (X == ('readding', State_ADD)) &
    (_gateway_ofport[A] == B) & (A[0] == 'deleting') &
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)
# gateway_ofport is the union of the adding/deleting view and the readd view.
(gateway_ofport[X] == Y) <= (_gateway_ofport[X] == Y)
(gateway_ofport[X] == Y) <= (_gateway_ofport_readd[X] == Y)
# Redirect flows: mark the packet with the redirect flag, pick a gateway
# ofport via bundle_load, embed metadata, then output.
# NOTE(review): this rule has arity 4 (with State) while the fallback below
# has arity 3 -- in pyDatalog these are distinct predicates; confirm the
# caller queries the right one(s).
# it may output same flows, because we have adding and readding
redirect_other_chassis(Priority, Match, Action, State) <= (
    (Priority == 1) &
    (gateway_ofport[X] == OFPORT) &
    (State == X[1]) & (State != 0) &
    match.match_none(Match) &
    action.load(1, NXM_Reg(REG_FLAG_IDX, FLAG_REDIRECT_BIT_IDX,
                           FLAG_REDIRECT_BIT_IDX), Action1) &
    action.bundle_load(NXM_Reg(REG_OUTPORT_IDX), OFPORT, Action2) &
    action.resubmit_table(TABLE_EMBED2_METADATA, Action3) &
    action.resubmit_table(TABLE_OUTPUT_PKT, Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)
# Lowest-priority fallback: no gateway available, drop the packet.
redirect_other_chassis(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.resubmit_table(TABLE_DROP_PACKET, Action)
)
# Copy datapath id and src/dst/flag registers into the tunnel id/metadata
# before sending a packet to another chassis.
embed_metadata(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.move(NXM_Reg(REG_DP_IDX, 0, 23),
                NXM_Reg(TUN_ID_IDX, 0, 23), Action1) &
    action.move(NXM_Reg(REG_SRC_IDX, 0, 15),
                NXM_Reg(TUN_METADATA0_IDX, 0, 15), Action2) &
    action.move(NXM_Reg(REG_DST_IDX, 0, 15),
                NXM_Reg(TUN_METADATA0_IDX, 16, 31), Action3) &
    action.move(NXM_Reg(REG_FLAG_IDX, 0, 31),
                NXM_Reg(TUN_METADATA0_IDX, 32, 63), Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)
# Inverse of embed_metadata: restore the registers from the tunnel fields on
# the receiving chassis.
extract_metadata(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.move(NXM_Reg(TUN_ID_IDX, 0, 23),
                NXM_Reg(REG_DP_IDX, 0, 23), Action1) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 0, 15),
                NXM_Reg(REG_SRC_IDX, 0, 15), Action2) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 16, 31),
                NXM_Reg(REG_DST_IDX, 0, 15), Action3) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 32, 63),
                NXM_Reg(REG_FLAG_IDX, 0, 31), Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)
pipeline_forward(Priority, Match, Action) <= (
    (Priority == 1) &
    match.ip_proto(Match1) &
    # a ip packet with 00 macaddress means it was a redirect packet which
    # send out by other host, deliver this packet to LR to help redirect
    match.eth_dst("00:00:00:00:00:00", Match2) &
    match.reg_flag(FLAG_REDIRECT, Match3) &
    (Match == Match1 + Match2 + Match3) &
    # TABLE_LRP_INGRESS_FIRST table is a tracing-point
    # as well and dec_ttl, skip that table
    action.resubmit_table(TABLE_LRP_INGRESS_IP_ROUTE, Action)
)
# it is a regular packet, foward to lsp egress table immediately
pipeline_forward(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.resubmit_table(TABLE_LSP_EGRESS_FIRST, Action)
)
| 5,123 | 2,203 |
"""
75. Sort Colors
https://leetcode.com/problems/sort-colors/
Time complexity: O(n) - a single pass over nums
Space complexity: O(1) - in-place swaps, constant extra space
"""
from typing import List
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """Sort an array of 0s, 1s and 2s in place (Dutch national flag).

        Do not return anything, modify nums in-place instead.
        """
        low = 0                 # boundary: everything left of low is 0
        high = len(nums) - 1    # boundary: everything right of high is 2
        scan = 0
        while scan <= high:
            value = nums[scan]
            if value == 0:
                nums[low], nums[scan] = nums[scan], nums[low]
                low += 1
                scan += 1
            elif value == 2:
                # Swap the 2 to the back; do not advance scan, the swapped-in
                # element still needs classifying.
                nums[scan], nums[high] = nums[high], nums[scan]
                high -= 1
            else:
                scan += 1
# Smoke test.
# BUG FIX: the original called Solution().maxProduct, a method that does not
# exist on this class (AttributeError); also sortColors mutates in place and
# returns None, so the list must be printed after sorting, not the call result.
ans = [
    [2,0,2,1,1,0] # [0,0,1,1,2,2]
]
for trails in ans:
    Solution().sortColors(trails)
    print(trails)
| 850 | 322 |
# Generated by Django 2.1.5 on 2019-02-16 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Contact.default_contact nullable while keeping it unique.

    NOTE(review): unique=True on a BooleanField with default=False allows at
    most ONE row holding False (NULLs are typically exempt from uniqueness) --
    presumably this enforces a single "default contact"; confirm that is the
    intended behavior.
    """

    dependencies = [
        ("people", "0010_contact_default_contact"),
    ]

    operations = [
        migrations.AlterField(
            model_name="contact",
            name="default_contact",
            field=models.BooleanField(blank=True, default=False, null=True, unique=True),
        ),
    ]
| 469 | 147 |
# Copyright 2020-present, Netherlands Institute for Sound and Vision (Nanne van Noord)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import json
import sys
from abc import ABC, abstractmethod
from dane.errors import APIRegistrationError, MissingEndpointError
from requests.utils import requote_uri
class Document():
    """This is a class representation of a document in DANE, it holds both data
    and some logic.

    :param target: Dict containing `id`, `url`, and `type` keys to described
        the target document.
    :type target: dict
    :param creator: Dict containing `id`, and `type` keys to describe the
        document owner/creator.
    :type creator: dict
    :param api: Reference to a class:`base_classes.base_handler` which is
        used to communicate with the server.
    :type api: :class:`base_classes.base_handler`, optional
    :param _id: ID of the document, assigned by DANE-server
    :type _id: int, optional
    :param created_at: Creation date
    :param updated_at: Last modified date
    """

    # Closed sets of accepted target and creator types.
    VALID_TYPES = ["Dataset", "Image", "Video", "Sound", "Text"]
    VALID_AGENTS = ["Organization", "Human", "Software"]

    def __init__(self, target, creator, api=None, _id=None,
            created_at=None, updated_at=None):
        # BUG FIX: the original guard read
        #   `if not {...} <= target.keys() and len(target['id']) > 2:`
        # which (a) skipped the KeyError whenever the id was 2 chars or
        # shorter, letting targets without `url`/`type` through, and
        # (b) raised a bare KeyError('id') from the len() call when `id`
        # itself was missing. Validate the required keys unconditionally.
        if not {"id", "url", "type"} <= target.keys():
            raise KeyError("Target object must contains at least the `id`," + \
                    "url, and type properties")

        if target['type'] not in self.VALID_TYPES:
            raise ValueError("Invalid target type. Valid types are: {}".format(
                ", ".join(self.VALID_TYPES)))

        self.target = target
        # Normalise the URL: strip surrounding whitespace and percent-encode
        # unsafe characters.
        self.target['url'] = requote_uri(str(self.target['url']).strip())

        if not {"id", "type"} <= creator.keys():
            raise KeyError("Creator object must contains at least the `id` " + \
                    "and type properties")

        if creator['type'] not in self.VALID_AGENTS:
            raise ValueError("Invalid creator type. Valid types are: {}".format(
                ", ".join(self.VALID_AGENTS)))

        self.creator = creator
        self.created_at = created_at
        self.updated_at = updated_at
        self.api = api
        self._id = _id

    def __str__(self):
        return self.to_json()

    def to_json(self, indent=None):
        """Returns this document serialised as JSON, excluding the API reference.

        :return: JSON string of the document
        :rtype: str
        """
        out = {}
        for kw in vars(self):
            if kw == 'api':
                # The API handler is infrastructure, not document data.
                continue
            elif kw == '_id' and self._id is None:
                # Unregistered documents have no id to serialise.
                continue
            else:
                out[kw] = getattr(self, kw)
        return json.dumps(out, indent=indent)

    @staticmethod
    def from_json(json_str):
        """Constructs a :class:`dane.Document` instance from a JSON string

        :param json_str: Serialised :class:`dane.Document`
        :type json_str: str or dict
        :return: the deserialised document
        :rtype: :class:`dane.Document`
        """
        if isinstance(json_str, str):
            json_str = json.loads(json_str)
        return Document(**json_str)

    def set_api(self, api):
        """Set the API for the document

        :param api: Reference to a :class:`base_classes.base_handler` which is
            used to communicate with the database, and queueing system.
        :type api: :class:`base_classes.base_handler`, optional
        :return: self
        """
        self.api = api
        return self

    def register(self):
        """Register this document in DANE, this will assign an _id to the
        document. Requires an API to be set.

        :return: self
        """
        if self._id is not None:
            raise APIRegistrationError('Document already registered')
        elif self.api is None:
            raise MissingEndpointError('No endpoint found to'\
                    'register document')

        self._id = self.api.registerDocument(document=self)
        return self

    def delete(self):
        """Delete this document. Requires an API to be set.
        """
        if self.api is None:
            raise MissingEndpointError('No API found')
        return self.api.deleteDocument(document=self)

    def getAssignedTasks(self, task_key = None):
        """Retrieve tasks assigned to this document. Accepts an optional
        task_key to filter for a specific type of tasks. Requires an
        API to be set.

        :param task_key: Key of task type to filter for
        :type task_key: string, optional
        :return: list of dicts with task keys and ids."""
        if self._id is None:
            raise APIRegistrationError('Document needs to be registered')
        elif self.api is None:
            raise MissingEndpointError('No endpoint found to'\
                    'query tasks')
        return self.api.getAssignedTasks(self._id, task_key)
| 5,590 | 1,553 |
import numpy as np
class Variable:
    """Minimal wrapper holding a numpy array in its ``data`` attribute."""

    def __init__(self, data: np.ndarray) -> None:
        # Store the payload as-is; no copying or validation.
        self.data = data
# Minimal smoke test when run as a script: wrap a scalar array and print it.
if __name__ == '__main__':
    x = Variable(np.array(1.0))
    print(x.data)
| 189 | 70 |
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
import torch, torchvision
from torchvision import transforms
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
# Fix all RNG seeds and force deterministic cuDNN kernels so that model
# predictions are reproducible across runs.
SEED = 123
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
def get_model(task='any_pterygium'):
    """Download the released VGG16-BN weights for a pterygium task and return
    the ready-to-use (model, device) pair in eval mode.

    :param task: 'any_pterygium' or 'referable_pterygium'
    """
    task_types = ['any_pterygium', 'referable_pterygium']
    # BUG FIX: the default used to be the misspelled 'any_ptergium', so
    # calling get_model() with no argument always tripped this assert.
    assert task in task_types, f"Pick from {task_types}"
    state_dict = load_state_dict_from_url(f'https://github.com/SERI-EPI-DS/pterygium_detection/releases/download/v1.0/{task}.pth')
    # Binary classifier (pterygium present vs not).
    model = torchvision.models.vgg16_bn(num_classes=2)
    model.load_state_dict(state_dict)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)
    model.eval()
    return model, device
def get_data_loader(root_dir, num_workers, batch_size):
    """Build a non-shuffled DataLoader over an ImageFolder of images resized
    and normalised for the VGG16 model.

    :param root_dir: ImageFolder-style directory of images
    :param num_workers: DataLoader worker processes
    :param batch_size: images per batch
    """
    image_transformations = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    def test_valid_file(path):
        # Filter out files PIL cannot open. BUG FIX: was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            _ = Image.open(path)
        except Exception:
            return False
        return True
    asp_dataset = torchvision.datasets.ImageFolder(root_dir,
                                                   is_valid_file=test_valid_file,
                                                   transform=image_transformations)
    data_loader = torch.utils.data.DataLoader(asp_dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=num_workers)
    return data_loader
def get_predictions(model, data_loader, device):
    """Run the model over every batch and return softmax probabilities as a
    single (n_images, n_classes) numpy array."""
    batches = []
    with torch.no_grad():
        for batch in tqdm(data_loader):
            inputs = batch[0].to(device)
            logits = model(inputs)
            probs = F.softmax(logits.detach(), dim=1)
            batches.append(probs.cpu().numpy())
    return np.concatenate(batches)
def main(args):
    """Predict over a folder of images and write the probabilities to CSV."""
    model, device = get_model(args.task_type)
    loader = get_data_loader(args.folder_path, args.workers, args.batch_size)
    probabilities = get_predictions(model, loader, device)
    # Strip the root folder prefix so the CSV holds relative file names.
    names = [path.replace(args.folder_path, '') for path, _ in loader.dataset.imgs]
    frame = pd.DataFrame({'files': names,
                          'prediction_probability': probabilities[:, 1]})
    frame.to_csv(args.df_save_path, index=False)
    return
if __name__ == '__main__':
    # Renamed the misleading local `args` (it held the parser, not the
    # parsed namespace) to `parser`.
    parser = argparse.ArgumentParser(description='Store model predictions as a csv')
    parser.add_argument('task_type',
                        default='any_pterygium',
                        const='any_pterygium',
                        nargs='?',
                        choices=['any_pterygium', 'referable_pterygium'],
                        help='which model to load: (default: %(default)s)')
    parser.add_argument('folder_path', type=str,
                        help='path to folder with images')
    parser.add_argument('-w', '--workers', default=6, type=int,
                        help='number of cores to use in parallel')
    parser.add_argument('-b', '--batch_size', default=64, type=int,
                        help='batch size')
    parser.add_argument('-s', '--df_save_path', default='./predictions.csv', type=str,
                        help='path to save predictions')
    main(parser.parse_args())
| 3,551 | 1,131 |
import pytest
from xndtools.kernel_generator.utils import NormalizedTypeMap
from xnd import xnd
import test_array as m
# Normalised xnd dtype string for the platform's C 'long' type; used by every
# test below so results are portable across 32/64-bit longs.
long_t = NormalizedTypeMap()('long')
def assert_equal(x, y):
    """Assert that two xnd values agree in both value and dtype."""
    same = x == y and x.dtype == y.dtype
    assert same
def test_array_range_input():
    """'input' intent: the kernel returns the sum; an exactly-matching
    C-contiguous argument is mutated to arange, while a strided view is
    copied first so neither the view nor its base array changes."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    r = m.test_array_range_input(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(a,
                 xnd([0, 1, 2], dtype=long_t)) # because `a` matches exactly
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    r = m.test_array_range_input(x)
    assert_equal(r, xnd(12, type=long_t))
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    assert_equal(a, xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t))
    # Strided kernel
    # TODO
def test_array_range_inplace():
    """'inplace' intent: both the exact C array and a strided view are
    overwritten with arange, and the view's writes reach its base array."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    r = m.test_array_range_inplace(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(a, xnd([0, 1, 2], dtype=long_t))
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    r = m.test_array_range_inplace(x)
    assert_equal(r, xnd(12, type=long_t))
    assert_equal(x, xnd([0, 1, 2], dtype=long_t))
    # Every second element of the base was rewritten through the view.
    assert_equal(a, xnd([1, 0, 3, 1, 5, 2, 7], dtype=long_t))
    # Strided kernel
    # TODO
def test_array_range_inout():
    """'inout' intent: behaves like inplace for a C-contiguous argument but
    rejects non C-contiguous views with ValueError."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    r = m.test_array_range_inout(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(a, xnd([0, 1, 2], dtype=long_t))
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
        r = m.test_array_range_inout(x)
    # Strided kernel
    # TODO
def test_array_range_input_output():
    """'input_output': returns (arange output array, sum); the input -- exact
    match or strided view -- is left unmodified."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    o, r = m.test_array_range_input_output(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
    assert_equal(a, xnd([1, 2, 3], dtype=long_t))
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    o, r = m.test_array_range_input_output(x)
    assert_equal(r, xnd(12, type=long_t))
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    assert_equal(a, xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t))
    # Strided kernel
    # TODO
def test_array_range_inplace_output():
    """In-place+output variant: the operand is overwritten with 0..n-1 AND a
    separate output array with the same contents is produced; the scalar
    return is the element sum."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    o, r = m.test_array_range_inplace_output(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
    assert_equal(a, xnd([0, 1, 2], dtype=long_t))
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    o, r = m.test_array_range_inplace_output(x)
    assert_equal(r, xnd(12, type=long_t))
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
    assert_equal(x, xnd([0, 1, 2], dtype=long_t))
    # the in-place write through the view reaches the odd slots of `a`
    assert_equal(a, xnd([1, 0, 3, 1, 5, 2, 7], dtype=long_t))
    # Strided kernel
    # TODO
def test_array_range_inout_output():
    """Inout+output variant: contiguous input behaves like the
    inplace+output case; a non-contiguous view raises ValueError."""
    # C kernel
    a = xnd([1, 2, 3], dtype=long_t)
    o, r = m.test_array_range_inout_output(a)
    assert_equal(r, xnd(6, type=long_t))
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
    assert_equal(a, xnd([0, 1, 2], dtype=long_t))
    # F kernel
    # TODO
    # Xnd kernel
    a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
    x = a[1::2]
    assert_equal(x, xnd([2, 4, 6], dtype=long_t))
    # inout arguments require C-contiguous memory; a strided view is not
    with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
        o, r = m.test_array_range_inout_output(x)
    # Strided kernel
    # TODO
def test_array_range_output():
    """Output-only variant: given a scalar n, the kernel fills an output
    array with 0..n-1; the scalar return value is not meaningful."""
    # using C, F, or Xnd kernel if defined
    o, r = m.test_array_range_output(xnd(3, type=long_t))
    assert_equal(r, xnd(0, type=long_t)) # could be random
    assert_equal(o, xnd([0, 1, 2], dtype=long_t))
def test_array_range_hide():
    """Hidden-output variant: only the return *type* is stable, so that is
    all that is asserted; the value itself is random."""
    # using C, F, or Xnd kernel if defined
    r = m.test_array_range_hide(xnd(3, type=long_t))
    assert r.type == xnd(0, type=long_t).type
    # r value is random
| 4,417 | 2,051 |
from . import CCProtocol, SABProtocol, SCProtocol, WSR98DProtocol
__all__ = ["CCProtocol", "SABProtocol", "SCProtocol", "WSR98DProtocol"] | 137 | 46 |
#!/usr/bin/env python
from .zplconvert import ZPLConvert
from .zplparser import zpl_parse
from .zpltools import Printer, PrinterError
__version__ = '0.0.4'
| 158 | 57 |
from __future__ import absolute_import
from . import db_util
from ..proto import krama_pb2
from ..conf import common
import MySQLdb
import os
from google.protobuf import text_format
import simplejson
from ..protobufjson.protobuf_json import *
# CREATE TABLE executions_tab
# (
# exec_id INT NOT NULL AUTO_INCREMENT,
# job_name VARCHAR(512),
# project_name VARCHAR(512),
# depends_on VARCHAR(2048),
# status INT NOT NULL,
# start_time BIGINT DEFAULT -1,
# end_time BIGINT DEFAULT -1,
# retry INT,
# pid INT,
# completion_percentage FLOAT DEFAULT -1.0,
# PRIMARY KEY(exec _id,job_name)
# )
# Example column-rendering schema, kept for reference.  This used to be a
# bare dict literal that was evaluated and immediately discarded at import
# time (dead code); it is now a comment:
# {
#     "exec_id": {"value": "", "data_type": "float"}
# }
class Executions_tab:
    """Data-access object for the ``executions_tab`` MySQL table.

    Rows are keyed by (exec_id, job_name) — see the CREATE TABLE comment
    above.  Column values are rendered by hand into SQL literal strings
    (string columns get single quotes), so only trusted data may be passed.
    """

    def __init__(self):
        # DbUtil owns the MySQL connection; release it with close().
        self.db = db_util.DbUtil()

    def insert_job(self, job_proto, project_name, exec_id, project_path):
        """Insert one execution row derived from a schedule-job proto."""
        arg_dict = self.proto_to_arg_dict(job_proto=job_proto,
                                          project_name=project_name,
                                          exec_id=exec_id,
                                          project_path=project_path)
        self.insert_dict(arg_dict)

    def update_job(self, job_proto, project_name, exec_id):
        """Update an existing execution row from a schedule-job proto.

        project_path is unknown here, so that column is left untouched and
        no update-trigger file is written.  (Previously this call raised
        TypeError because proto_to_arg_dict required project_path.)
        """
        arg_dict = self.proto_to_arg_dict(job_proto=job_proto,
                                          project_name=project_name,
                                          exec_id=exec_id)
        self.update_dict(arg_dict)

    def update_row(self, row):
        """Persist a full row dict and refresh its execution prototxt/json."""
        self.update_proto(row)
        arg_dict = self.row_to_arg_dict(db_job=row)
        self.update_dict(arg_dict=arg_dict)

    def update_proto(self, row):
        """Rewrite the execution's main.prototxt / main.json so the job named
        in ``row`` carries the row's new status.

        Args:
            row: dict-like DB row (needs project_path, exec_id, job_name,
                status).
        """
        current_exec_path = row["project_path"] + '/.executions/exec_' + str(row["exec_id"])
        current_exec_proto_path = current_exec_path + '/main.prototxt'
        current_exec_json_path = current_exec_path + '/main.json'
        if os.path.exists(current_exec_path) and os.path.exists(current_exec_proto_path):
            schedule_graph = krama_pb2.ScheduleGraph()
            # use context managers so handles are closed deterministically
            # (the original open(...).read()/write() calls leaked them)
            with open(current_exec_proto_path) as proto_file:
                text_format.Merge(text=proto_file.read(), message=schedule_graph)
            for idx, schedule_job in enumerate(schedule_graph.schedule_job):
                if str(schedule_job.name) == row["job_name"]:
                    schedule_graph.schedule_job[idx].status = common.EXECUTION_STATUS_DICT[int(str(row["status"]))]
            with open(current_exec_proto_path, 'w') as proto_file:
                proto_file.write(str(schedule_graph))
            with open(current_exec_json_path, 'w') as json_file:
                json_file.write(simplejson.dumps(pb2json(schedule_graph)))

    def row_to_arg_dict(self, db_job):
        """Render a DB row dict into SQL-literal strings (strings quoted).

        Returns:
            dict mapping column name -> SQL literal string.
        """
        arg_dict = {}
        # numeric columns are rendered bare, string columns single-quoted
        arg_dict['exec_id'] = str(db_job['exec_id'])
        arg_dict['project_name'] = "'" + str(db_job['project_name']) + "'"
        arg_dict['project_path'] = "'" + str(db_job['project_path']) + "'"
        arg_dict['job_name'] = "'" + str(db_job['job_name']) + "'"
        arg_dict['depends_on'] = "'" + str(db_job['depends_on']) + "'"
        arg_dict['status'] = str(db_job['status'])
        arg_dict['pid'] = str(db_job['pid'])
        arg_dict['start_time'] = str(db_job['start_time'])
        arg_dict['end_time'] = str(db_job['end_time'])
        arg_dict['retry'] = str(db_job['retry'])
        arg_dict['completion_percentage'] = str(db_job['completion_percentage'])
        return arg_dict

    def proto_to_arg_dict(self, job_proto, project_name, exec_id, project_path=None):
        """Render a schedule-job proto into SQL-literal strings.

        Args:
            job_proto: a ScheduleGraph job message.
            project_name: project the job belongs to.
            exec_id: execution id of the row.
            project_path: filesystem path of the project; None (the
                backward-compatible default) omits the column so update
                callers that do not know the path leave it unchanged.
        """
        arg_dict = {}
        arg_dict['exec_id'] = str(exec_id)
        arg_dict['project_name'] = "'" + str(project_name) + "'"
        if project_path is not None:
            arg_dict['project_path'] = "'" + str(project_path) + "'"
        if job_proto.HasField('name') and len(str(job_proto.name)) > 0:
            arg_dict['job_name'] = "'" + str(job_proto.name) + "'"
        arg_dict['depends_on'] = "'" + str(','.join(job_proto.depends_on)) + "'"
        if job_proto.HasField('status') and len(str(job_proto.status)) > 0:
            # BUGFIX: this used to copy job_proto.depends_on into the status
            # column; use the proto's status field instead.
            arg_dict['status'] = str(job_proto.status)
        else:
            arg_dict['status'] = str(common.EXECUTION_STATUS_UNKNOWN)
        if job_proto.HasField('pid') and len(str(job_proto.pid)) > 0:
            arg_dict['pid'] = str(job_proto.pid)
        if job_proto.HasField('start_time') and len(str(job_proto.start_time)) > 0:
            arg_dict['start_time'] = str(job_proto.start_time)
        if job_proto.HasField('end_time') and len(str(job_proto.end_time)) > 0:
            arg_dict['end_time'] = str(job_proto.end_time)
        if job_proto.HasField('retry') and len(str(job_proto.retry)) > 0:
            arg_dict['retry'] = str(job_proto.retry)
        else:
            arg_dict['retry'] = str(common.DEFAULT_EXECUTION_RETRY)
        if job_proto.HasField('completion_percentage') and len(str(job_proto.completion_percentage)) > 0:
            arg_dict['completion_percentage'] = str(job_proto.completion_percentage)
        else:
            arg_dict['completion_percentage'] = str(common.DEFAULT_EXECUTION_COMPLETION_PERC)
        return arg_dict

    def insert_dict(self, arg_dict):
        """INSERT the rendered values.

        NOTE(review): the statement is built by string concatenation, which
        is SQL-injection-prone; switch to parameterized queries if DbUtil
        supports them.
        """
        statement = ("INSERT INTO executions_tab (" + ",".join(arg_dict.keys()) +
                     ") VALUES (" + ','.join(arg_dict.values()) + ");")
        self.db.execute(statement=statement)

    def update_dict(self, arg_dict):
        """UPDATE the row matched by the primary-key columns in arg_dict, then
        write the update-trigger file so watchers reload the execution json.

        NOTE(review): statement built by string concatenation — see
        insert_dict.
        """
        statement = ("UPDATE executions_tab SET " +
                     ", ".join(k + '=' + v for (k, v) in arg_dict.items()) +
                     " WHERE " +
                     ' and '.join(k + '=' + v for (k, v) in arg_dict.items()
                                  if k in common.EXECUTIONS_TAB_PRIMARY_KEYS) + ";")
        self.db.execute(statement=statement)
        if 'project_path' in arg_dict:
            # only possible when the caller supplied project_path
            # (update_row does; update_job does not)
            with open(common.EXECUTION_UPDATE_TRIGGER_PATH, 'w') as trigger_file:
                trigger_file.write(arg_dict["project_path"].replace("'", "") +
                                   '/.executions/exec_' + arg_dict['exec_id'] + '/main.json')

    def get_all_jobs_executions_tab(self, project_name, exec_id):
        """Fetch all rows for one (project, execution) pair."""
        statement = ("SELECT * FROM executions_tab where project_name='" +
                     str(project_name) + "' and exec_id=" + str(exec_id) + ";")
        return self.db.fetch_dict(statement)

    def get_all_executions_tab(self):
        """Fetch every row of executions_tab."""
        statement = "SELECT * FROM executions_tab"
        return self.db.fetch_dict(statement)

    def close(self):
        """Release the underlying DB connection."""
        self.db.close()
if __name__=="__main__":
    # Ad-hoc smoke test: constructing the DAO opens a MySQL connection
    # via DbUtil (nothing else is exercised here).
    e=Executions_tab()
"""
.. module: hubcommander.bot_components.bot_classes
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
class BotPlugin:
    """Common base type for every HubCommander plugin.

    Concrete plugins override setup() to perform whatever initialization
    they need (credentials, clients, configuration).
    """

    def __init__(self):
        pass

    def setup(self, secrets, **kwargs):
        """Initialize the plugin; subclasses must override."""
        raise NotImplementedError()
class BotCommander(BotPlugin):
    """Plugin that exposes chat commands.

    Concrete commanders populate ``self.commands`` (initially empty).
    """

    def __init__(self):
        super().__init__()
        self.commands = {}

    def setup(self, secrets, **kwargs):
        """No-op by default; override to perform command setup."""
        pass
class BotAuthPlugin(BotPlugin):
    """Plugin that provides an authentication hook."""

    def __init__(self):
        super().__init__()

    def setup(self, secrets, **kwargs):
        """No-op by default; override if the auth plugin needs setup."""
        pass

    def authenticate(self, *args, **kwargs):
        """Perform the authentication check; subclasses must override."""
        raise NotImplementedError()
| 764 | 252 |
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
# --- Choice tuples shared by the models below: (stored value, display label) ---
HTTP_CHOICE = (
    ('HTTP', 'HTTP'),
    ('HTTPS', 'HTTPS')
)
# HTTP verbs an API definition may use.
REQUEST_TYPE_CHOICE = (
    ('POST', 'POST'),
    ('GET', 'GET'),
    ('PUT', 'PUT'),
    ('DELETE', 'DELETE')
)
# How request parameters are encoded (form-data / raw body / RESTful path).
REQUEST_PARAMETER_TYPE_CHOICE = (
    ('form-data', '表单(form-data)'),
    ('raw', '源数据(raw)'),
    ('Restful', 'Restful')
)
PARAMETER_TYPE_CHOICE = (
    ('text', 'text'),
    ('file', 'file')
)
# Expected / recorded HTTP status codes.
HTTP_CODE_CHOICE = (
    ('200', '200'),
    ('404', '404'),
    ('400', '400'),
    ('502', '502'),
    ('500', '500'),
    ('302', '302'),
)
# Response-validation strategies (none / status only / JSON / exact / regex).
EXAMINE_TYPE_CHOICE = (
    ('no_check', '不校验'),
    ('only_check_status', '校验http状态'),
    ('json', 'JSON校验'),
    ('entirely_check', '完全校验'),
    ('Regular_check', '正则校验'),
)
# Scheduling interval units (minute / hour / day / week).
UNIT_CHOICE = (
    ('m', '分'),
    ('h', '时'),
    ('d', '天'),
    ('w', '周'),
)
# Test outcome.
RESULT_CHOICE = (
    ('PASS', '成功'),
    ('FAIL', '失败'),
)
# Task kinds: looping interval vs. scheduled time.
TASK_CHOICE = (
    ('circulation', '循环'),
    ('timing', '定时'),
)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook on the user model: mint a DRF auth token for each
    newly created user (updates to existing users are ignored)."""
    if not created:
        return
    Token.objects.create(user=instance)
# ==================扩展用户====================================
class UserProfile(models.Model):
    """One-to-one extension of Django's User adding a phone number."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='用户', related_name='user')
    phone = models.CharField(max_length=11, default='', blank=True, verbose_name='手机号')
    # NOTE(review): __unicode__ is Python-2-only and is dead code under
    # Python 3; __str__ below is what Django actually uses.
    def __unicode__(self):
        return self.user.username
    def __str__(self):
        return self.phone
class Project(models.Model):
    """A project (Web or App) owned by a user; the root of all other models."""
    ProjectType = (
        ('Web', 'Web'),
        ('App', 'App')
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50, verbose_name='项目名称')
    version = models.CharField(max_length=50, verbose_name='版本')
    type = models.CharField(max_length=50, verbose_name='类型', choices=ProjectType)
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    status = models.BooleanField(default=True, verbose_name='状态')
    LastUpdateTime = models.DateTimeField(auto_now=True, verbose_name='最近修改时间')
    createTime = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, max_length=1024, verbose_name='创建人')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '项目'
        verbose_name_plural = '项目'
class ProjectDynamic(models.Model):
    """Activity/audit log entry for a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, related_name='dynamic_project', on_delete=models.CASCADE, verbose_name='所属项目')
    time = models.DateTimeField(max_length=128, verbose_name='操作时间')
    type = models.CharField(max_length=50, verbose_name='操作类型')
    operationObject = models.CharField(max_length=50, verbose_name='操作对象')
    user = models.ForeignKey(User, blank=True, null=True, related_name='userName',
                             on_delete=models.SET_NULL, verbose_name='操作人')
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    def __unicode__(self):
        return self.type
    class Meta:
        verbose_name = '项目动态'
        verbose_name_plural = '项目动态'
class ProjectMember(models.Model):
    """Membership of a user in a project, with a permission role."""
    CHOICES = (
        ('超级管理员', '超级管理员'),
        ('开发人员', '开发人员'),
        ('测试人员', '测试人员')
    )
    id = models.AutoField(primary_key=True)
    permissionType = models.CharField(max_length=50, verbose_name='权限角色', choices=CHOICES)
    project = models.ForeignKey(Project, related_name='member_project', on_delete=models.CASCADE, verbose_name='所属项目')
    user = models.ForeignKey(User, related_name='member_user', on_delete=models.CASCADE, verbose_name='用户')
    def __unicode__(self):
        return self.permissionType
    def __str__(self):
        return self.permissionType
    class Meta:
        verbose_name = '项目成员'
        verbose_name_plural = '项目成员'
class GlobalHost(models.Model):
    """Named host/base-URL entry belonging to a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='项目')
    name = models.CharField(max_length=50, verbose_name='名称')
    host = models.CharField(max_length=1024, verbose_name='Host地址')
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    status = models.BooleanField(default=True, verbose_name='状态')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'HOST'
        verbose_name_plural = 'HOST管理'
class CustomMethod(models.Model):
    """User-defined method (stored code snippet) attached to a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='项目')
    name = models.CharField(max_length=50, verbose_name='方法名')
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    type = models.CharField(max_length=50, verbose_name='类型')
    dataCode = models.TextField(verbose_name='代码')
    status = models.BooleanField(default=True, verbose_name='状态')
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name = '自定义方法'
        verbose_name_plural = '自定义方法'
class ApiGroupLevelFirst(models.Model):
    """First-level grouping of APIs within a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='项目')
    name = models.CharField(max_length=50, verbose_name='接口一级分组名称')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '接口分组'
        verbose_name_plural = '接口分组'
class ApiInfo(models.Model):
    """API definition: endpoint, HTTP method, parameter format and mock
    configuration, plus last-updated bookkeeping."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, related_name='api_project', on_delete=models.CASCADE, verbose_name='所属项目')
    apiGroupLevelFirst = models.ForeignKey(ApiGroupLevelFirst, blank=True, null=True,
                                           related_name='First',
                                           on_delete=models.SET_NULL, verbose_name='所属一级分组')
    name = models.CharField(max_length=50, verbose_name='接口名称')
    httpType = models.CharField(max_length=50, default='HTTP', verbose_name='http/https', choices=HTTP_CHOICE)
    requestType = models.CharField(max_length=50, verbose_name='请求方式', choices=REQUEST_TYPE_CHOICE)
    apiAddress = models.CharField(max_length=1024, verbose_name='接口地址')
    requestParameterType = models.CharField(max_length=50, verbose_name='请求参数格式', choices=REQUEST_PARAMETER_TYPE_CHOICE)
    status = models.BooleanField(default=True, verbose_name='状态')
    mockStatus = models.BooleanField(default=False, verbose_name="mock状态")
    mockCode = models.CharField(max_length=50, blank=True, null=True, verbose_name='HTTP状态', choices=HTTP_CODE_CHOICE)
    data = models.TextField(blank=True, null=True, verbose_name='mock内容')
    lastUpdateTime = models.DateTimeField(auto_now=True, verbose_name='最近更新')
    userUpdate = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, max_length=50, verbose_name='更新人',
                                   related_name='ApiUpdateUser')
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '接口'
        verbose_name_plural = '接口管理'
class ApiHead(models.Model):
    """Request header (name/value) belonging to an API definition."""
    id = models.AutoField(primary_key=True)
    api = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, verbose_name="所属接口", related_name='headers')
    name = models.CharField(max_length=1024, verbose_name="标签")
    value = models.CharField(max_length=1024, blank=True, null=True, verbose_name='内容')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '请求头'
        verbose_name_plural = '请求头管理'
class ApiParameter(models.Model):
    """Structured request parameter belonging to an API definition."""
    id = models.AutoField(primary_key=True)
    api = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, verbose_name="所属接口", related_name='requestParameter')
    name = models.CharField(max_length=1024, verbose_name="参数名")
    _type = models.CharField(default="String", max_length=1024, verbose_name='参数类型', choices=(('Int', 'Int'), ('String', 'String')))
    value = models.CharField(max_length=1024, blank=True, null=True, verbose_name='参数值')
    required = models.BooleanField(default=True, verbose_name="是否必填")
    restrict = models.CharField(max_length=1024, blank=True, null=True, verbose_name="输入限制")
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name="描述")
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '请求参数'
        verbose_name_plural = '请求参数管理'
class ApiParameterRaw(models.Model):
    """Raw (free-form body) request parameter for an API definition."""
    id = models.AutoField(primary_key=True)
    api = models.OneToOneField(ApiInfo, on_delete=models.CASCADE, verbose_name="所属接口", related_name='requestParameterRaw')
    data = models.TextField(blank=True, null=True, verbose_name='内容')
    class Meta:
        verbose_name = '请求参数Raw'
class ApiResponse(models.Model):
    """Expected response parameter documented for an API definition."""
    id = models.AutoField(primary_key=True)
    api = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, verbose_name="所属接口", related_name='response')
    name = models.CharField(max_length=1024, verbose_name="参数名")
    _type = models.CharField(default="String", max_length=1024, verbose_name='参数类型', choices=(('Int', 'Int'), ('String', 'String')))
    value = models.CharField(max_length=1024, blank=True, null=True, verbose_name='参数值')
    required = models.BooleanField(default=True, verbose_name="是否必含")
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name="描述")
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '返回参数'
        verbose_name_plural = '返回参数管理'
class APIRequestHistory(models.Model):
    """History record of a manual request made against an API."""
    id = models.AutoField(primary_key=True)
    api = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, verbose_name='接口')
    requestTime = models.DateTimeField(auto_now_add=True, verbose_name='请求时间')
    requestType = models.CharField(max_length=50, verbose_name='请求方法')
    requestAddress = models.CharField(max_length=1024, verbose_name='请求地址')
    httpCode = models.CharField(max_length=50, verbose_name='HTTP状态')
    def __unicode__(self):
        return self.requestAddress
    class Meta:
        verbose_name = '接口请求历史'
        verbose_name_plural = '接口请求历史'
class ApiOperationHistory(models.Model):
    """Audit log of edit operations performed on an API definition."""
    id = models.AutoField(primary_key=True)
    api = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, verbose_name='接口')
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, max_length=50, verbose_name='用户姓名')
    time = models.DateTimeField(auto_now_add=True, verbose_name='操作时间')
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='操作内容')
    def __unicode__(self):
        return self.description
    class Meta:
        verbose_name = '接口操作历史'
        verbose_name_plural = '接口操作历史'
class AutomationGroupLevelFirst(models.Model):
    """First-level grouping of automation test cases within a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='项目')
    name = models.CharField(max_length=50, verbose_name='用例一级分组')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '用例分组'
        verbose_name_plural = '用例分组管理'
class AutomationTestCase(models.Model):
    """Automated test case, optionally placed in a first-level group."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='所属项目')
    automationGroupLevelFirst = models.ForeignKey(AutomationGroupLevelFirst, blank=True, null=True,
                                                  on_delete=models.SET_NULL, verbose_name='所属用例一级分组', related_name="automationGroup")
    # automationGroupLevelSecond = models.ForeignKey(AutomationGroupLevelSecond, blank=True, null=True,
    #                                                on_delete=models.SET_NULL, verbose_name='所属用例二级分组')
    caseName = models.CharField(max_length=50, verbose_name='用例名称')
    user = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, verbose_name="创建人",
                             related_name="createUser")
    description = models.CharField(max_length=1024, blank=True, null=True, verbose_name='描述')
    updateTime = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    def __unicode__(self):
        return self.caseName
    def __str__(self):
        return self.caseName
    class Meta:
        verbose_name = '自动化测试用例'
        verbose_name_plural = '自动化测试用例'
class AutomationCaseApi(models.Model):
    """One API call executed as a step of an automation test case, with its
    validation (examine) configuration."""
    id = models.AutoField(primary_key=True)
    automationTestCase = models.ForeignKey(AutomationTestCase, on_delete=models.CASCADE,
                                           verbose_name='用例', related_name="api")
    name = models.CharField(max_length=50, verbose_name='接口名称')
    httpType = models.CharField(max_length=50, default='HTTP', verbose_name='HTTP/HTTPS', choices=HTTP_CHOICE)
    requestType = models.CharField(max_length=50, verbose_name='请求方式', choices=REQUEST_TYPE_CHOICE)
    apiAddress = models.CharField(max_length=1024, verbose_name='接口地址')
    requestParameterType = models.CharField(max_length=50, verbose_name='参数请求格式', choices=REQUEST_PARAMETER_TYPE_CHOICE)
    formatRaw = models.BooleanField(default=False, verbose_name="是否转换成源数据")
    examineType = models.CharField(default='no_check', max_length=50, verbose_name='校验方式', choices=EXAMINE_TYPE_CHOICE)
    httpCode = models.CharField(max_length=50, blank=True, null=True, verbose_name='HTTP状态', choices=HTTP_CODE_CHOICE)
    responseData = models.TextField(blank=True, null=True, verbose_name='返回内容')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '用例接口'
        verbose_name_plural = '用例接口管理'
class AutomationHead(models.Model):
    """Request header for a case API step; may be correlated from a
    previous step (interrelate)."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.ForeignKey(AutomationCaseApi, related_name='header',
                                          on_delete=models.CASCADE, verbose_name='接口')
    name = models.CharField(max_length=1024, verbose_name='参数名')
    value = models.CharField(max_length=1024, verbose_name='内容')
    interrelate = models.BooleanField(default=False, verbose_name='是否关联')
    def __unicode__(self):
        return self.value
    class Meta:
        verbose_name = '请求头'
        verbose_name_plural = '请求头管理'
class AutomationParameter(models.Model):
    """Request parameter for a case API step; may be correlated from a
    previous step (interrelate)."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.ForeignKey(AutomationCaseApi, related_name='parameterList',
                                          on_delete=models.CASCADE, verbose_name='接口')
    name = models.CharField(max_length=1024, verbose_name='参数名')
    value = models.CharField(max_length=1024, verbose_name='内容', blank=True, null=True)
    interrelate = models.BooleanField(default=False, verbose_name='是否关联')
    def __unicode__(self):
        return self.value
    class Meta:
        verbose_name = '接口参数'
        verbose_name_plural = '接口参数管理'
class AutomationParameterRaw(models.Model):
    """Raw (free-form body) request parameters for a case API step."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.OneToOneField(AutomationCaseApi, related_name='parameterRaw',
                                             on_delete=models.CASCADE, verbose_name='接口')
    data = models.TextField(verbose_name='源数据请求参数', blank=True, null=True)
    class Meta:
        verbose_name = '源数据参数'
        verbose_name_plural = '源数据参数管理'
class AutomationResponseJson(models.Model):
    """Response-correlation parameter: a JSON path (or regex) extracted
    from a step's response for use by later steps."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.ForeignKey(AutomationCaseApi, related_name='response',
                                          on_delete=models.CASCADE, verbose_name='接口')
    name = models.CharField(max_length=1024, verbose_name='JSON参数', blank=True, null=True)
    tier = models.CharField(max_length=1024, verbose_name='层级关系', blank=True, null=True)
    type = models.CharField(max_length=1024, verbose_name="关联类型", default="json", choices=(('json', 'json'),('Regular', 'Regular')))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '结果JSON参数'
        verbose_name_plural = '结果JSON参数管理'
class AutomationTestResult(models.Model):
    """Result of a manually executed case API step (one-to-one)."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.OneToOneField(AutomationCaseApi, on_delete=models.CASCADE, verbose_name='接口'
                                             , related_name="test_result")
    url = models.CharField(max_length=1024, verbose_name='请求地址')
    requestType = models.CharField(max_length=1024, verbose_name='请求方式', choices=REQUEST_TYPE_CHOICE)
    host = models.CharField(max_length=1024, verbose_name='测试地址', null=True, blank=True)
    header = models.CharField(max_length=1024, blank=True, null=True, verbose_name='请求头')
    parameter = models.TextField(blank=True, null=True, verbose_name='请求参数')
    statusCode = models.CharField(blank=True, null=True, max_length=1024, verbose_name='期望HTTP状态', choices=HTTP_CODE_CHOICE)
    examineType = models.CharField(max_length=1024, verbose_name='匹配规则')
    data = models.TextField(blank=True, null=True, verbose_name='规则内容')
    result = models.CharField(max_length=50, verbose_name='测试结果', choices=RESULT_CHOICE)
    httpStatus = models.CharField(max_length=50, blank=True, null=True, verbose_name='http状态', choices=HTTP_CODE_CHOICE)
    responseData = models.TextField(blank=True, null=True, verbose_name='实际返回内容')
    testTime = models.DateTimeField(auto_now_add=True, verbose_name='测试时间')
    def __unicode__(self):
        return self.httpStatus
    class Meta:
        verbose_name = '手动测试结果'
        verbose_name_plural = '手动测试结果管理'
class AutomationTestTask(models.Model):
    """Scheduled run (looping interval or fixed time) of a project's cases."""
    id = models.AutoField(primary_key=True)
    project = models.OneToOneField(Project, on_delete=models.CASCADE, verbose_name='项目')
    Host = models.ForeignKey(GlobalHost, on_delete=models.CASCADE, verbose_name='HOST')
    name = models.CharField(max_length=50, verbose_name='任务名称')
    type = models.CharField(max_length=50, verbose_name='类型', choices=TASK_CHOICE)
    frequency = models.IntegerField(blank=True, null=True, verbose_name='间隔')
    unit = models.CharField(max_length=50, blank=True, null=True, verbose_name='单位', choices=UNIT_CHOICE)
    startTime = models.DateTimeField(max_length=50, verbose_name='开始时间')
    endTime = models.DateTimeField(max_length=50, verbose_name='结束时间')
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '用例定时任务'
        verbose_name_plural = '用例定时任务管理'
class AutomationTaskRunTime(models.Model):
    """Start time and elapsed time of one task run for a project."""
    id = models.AutoField(primary_key=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, verbose_name='项目')
    startTime = models.CharField(max_length=50, verbose_name='开始时间')
    host = models.CharField(max_length=1024, null=True, blank=True, verbose_name='测试地址')
    # NOTE(review): field is named elapsedTime but labelled '结束时间'
    # (end time) — confirm which semantic is intended.
    elapsedTime = models.CharField(max_length=50, verbose_name='结束时间')
    class Meta:
        verbose_name = '用例任务执行时间'
        verbose_name_plural = '用例任务执行时间'
class AutomationCaseTestResult(models.Model):
    """Result of a case API step executed by a scheduled task."""
    id = models.AutoField(primary_key=True)
    automationCaseApi = models.ForeignKey(AutomationCaseApi, on_delete=models.CASCADE, verbose_name='接口'
                                          , related_name="auto_result")
    header = models.CharField(max_length=1024, blank=True, null=True, verbose_name='请求头')
    parameter = models.TextField(blank=True, null=True, verbose_name='请求参数')
    result = models.CharField(max_length=50, verbose_name='测试结果', choices=RESULT_CHOICE)
    httpStatus = models.CharField(max_length=50, blank=True, null=True, verbose_name='http状态', choices=HTTP_CODE_CHOICE)
    responseHeader = models.TextField(blank=True, null=True, verbose_name='返回头')
    responseData = models.TextField(blank=True, null=True, verbose_name='实际返回内容')
    testTime = models.CharField(max_length=128, null=True, blank=True, verbose_name='测试时间')
    def __unicode__(self):
        return self.httpStatus
    class Meta:
        verbose_name = '自动测试结果'
        verbose_name_plural = '自动测试结果管理'
class AutomationReportSendConfig(models.Model):
    """Per-project outgoing-mail settings used when sending test reports."""
    id = models.AutoField(primary_key=True)
    project = models.OneToOneField(Project, on_delete=models.CASCADE, verbose_name="项目")
    reportFrom = models.EmailField(max_length=1024, blank=True, null=True, verbose_name="发送人邮箱")
    mailUser = models.CharField(max_length=1024, blank=True, null=True, verbose_name="用户名")
    mailPass = models.CharField(max_length=1024, blank=True, null=True, verbose_name="口令")
    mailSmtp = models.CharField(max_length=1024, blank=True, null=True, verbose_name="邮箱服务器")
    def __unicode__(self):
        return self.reportFrom
    class Meta:
        verbose_name = "邮件发送配置"
        verbose_name_plural = "邮件发送配置"
class VisitorsRecord(models.Model):
    """Geo-located record of one visitor access."""
    id = models.AutoField(primary_key=True)
    formattedAddress = models.CharField(max_length=1024, blank=True, null=True, verbose_name="访客地址")
    country = models.CharField(max_length=50, blank=True, null=True, verbose_name="国家")
    province = models.CharField(max_length=50, blank=True, null=True, verbose_name="省份")
    city = models.CharField(max_length=50, blank=True, null=True, verbose_name="城市")
    district = models.CharField(max_length=50, blank=True, null=True, verbose_name="县级")
    township = models.CharField(max_length=50, blank=True, null=True, verbose_name="镇")
    street = models.CharField(max_length=50, blank=True, null=True, verbose_name="街道")
    number = models.CharField(max_length=50, blank=True, null=True, verbose_name="门牌号")
    success = models.CharField(max_length=50, blank=True, null=True, verbose_name="成功")
    reason = models.CharField(max_length=1024, blank=True, null=True, verbose_name="原因")
    callTime = models.DateTimeField(auto_now_add=True, verbose_name="访问时间")
    def __unicode__(self):
        return self.formattedAddress
    class Meta:
        verbose_name = "访客"
        verbose_name_plural = "访客查看"
| 23,320 | 8,287 |
from gevent import monkey
monkey.patch_all()
from gevent.pywsgi import WSGIServer
from werkzeug.serving import run_with_reloader
import os
from app import app
# Bind address/port and dev-reload flag, all overridable via environment.
SERVER_HOST = os.getenv('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.getenv('SERVER_PORT', '80'))
SERVER_RELOAD = os.getenv('SERVER_RELOAD', 'no') == 'yes'
def run_server():
    """Serve `app` on gevent's WSGI server; blocks until interrupted."""
    http_server = WSGIServer((SERVER_HOST, SERVER_PORT), app)
    http_server.serve_forever()
if SERVER_RELOAD:
    # Werkzeug's reloader restarts the process on source changes (dev use).
    run_with_reloader(run_server)()
else:
    run_server()
| 511 | 192 |
import os
import subprocess
import webbrowser
import clipboard
import requests
from bs4 import BeautifulSoup
from pyfiglet import Figlet
from Database.DatabaseManager import DatabaseManager as Animedb
class Downloader(Animedb):
    """Interactive nyaa torrent fetcher backed by the Animedb database.

    Scrapes search results from nyaa.iss.one, hands the magnet/torrent link
    to Free Download Manager (falling back to the browser), and tracks the
    next episode number per show.
    """

    def __init__(self, delay=60):
        """Draw the banner and prepare search state.

        Args:
            delay: seconds the caller should wait after a successful fetch.
        """
        self._COLS = os.get_terminal_size().columns
        self.showHeader()
        super().__init__()
        self.__URL = "https://nyaa.iss.one/?f=0&c=0_0&q="
        self.__delay = delay

    def showHeader(self, font=None):
        """Clear the screen and print the banner, picking a font that fits
        the terminal width unless one is given explicitly."""
        os.system('clear||cls')
        cols = self._COLS
        self.drawline(cols)
        if font is not None:
            self.fancyPrint("Anime Downloader Script", font)
        elif cols <= 125:
            self.fancyPrint("Anime Downloader Script", 'small')
        else:
            self.fancyPrint("Anime Downloader Script", 'isometric3')
        self.drawline(cols)

    def downloadFromDB(self, n):
        """Fetch the next episode for DB entry ``n`` and bump its EP field.

        Returns:
            The configured delay on success, 1 when the episode is not yet
            available (so callers retry soon).
        """
        url = self.__URL
        row = list(self.animedb['Downloader'][n].values())
        ep = self.__incrementEP(row[3])
        # columns 0-5 are query terms; column 3 is the episode number,
        # which we replace with the incremented value
        for j in range(6):
            if row[j] == 'N/A':
                continue
            elif j == 3:
                url += ep + "+"
            else:
                url += row[j] + "+"
        self.fancyPrint(f'{row[1]}\nEP-{row[3]} -> {ep}', 'digital')
        try:
            self.__downloader(url)
            self.fancyPrint("COPIED TO CLIPBOARD", 'digital')
            self.fancyPrint("UPDATED EPISODE IN DATABASE", 'straight')
            self.update(n, "EP", ep)
            return self.__delay
        except Exception:  # BUGFIX: bare except also swallowed KeyboardInterrupt
            self.fancyPrint("NOT YET AVAILABLE", 'short')
            return 1

    def downloadFromInput(self, name, ep):
        """Fetch episode ep+1 of ``name`` from a user-typed query."""
        url = self.__URL
        ep = self.__incrementEP(ep)
        url += f"{name} {ep}"
        try:
            self.__downloader(url)
            self.fancyPrint(f"DOWNLOADING EP-{ep}", 'digital')
            return self.__delay
        except Exception:  # BUGFIX: bare except also swallowed KeyboardInterrupt
            self.fancyPrint("NOT YET AVAILABLE", 'short')
            return 1

    def __downloader(self, url):
        """Scrape the first search result, open it in the download client
        and copy the secondary link to the clipboard.  Raises if the result
        page has no matching entries."""
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        results = soup.find_all('td', class_='text-center')
        link = results[0].find_all('a')
        self.__openClient(link)
        clipboard.copy(link[1]["href"])
        return link

    def __openClient(self, link):
        """Hand the link to Free Download Manager (macOS `open -a`); fall
        back to opening the torrent page in the default browser."""
        try:
            subprocess.run(f'open -a "Free Download Manager" {link[1]["href"]}', shell=True, check=True)
        except Exception:  # BUGFIX: narrowed from bare except
            self.fancyPrint("Try Installing Free Download Manager", 'mini')
            print("It also Backs Up as a Torrent Client")
            download = "https://nyaa.iss.one/" + link[0]["href"]
            webbrowser.open(download, new=2)

    def __incrementEP(self, ep):
        """Return ep+1 as a string zero-padded to at least two digits."""
        return f"{int(ep) + 1:02d}"

    def drawline(self, cols):
        """Print a full-width X~~~X separator line."""
        print(end='\nX')
        for _ in range(cols - 2):
            print(end='~')
        print('X\n')

    def commitToDb(self):
        """Ask the user for confirmation, then commit pending DB updates."""
        # NOTE(review): '\c' is not a recognized escape — presumably meant
        # as a no-newline marker for normalPrint; confirm against Animedb.
        self.normalPrint("Commit?? : ", end='\c')
        if input().upper() == 'Y':
            self.commit()
            self.fancyPrint("COMMITED UPDATES", 'short')
        self.drawline(self._COLS)

    def fancyPrint(self, text, font='digital'):
        """Render text via figlet|lolcat; fall back to pyfiglet when the
        external tools are unavailable."""
        try:
            subprocess.run(f'figlet -w $(tput cols) -c -f {font} "{text}" | lolcat', shell=True, check=True)
        except Exception:  # BUGFIX: narrowed from bare except
            f = Figlet(font=font)
            print(f.renderText(text))
| 3,533 | 1,114 |
#!/usr/bin/env python
"""
Make plots of the Student's t-distribution for different degrees of freedom
"""
import matplotlib.pyplot as pl
from scipy.stats import norm
from scipy.stats import t
import numpy as np
mu = 0. # mean of the reference Gaussian
nus = [1., 2., 5, 10, 100] # degrees of freedom, nu (NOT standard deviations)
markers = ['b-', 'r-', 'm-', 'c-', 'g-'] # one line colour/style per nu
x = np.linspace(-6, 6, 1000) # abscissa grid on which the pdfs are evaluated
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(6,5), dpi=100)
# plot the t pdf for each choice of degrees of freedom
for i, nu in enumerate(nus):
    pl.plot(x, t.pdf(x, nu), markers[i], label='$\\nu=%d$'%nu)
# plot a Gaussian for comparison (the nu -> infinity limit)
pl.plot(x, norm.pdf(x, mu, 1.), 'k--', label='$N(0,1)$')
ax = pl.gca()
ax.set_xlabel('$t$', fontsize=14)
ax.set_ylabel('$p(t)$', fontsize=14)
ax.legend(loc='best', frameon=False)
fig.subplots_adjust(bottom=0.15) # leave room for the x-axis label
pl.savefig('../studentst.pdf') # assumes the parent directory is writable -- TODO confirm
pl.show()
| 924 | 405 |
import argparse
import sys
from html2text import HTML2Text, __version__, config
def main():
    """Command-line entry point for html2text.

    Builds the argparse CLI (defaults come from ``html2text.config``),
    reads HTML from a file or stdin, decodes it, configures an
    ``HTML2Text`` converter from the parsed options, and writes the
    Markdown result to stdout.
    """
    baseurl = ""

    # ANSI escape sequences used to colourise the decode-error warning.
    class bcolors:
        HEADER = "\033[95m"
        OKBLUE = "\033[94m"
        OKGREEN = "\033[92m"
        WARNING = "\033[93m"
        FAIL = "\033[91m"
        ENDC = "\033[0m"
        BOLD = "\033[1m"
        UNDERLINE = "\033[4m"

    # Build the command-line interface; each option mirrors an HTML2Text
    # attribute and takes its default from html2text.config.
    p = argparse.ArgumentParser()
    p.add_argument(
        "--default-image-alt",
        dest="default_image_alt",
        default=config.DEFAULT_IMAGE_ALT,
        help="The default alt string for images with missing ones",
    )
    p.add_argument(
        "--pad-tables",
        dest="pad_tables",
        action="store_true",
        default=config.PAD_TABLES,
        help="pad the cells to equal column width in tables",
    )
    p.add_argument(
        "--no-wrap-links",
        dest="wrap_links",
        action="store_false",
        default=config.WRAP_LINKS,
        help="don't wrap links during conversion",
    )
    p.add_argument(
        "--wrap-list-items",
        dest="wrap_list_items",
        action="store_true",
        default=config.WRAP_LIST_ITEMS,
        help="wrap list items during conversion",
    )
    p.add_argument(
        "--ignore-emphasis",
        dest="ignore_emphasis",
        action="store_true",
        default=config.IGNORE_EMPHASIS,
        help="don't include any formatting for emphasis",
    )
    p.add_argument(
        "--reference-links",
        dest="inline_links",
        action="store_false",
        default=config.INLINE_LINKS,
        help="use reference style links instead of inline links",
    )
    p.add_argument(
        "--ignore-links",
        dest="ignore_links",
        action="store_true",
        default=config.IGNORE_ANCHORS,
        help="don't include any formatting for links",
    )
    p.add_argument(
        "--protect-links",
        dest="protect_links",
        action="store_true",
        default=config.PROTECT_LINKS,
        help="protect links from line breaks surrounding them with angle brackets",
    )
    p.add_argument(
        "--ignore-images",
        dest="ignore_images",
        action="store_true",
        default=config.IGNORE_IMAGES,
        help="don't include any formatting for images",
    )
    p.add_argument(
        "--images-as-html",
        dest="images_as_html",
        action="store_true",
        default=config.IMAGES_AS_HTML,
        help=(
            "Always write image tags as raw html; preserves `height`, `width` and "
            "`alt` if possible."
        ),
    )
    p.add_argument(
        "--images-to-alt",
        dest="images_to_alt",
        action="store_true",
        default=config.IMAGES_TO_ALT,
        help="Discard image data, only keep alt text",
    )
    p.add_argument(
        "--images-with-size",
        dest="images_with_size",
        action="store_true",
        default=config.IMAGES_WITH_SIZE,
        help=(
            "Write image tags with height and width attrs as raw html to retain "
            "dimensions"
        ),
    )
    p.add_argument(
        "-g",
        "--google-doc",
        action="store_true",
        dest="google_doc",
        default=False,
        help="convert an html-exported Google Document",
    )
    p.add_argument(
        "-d",
        "--dash-unordered-list",
        action="store_true",
        dest="ul_style_dash",
        default=False,
        help="use a dash rather than a star for unordered list items",
    )
    p.add_argument(
        "-e",
        "--asterisk-emphasis",
        action="store_true",
        dest="em_style_asterisk",
        default=False,
        help="use an asterisk rather than an underscore for emphasized text",
    )
    p.add_argument(
        "-b",
        "--body-width",
        dest="body_width",
        type=int,
        default=config.BODY_WIDTH,
        help="number of characters per output line, 0 for no wrap",
    )
    p.add_argument(
        "-i",
        "--google-list-indent",
        dest="list_indent",
        type=int,
        default=config.GOOGLE_LIST_INDENT,
        help="number of pixels Google indents nested lists",
    )
    p.add_argument(
        "-s",
        "--hide-strikethrough",
        action="store_true",
        dest="hide_strikethrough",
        default=False,
        help="hide strike-through text. only relevant when -g is " "specified as well",
    )
    p.add_argument(
        "--escape-all",
        action="store_true",
        dest="escape_snob",
        default=False,
        help=(
            "Escape all special characters. Output is less readable, but avoids "
            "corner case formatting issues."
        ),
    )
    p.add_argument(
        "--bypass-tables",
        action="store_true",
        dest="bypass_tables",
        default=config.BYPASS_TABLES,
        help="Format tables in HTML rather than Markdown syntax.",
    )
    p.add_argument(
        "--ignore-tables",
        action="store_true",
        dest="ignore_tables",
        default=config.IGNORE_TABLES,
        help="Ignore table-related tags (table, th, td, tr) " "while keeping rows.",
    )
    p.add_argument(
        "--single-line-break",
        action="store_true",
        dest="single_line_break",
        default=config.SINGLE_LINE_BREAK,
        help=(
            "Use a single line break after a block element rather than two line "
            "breaks. NOTE: Requires --body-width=0"
        ),
    )
    p.add_argument(
        "--unicode-snob",
        action="store_true",
        dest="unicode_snob",
        default=config.UNICODE_SNOB,
        help="Use unicode throughout document",
    )
    p.add_argument(
        "--no-automatic-links",
        action="store_false",
        dest="use_automatic_links",
        default=config.USE_AUTOMATIC_LINKS,
        help="Do not use automatic links wherever applicable",
    )
    p.add_argument(
        "--no-skip-internal-links",
        action="store_false",
        dest="skip_internal_links",
        default=config.SKIP_INTERNAL_LINKS,
        help="Do not skip internal links",
    )
    p.add_argument(
        "--links-after-para",
        action="store_true",
        dest="links_each_paragraph",
        default=config.LINKS_EACH_PARAGRAPH,
        help="Put links after each paragraph instead of document",
    )
    p.add_argument(
        "--mark-code",
        action="store_true",
        dest="mark_code",
        default=config.MARK_CODE,
        help="Mark program code blocks with [code]...[/code]",
    )
    p.add_argument(
        "--decode-errors",
        dest="decode_errors",
        default=config.DECODE_ERRORS,
        help=(
            "What to do in case of decode errors.'ignore', 'strict' and 'replace' are "
            "acceptable values"
        ),
    )
    p.add_argument(
        "--open-quote",
        dest="open_quote",
        default=config.OPEN_QUOTE,
        help="The character used to open quotes",
    )
    p.add_argument(
        "--close-quote",
        dest="close_quote",
        default=config.CLOSE_QUOTE,
        help="The character used to close quotes",
    )
    p.add_argument(
        "--version", action="version", version=".".join(map(str, __version__))
    )
    p.add_argument("filename", nargs="?")
    p.add_argument("encoding", nargs="?", default="utf-8")
    args = p.parse_args()

    # Read the raw HTML bytes: from the named file, or stdin when the
    # filename is absent or "-".
    if args.filename and args.filename != "-":
        with open(args.filename, "rb") as fp:
            data = fp.read()
    else:
        data = sys.stdin.buffer.read()

    # Decode the input; on failure print a coloured hint pointing at the
    # --decode-errors flag, then re-raise the original error.
    try:
        data = data.decode(args.encoding, args.decode_errors)
    except UnicodeDecodeError as err:
        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
        warning += " Use the " + bcolors.OKGREEN
        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
        print(warning)
        raise err

    h = HTML2Text(baseurl=baseurl)
    # handle options
    if args.ul_style_dash:
        h.ul_item_mark = "-"
    if args.em_style_asterisk:
        h.emphasis_mark = "*"
        h.strong_mark = "__"
    h.body_width = args.body_width
    h.google_list_indent = args.list_indent
    h.ignore_emphasis = args.ignore_emphasis
    h.ignore_links = args.ignore_links
    h.protect_links = args.protect_links
    h.ignore_images = args.ignore_images
    h.images_as_html = args.images_as_html
    h.images_to_alt = args.images_to_alt
    h.images_with_size = args.images_with_size
    h.google_doc = args.google_doc
    h.hide_strikethrough = args.hide_strikethrough
    h.escape_snob = args.escape_snob
    h.bypass_tables = args.bypass_tables
    h.ignore_tables = args.ignore_tables
    h.single_line_break = args.single_line_break
    h.inline_links = args.inline_links
    h.unicode_snob = args.unicode_snob
    h.use_automatic_links = args.use_automatic_links
    h.skip_internal_links = args.skip_internal_links
    h.links_each_paragraph = args.links_each_paragraph
    h.mark_code = args.mark_code
    h.wrap_links = args.wrap_links
    h.wrap_list_items = args.wrap_list_items
    h.pad_tables = args.pad_tables
    h.default_image_alt = args.default_image_alt
    h.open_quote = args.open_quote
    h.close_quote = args.close_quote

    # Convert and emit the Markdown result.
    sys.stdout.write(h.handle(data))
| 9,248 | 2,945 |
"""
.. the following is a link to enable linking to this file:
.. solver_:
Contains excitation solvers.
A basic single-point-focus excitation solver is :meth:`handybeam.solver.Solver.single_focus_solver`
"""
# Imports
import warnings
# Emitted at import time: this module is kept for backwards compatibility
# only; new code should use beamformer.py instead.
warnings.warn('solver.py is obsolete - use beamformer.py instead')
import handybeam.opencl_wrappers.solver_wrappers as solver_wrappers
# Class
class Solver:
    """Contains the OpenCL subsystem for the single-focus solver.

    This class calls the OpenCL wrapper for the single focus solver.
    """
    def __init__(self, parent=None):
        """ Initializes an instance of class Solver.

        Parameters
        ----------
        parent : handybeam.world.World()
            This is an instance of the handybeam world class.
        """
        self.parent = parent
        # All OpenCL work is delegated to the wrapper layer.
        self.solver = solver_wrappers.Solver(parent=self.parent)
    def single_focus_solver(self, x_focus, y_focus, z_focus, local_work_size=(1, 1, 1), print_performance_feedback=False):
        """ Solve excitation coefficients for a single focal point.

        This method calls the OpenCL wrapper mixin class single_focus_solver which determines
        the set of activation coefficients required to produce a single focal point at a given point in space.
        The result is stored on ``self.parent.tx_array`` rather than returned.

        Parameters
        ----------
        x_focus : numpy float
            This is the x-coordinate of the requested focal point position.
        y_focus : numpy float
            This is the y-coordinate of the requested focal point position.
        z_focus : numpy float
            This is the z-coordinate of the requested focal point position.
        local_work_size : tuple
            Tuple containing the local work sizes for the GPU.
        print_performance_feedback : boolean
            Boolean value determining whether or not to output the GPU performance statistics.
        """
        kernel_output = self.solver.single_focus_solver(
            self.parent.tx_array,
            x_focus, y_focus, z_focus,
            local_work_size=local_work_size,
            print_performance_feedback=print_performance_feedback
        )
        # Store the solved per-element activation coefficients on the array.
        self.parent.tx_array.tx_array_element_descriptor = kernel_output
    def set_parent(self, new_parent):
        """ Changes the parent of an instance of the class Solver.

        Parameters
        ----------
        new_parent : handybeam.world.World()
            This is an instance of the handybeam world class.
        """
        self.parent = new_parent
| 2,848 | 731 |
########
# Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
from datetime import datetime
from flask import current_app
from cloudify.models_states import ExecutionState
from cloudify.utils import extract_and_merge_plugins
from dsl_parser import constants, tasks
from manager_rest import manager_exceptions, workflow_executor
from manager_rest.resource_manager import get_resource_manager
from manager_rest.deployment_update import step_extractor
from manager_rest.deployment_update.utils import extract_ids
from manager_rest.deployment_update.validator import StepValidator
from manager_rest.storage import (get_storage_manager,
models,
get_read_only_storage_manager,
db)
from manager_rest.deployment_update.constants import (
STATES,
ENTITY_TYPES,
NODE_MOD_TYPES,
DEFAULT_DEPLOYMENT_UPDATE_WORKFLOW
)
from manager_rest.deployment_update.handlers import (
DeploymentDependencies,
DeploymentUpdateNodeHandler,
DeploymentUpdateDeploymentHandler,
DeploymentUpdateNodeInstanceHandler)
from manager_rest.utils import get_formatted_timestamp
from manager_rest.rest.rest_utils import (
get_deployment_plan,
get_labels_from_plan,
get_parsed_deployment,
RecursiveDeploymentDependencies,
RecursiveDeploymentLabelsDependencies,
verify_blueprint_uploaded_state,
)
from manager_rest.execution_token import current_execution
class DeploymentUpdateManager(object):
def __init__(self, sm):
self.sm = sm
self._node_handler = DeploymentUpdateNodeHandler(sm)
self._node_instance_handler = DeploymentUpdateNodeInstanceHandler(sm)
self._deployment_handler = DeploymentUpdateDeploymentHandler(sm)
self._deployment_dependency_handler = DeploymentDependencies(sm)
self._step_validator = StepValidator(sm)
def get_deployment_update(self, deployment_update_id, include=None):
return self.sm.get(
models.DeploymentUpdate, deployment_update_id, include=include)
def list_deployment_updates(self,
include=None,
filters=None,
pagination=None,
sort=None,
substr_filters=None):
return self.sm.list(models.DeploymentUpdate,
include=include,
filters=filters,
pagination=pagination,
substr_filters=substr_filters,
sort=sort)
    def stage_deployment_update(self,
                                deployment_id,
                                app_dir,
                                app_blueprint,
                                additional_inputs,
                                new_blueprint_id=None,
                                preview=False,
                                runtime_only_evaluation=False,
                                auto_correct_types=False,
                                reevaluate_active_statuses=False):
        """Create and persist a DeploymentUpdate object for *deployment_id*.

        Parses the blueprint, merges the deployment's existing inputs with
        *additional_inputs*, evaluates the deployment plan, and stores a new
        DeploymentUpdate record.

        :param deployment_id: id of the deployment being updated.
        :param app_dir: directory containing the blueprint for the update.
        :param app_blueprint: blueprint file name within *app_dir*.
        :param additional_inputs: new inputs overriding the old ones.
        :param new_blueprint_id: optional id of a different blueprint to
            switch the deployment to.
        :param preview: when True the update is staged for preview only.
        :param runtime_only_evaluation: evaluate intrinsic functions at
            runtime only (forced on if the deployment already uses it).
        :param auto_correct_types: auto-cast plan values to declared types.
        :param reevaluate_active_statuses: re-sync stale 'active' update
            states from their executions before validating.
        :return: the stored DeploymentUpdate.
        :raises ConflictError: when another update for this deployment is
            still active (via validate_no_active_updates_per_deployment).
        """
        # validate no active updates are running for a deployment_id
        if reevaluate_active_statuses:
            self.reevaluate_updates_statuses_per_deployment(deployment_id)
        self.validate_no_active_updates_per_deployment(deployment_id)
        # enables reverting to original blueprint resources
        deployment = self.sm.get(models.Deployment, deployment_id)
        old_blueprint = deployment.blueprint
        runtime_only_evaluation = (runtime_only_evaluation or
                                   deployment.runtime_only_evaluation)
        parsed_deployment = get_parsed_deployment(old_blueprint,
                                                  app_dir,
                                                  app_blueprint)
        # Updating the new inputs with the deployment inputs
        # (overriding old values and adding new ones)
        old_inputs = copy.deepcopy(deployment.inputs)
        new_inputs = {k: old_inputs[k]
                      for k in parsed_deployment.inputs if k in old_inputs}
        new_inputs.update(additional_inputs)
        # applying intrinsic functions
        plan = get_deployment_plan(parsed_deployment, new_inputs,
                                   runtime_only_evaluation,
                                   auto_correct_types)
        deployment_update_id = '{0}-{1}'.format(deployment.id, uuid.uuid4())
        deployment_update = models.DeploymentUpdate(
            id=deployment_update_id,
            deployment_plan=plan,
            runtime_only_evaluation=runtime_only_evaluation,
            created_at=get_formatted_timestamp()
        )
        deployment_update.set_deployment(deployment)
        deployment_update.preview = preview
        deployment_update.old_inputs = old_inputs
        deployment_update.new_inputs = new_inputs
        # Record both blueprints only when switching blueprints, so the
        # update can be reverted to the original one.
        if new_blueprint_id:
            new_blueprint = self.sm.get(models.Blueprint, new_blueprint_id)
            verify_blueprint_uploaded_state(new_blueprint)
            deployment_update.old_blueprint = old_blueprint
            deployment_update.new_blueprint = new_blueprint
        self.sm.put(deployment_update)
        return deployment_update
def reevaluate_updates_statuses_per_deployment(self, deployment_id: str):
for active_update in self.list_deployment_updates(
filters={'deployment_id': deployment_id,
'state': [STATES.UPDATING,
STATES.EXECUTING_WORKFLOW,
STATES.FINALIZING]}):
reevaluated_state = _map_execution_to_deployment_update_status(
active_update.execution.status)
if reevaluated_state and active_update.state != reevaluated_state:
current_app.logger.info("Deployment update %s status "
"reevaluation: `%s` -> `%s`",
active_update.id,
active_update.state,
reevaluated_state)
active_update.state = reevaluated_state
self.sm.update(active_update)
def create_deployment_update_step(self,
deployment_update,
action,
entity_type,
entity_id,
topology_order):
step = models.DeploymentUpdateStep(id=str(uuid.uuid4()),
action=action,
entity_type=entity_type,
entity_id=entity_id,
topology_order=topology_order)
step.set_deployment_update(deployment_update)
return self.sm.put(step)
def extract_steps_from_deployment_update(self, deployment_update):
nodes = [node.to_dict() for node in deployment_update.deployment.nodes]
supported_steps, unsupported_steps = step_extractor.extract_steps(
nodes,
deployment_update.deployment,
deployment_update.deployment_plan)
if unsupported_steps:
deployment_update.state = STATES.FAILED
self.sm.update(deployment_update)
unsupported_entity_ids = [step.entity_id
for step in unsupported_steps]
raise manager_exceptions.UnsupportedChangeInDeploymentUpdate(
'The blueprint you provided for the deployment update '
'contains changes currently unsupported by the deployment '
'update mechanism.\n'
'Unsupported changes: {0}'.format('\n'.join(
unsupported_entity_ids)))
for step in supported_steps:
self.create_deployment_update_step(deployment_update,
step.action,
step.entity_type,
step.entity_id,
step.topology_order)
    def commit_deployment_update(self,
                                 dep_update,
                                 skip_install=False,
                                 skip_uninstall=False,
                                 skip_reinstall=False,
                                 workflow_id=None,
                                 ignore_failure=False,
                                 install_first=False,
                                 reinstall_list=None,
                                 update_plugins=True,
                                 force=False):
        """Apply a staged deployment update and start its workflow.

        Runs the node/deployment handlers, computes node-instance, plugin,
        schedule and label changes, persists them on *dep_update*, and
        (unless this is a preview) updates the deployment, starts the update
        workflow and applies schedule/label changes.

        :param dep_update: the staged DeploymentUpdate to commit.
        :param skip_install: skip installation of added node instances.
        :param skip_uninstall: skip uninstallation of removed instances.
        :param skip_reinstall: skip reinstalling modified instances.
        :param workflow_id: custom workflow to run instead of the default.
        :param ignore_failure: ignore failures while uninstalling.
        :param install_first: install new instances before uninstalling.
        :param reinstall_list: explicit node-instance ids to reinstall.
        :param update_plugins: whether to compute/apply plugin changes.
        :param force: force the update even if the blueprint is used to
            create components.
        :return: the DeploymentUpdate (preview copy, or refetched record).
        """
        # Mark deployment update as committing
        rm = get_resource_manager()
        dep_update.keep_old_deployment_dependencies = skip_uninstall
        dep_update.state = STATES.UPDATING
        self.sm.update(dep_update)
        # Handle any deployment related changes. i.e. workflows and deployments
        modified_deployment_entities, raw_updated_deployment = \
            self._deployment_handler.handle(dep_update)
        # Retrieve previous_nodes
        previous_nodes = [node.to_dict() for node in self.sm.list(
            models.Node, filters={'deployment_id': dep_update.deployment_id},
            get_all_results=True
        )]
        # Update the nodes on the storage
        modified_entity_ids, depup_nodes = self._node_handler.handle(
            dep_update)
        # Extract changes from raw nodes
        node_instance_changes = self._extract_changes(dep_update,
                                                      depup_nodes,
                                                      previous_nodes)
        # Create (and update for adding step type) node instances
        # according to the changes in raw_nodes
        depup_node_instances = self._node_instance_handler.handle(
            dep_update, node_instance_changes)
        # Calculate which plugins to install and which to uninstall
        central_plugins_to_install, central_plugins_to_uninstall = \
            self._extract_plugins_changes(dep_update, update_plugins)
        # Calculate which deployment schedules need to be added or deleted
        schedules_to_create, schedules_to_delete = \
            self._extract_schedules_changes(dep_update)
        # Saving the needed changes back to the storage manager for future use
        # (removing entities).
        dep_update.deployment_update_deployment = raw_updated_deployment
        dep_update.deployment_update_nodes = depup_nodes
        dep_update.deployment_update_node_instances = depup_node_instances
        dep_update.modified_entity_ids = modified_entity_ids.to_dict(
            include_rel_order=True)
        dep_update.central_plugins_to_install = central_plugins_to_install
        dep_update.central_plugins_to_uninstall = central_plugins_to_uninstall
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        labels_to_create = self._get_deployment_labels_to_create(dep_update)
        parents_labels = []
        # New labels may attach this deployment to parent deployments;
        # validate the attachment before committing anything further.
        if labels_to_create:
            parents_labels = rm.get_deployment_parents_from_labels(
                labels_to_create
            )
            dep_graph = RecursiveDeploymentLabelsDependencies(self.sm)
            dep_graph.create_dependencies_graph()
            rm.verify_attaching_deployment_to_parents(
                dep_graph,
                parents_labels,
                deployment.id
            )
        self.sm.update(dep_update)
        # If this is a preview, no need to run workflow and update DB
        if dep_update.preview:
            dep_update.state = STATES.PREVIEW
            dep_update.id = None
            # retrieving recursive dependencies for the updated deployment
            dep_graph = RecursiveDeploymentDependencies(self.sm)
            dep_graph.create_dependencies_graph()
            deployment_dependencies = dep_graph.retrieve_dependent_deployments(
                dep_update.deployment_id)
            dep_update.set_recursive_dependencies(deployment_dependencies)
            dep_update.schedules_to_create = \
                self.list_schedules(schedules_to_create)
            dep_update.schedules_to_delete = schedules_to_delete
            dep_update.labels_to_create = [{'key': label[0], 'value': label[1]}
                                           for label in labels_to_create]
            return dep_update
        # Handle inter-deployment dependencies changes
        self._deployment_dependency_handler.handle(dep_update)
        # Update deployment attributes in the storage manager
        deployment.inputs = dep_update.new_inputs
        deployment.runtime_only_evaluation = dep_update.runtime_only_evaluation
        if dep_update.new_blueprint:
            deployment.blueprint = dep_update.new_blueprint
        deployment.capabilities = \
            dep_update.deployment_plan.get('capabilities', {})
        self.sm.update(deployment)
        # Execute the default 'update' workflow or a custom workflow using
        # added and related instances. Any workflow executed should call
        # finalize_update, since removing entities should be done after the
        # executions.
        # The raw_node_instances are being used only for their ids, thus
        # they should really hold the finished version for the node instance.
        execution = self._execute_update_workflow(
            dep_update,
            depup_node_instances,
            modified_entity_ids.to_dict(),
            skip_install=skip_install,
            skip_uninstall=skip_uninstall,
            skip_reinstall=skip_reinstall,
            workflow_id=workflow_id,
            ignore_failure=ignore_failure,
            install_first=install_first,
            reinstall_list=reinstall_list,
            central_plugins_to_install=central_plugins_to_install,
            central_plugins_to_uninstall=central_plugins_to_uninstall,
            update_plugins=update_plugins,
            force=force
        )
        # Update deployment update attributes in the storage manager
        dep_update.execution = execution
        dep_update.state = STATES.EXECUTING_WORKFLOW
        self.sm.update(dep_update)
        # First, delete old deployment schedules
        for schedule_id in schedules_to_delete:
            schedule = self.sm.get(
                models.ExecutionSchedule,
                None,
                filters={'id': schedule_id, 'deployment_id': deployment.id})
            self.sm.delete(schedule)
        # Then, create new deployment schedules
        deployment_creation_time = datetime.strptime(
            deployment.created_at.split('.')[0], '%Y-%m-%dT%H:%M:%S'
        ).replace(second=0)
        rm.create_deployment_schedules_from_dict(
            schedules_to_create, deployment, deployment_creation_time)
        rm.create_resource_labels(
            models.DeploymentLabel,
            deployment,
            labels_to_create
        )
        # Finally, attach the deployment to any new parent deployments.
        if parents_labels:
            for parent in parents_labels:
                rm.add_deployment_to_labels_graph(
                    dep_graph,
                    deployment,
                    parent
                )
        return self.get_deployment_update(dep_update.id)
def validate_no_active_updates_per_deployment(self, deployment_id):
existing_updates = self.list_deployment_updates(
filters={'deployment_id': deployment_id}).items
active_updates = [u for u in existing_updates
if u.state not in (STATES.SUCCESSFUL, STATES.FAILED)]
if not active_updates:
return
raise manager_exceptions.ConflictError(
'there are deployment updates still active; update IDs: {0}'
.format(', '.join([u.id for u in active_updates])))
@staticmethod
def list_schedules(schedules_dict):
schedules_list = []
for k, v in schedules_dict.items():
list_item = v
list_item['id'] = k
schedules_list.append(list_item)
return schedules_list
def _extract_changes(self,
dep_update,
raw_nodes,
previous_nodes):
"""Extracts the changes between the current node_instances and
the raw_nodes specified
:param dep_update: deployment update object
:param raw_nodes: node objects from deployment update
:return: a dictionary of modification type and node instanced modified
"""
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
deployment_id_filter = {'deployment_id': deployment.id}
# By this point the node_instances aren't updated yet
previous_node_instances = [instance.to_dict() for instance in
self.sm.list(models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True)]
# extract all the None relationships from the deployment update nodes
# in order to use in the extract changes
no_none_relationships_nodes = copy.deepcopy(raw_nodes)
for node in no_none_relationships_nodes:
node['relationships'] = [r for r in node['relationships'] if r]
# project changes in deployment
changes = tasks.modify_deployment(
nodes=no_none_relationships_nodes,
previous_nodes=previous_nodes,
previous_node_instances=previous_node_instances,
scaling_groups=deployment.scaling_groups,
modified_nodes=()
)
self._patch_changes_with_relationship_index(
changes[NODE_MOD_TYPES.EXTENDED_AND_RELATED], raw_nodes)
return changes
@staticmethod
def _patch_changes_with_relationship_index(raw_node_instances, raw_nodes):
for raw_node_instance in (i for i in raw_node_instances
if 'modification' in i):
raw_node = next(n for n in raw_nodes
if n['id'] == raw_node_instance['node_id'])
for relationship in raw_node_instance['relationships']:
target_node_id = relationship['target_name']
rel_index = next(i for i, d
in enumerate(raw_node['relationships'])
if d['target_id'] == target_node_id)
relationship['rel_index'] = rel_index
def _validate_reinstall_list(self,
reinstall,
add,
remove,
dep_update):
"""validate node-instances explicitly supplied to reinstall list exist
and are not about to be installed or uninstalled in this update"""
node_instances = self.sm.list(
models.NodeInstance,
filters={'deployment_id': dep_update.deployment_id},
get_all_results=True
)
node_instances_ids = [n.id for n in node_instances]
add_conflict = [n for n in reinstall if n in add]
remove_conflict = [n for n in reinstall if n in remove]
not_existing = [n for n in reinstall if n not in node_instances_ids]
msg = 'Invalid reinstall list supplied.'
if not_existing:
msg += '\nFollowing node instances do not exist in this ' \
'deployment: ' + ', '.join(not_existing)
if add_conflict:
msg += '\nFollowing node instances are just being added in the ' \
'update: ' + ', '.join(add_conflict)
if remove_conflict:
msg += '\nFollowing node instances are just being removed in ' \
'the update: ' + ', '.join(remove_conflict)
if any([not_existing, add_conflict, remove_conflict]):
dep_update.state = STATES.FAILED
self.sm.update(dep_update)
raise manager_exceptions.BadParametersError(msg)
def _update_reinstall_list(self,
reinstall_list,
add_list,
remove_list,
modified_entity_ids,
dep_update,
skip_reinstall):
"""Add nodes that their properties have been updated to the list of
node instances to reinstall, unless skip_reinstall is true"""
reinstall_list = reinstall_list or []
self._validate_reinstall_list(reinstall_list,
add_list,
remove_list,
dep_update)
if skip_reinstall:
return reinstall_list
# get all entities with modifications in properties or operations
for change_type in (ENTITY_TYPES.PROPERTY, ENTITY_TYPES.OPERATION):
for modified in modified_entity_ids[change_type]:
modified = modified.split(':')
# pick only entities that are part of nodes
if modified[0].lower() != 'nodes':
continue
# list instances of each node
node_instances = self.sm.list(
models.NodeInstance,
filters={'deployment_id': dep_update.deployment_id,
'node_id': modified[1]},
get_all_results=True
)
# add instances ids to the reinstall list, if they are not in
# the install/uninstall list
reinstall_list += [e.id for e in node_instances.items
if e.id not in add_list
and e.id not in remove_list]
return reinstall_list
    def _execute_update_workflow(self,
                                 dep_update,
                                 node_instances,
                                 modified_entity_ids,
                                 skip_install=False,
                                 skip_uninstall=False,
                                 skip_reinstall=False,
                                 workflow_id=None,
                                 ignore_failure=False,
                                 install_first=False,
                                 reinstall_list=None,
                                 central_plugins_to_install=None,
                                 central_plugins_to_uninstall=None,
                                 update_plugins=True,
                                 force=False):
        """Executed the update workflow or a custom workflow
        :param dep_update: deployment update object
        :param node_instances: a dictionary of modification type and
        add_node.modification instances
        :param modified_entity_ids: the entire add_node.modification entities
        list (by id)
        :param skip_install: if to skip installation of node instances.
        :param skip_uninstall: if to skip uninstallation of node instances.
        :param skip_reinstall: if to skip reinstallation of node instances.
        :param workflow_id: the update workflow id
        :param ignore_failure: if to ignore failures.
        :param install_first: if to install the node instances before
        uninstalling them.
        :param reinstall_list: list of node instances to reinstall.
        :param central_plugins_to_install: plugins to install that have the
        central_deployment_agent as the executor.
        :param central_plugins_to_uninstall: plugins to uninstall that have the
        central_deployment_agent as the executor.
        :param update_plugins: whether or not to perform plugin updates.
        :param force: force update (i.e. even if the blueprint is used to
        create components).
        :return: an Execution object.
        """
        # Split the computed changes by modification type.
        added_instances = node_instances[NODE_MOD_TYPES.ADDED_AND_RELATED]
        extended_instances = \
            node_instances[NODE_MOD_TYPES.EXTENDED_AND_RELATED]
        reduced_instances = node_instances[NODE_MOD_TYPES.REDUCED_AND_RELATED]
        removed_instances = node_instances[NODE_MOD_TYPES.REMOVED_AND_RELATED]
        added_instance_ids = extract_ids(
            added_instances.get(NODE_MOD_TYPES.AFFECTED))
        removed_instance_ids = extract_ids(
            removed_instances.get(NODE_MOD_TYPES.AFFECTED))
        # Grow the reinstall list with instances whose properties/operations
        # changed (validated and filtered against add/remove lists).
        reinstall_list = self._update_reinstall_list(reinstall_list,
                                                     added_instance_ids,
                                                     removed_instance_ids,
                                                     modified_entity_ids,
                                                     dep_update,
                                                     skip_reinstall)
        parameters = {
            # needed in order to finalize the commit
            'update_id': dep_update.id,
            # For any added node instance
            'added_instance_ids': added_instance_ids,
            'added_target_instances_ids':
                extract_ids(added_instances.get(NODE_MOD_TYPES.RELATED)),
            # encapsulated all the change entity_ids (in a dictionary with
            # 'node' and 'relationship' keys.
            'modified_entity_ids': modified_entity_ids,
            # Any nodes which were extended (positive modification)
            'extended_instance_ids':
                extract_ids(extended_instances.get(NODE_MOD_TYPES.AFFECTED)),
            'extend_target_instance_ids':
                extract_ids(extended_instances.get(NODE_MOD_TYPES.RELATED)),
            # Any nodes which were reduced (negative modification)
            'reduced_instance_ids':
                extract_ids(reduced_instances.get(NODE_MOD_TYPES.AFFECTED)),
            'reduce_target_instance_ids':
                extract_ids(reduced_instances.get(NODE_MOD_TYPES.RELATED)),
            # Any nodes which were removed as a whole
            'removed_instance_ids': removed_instance_ids,
            'remove_target_instance_ids':
                extract_ids(removed_instances.get(NODE_MOD_TYPES.RELATED)),
            # Whether or not execute install/uninstall/reinstall,
            # order of execution, behavior in failure while uninstalling, and
            # whether or not to update the plugins.
            'skip_install': skip_install,
            'skip_uninstall': skip_uninstall,
            'ignore_failure': ignore_failure,
            'install_first': install_first,
            'update_plugins': update_plugins,
            # Plugins that are executed by the central deployment agent and
            # need to be un/installed
            'central_plugins_to_install': central_plugins_to_install,
            'central_plugins_to_uninstall': central_plugins_to_uninstall,
            # List of node-instances to reinstall
            'node_instances_to_reinstall': reinstall_list
        }
        # Create the (pending) execution record for the update workflow.
        execution = models.Execution(
            workflow_id=workflow_id or DEFAULT_DEPLOYMENT_UPDATE_WORKFLOW,
            deployment=dep_update.deployment,
            allow_custom_parameters=True,
            blueprint_id=dep_update.new_blueprint_id,
            parameters=parameters,
            status=ExecutionState.PENDING,
        )
        self.sm.put(execution)
        if current_execution and \
                current_execution.workflow_id == 'csys_update_deployment':
            # if we're created from a update_deployment workflow, join its
            # exec-groups, for easy tracking
            for exec_group in current_execution.execution_groups:
                exec_group.executions.append(execution)
        db.session.commit()
        # Dispatch the workflow for execution.
        messages = get_resource_manager().prepare_executions(
            [execution],
            allow_overlapping_running_wf=True,
            force=force,
        )
        workflow_executor.execute_workflow(messages)
        return execution
def finalize_commit(self, deployment_update_id):
""" finalizes the update process by removing any removed
node/node-instances and updating any reduced node
"""
# mark deployment update as finalizing
dep_update = self.get_deployment_update(deployment_update_id)
dep_update.state = STATES.FINALIZING
self.sm.update(dep_update)
# The order of these matter
self._deployment_handler.finalize(dep_update)
self._node_instance_handler.finalize(dep_update)
self._node_handler.finalize(dep_update)
self._deployment_dependency_handler.finalize(dep_update)
# mark deployment update as successful
dep_update.state = STATES.SUCCESSFUL
self.sm.update(dep_update)
return dep_update
    def _extract_plugins_changes(self, dep_update, update_plugins):
        """Extracts plugins that need to be installed or uninstalled.
        :param dep_update: a DeploymentUpdate object.
        :param update_plugins: whether to update the plugins or not.
        :return: plugins that need installation and uninstallation (a tuple).
        """
        # helper: merge a plan's deployment- and workflow-level plugins,
        # keeping only centrally-deployed ones; repetitions are kept for
        # the old plan only (with_repetition=is_old_plan)
        def get_plugins_to_install(plan, is_old_plan):
            return extract_and_merge_plugins(
                plan[constants.DEPLOYMENT_PLUGINS_TO_INSTALL],
                plan[constants.WORKFLOW_PLUGINS_TO_INSTALL],
                filter_func=is_centrally_deployed,
                with_repetition=is_old_plan)
        # helper: True when the plugin runs on the central deployment agent
        def is_centrally_deployed(plugin):
            return (plugin[constants.PLUGIN_EXECUTOR_KEY]
                    == constants.CENTRAL_DEPLOYMENT_AGENT)
        # helper: append source_dict values whose keys are absent from
        # filter_out_dict (i.e. set difference by plugin name)
        def extend_list_from_dict(source_dict, filter_out_dict, target_list):
            target_list.extend(
                source_dict[k]
                for k in source_dict if k not in filter_out_dict)
        if not update_plugins:
            return [], []
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        old_plan = deployment.blueprint.plan
        new_plan = dep_update.deployment_plan
        plugins_to_install_old = get_plugins_to_install(old_plan, True)
        plugins_to_install_new = get_plugins_to_install(new_plan, False)
        # Convert to plugin_name->plugin dict
        new_plugins = {p[constants.PLUGIN_NAME_KEY]: p
                       for p in plugins_to_install_new}
        old_plugins = {p[constants.PLUGIN_NAME_KEY]: p
                       for p in plugins_to_install_old}
        central_plugins_to_install, central_plugins_to_uninstall = [], []
        # install plugins that are only in the new plan
        extend_list_from_dict(source_dict=new_plugins,
                              filter_out_dict=old_plugins,
                              target_list=central_plugins_to_install)
        # uninstall plugins that are only in the old plan
        extend_list_from_dict(source_dict=old_plugins,
                              filter_out_dict=new_plugins,
                              target_list=central_plugins_to_uninstall)
        # Deal with the intersection between the old and new plugins:
        # plugins present in both plans but with changed definitions get
        # the old version uninstalled and the new one installed
        intersection = (k for k in new_plugins if k in old_plugins)
        for plugin_name in intersection:
            old_plugin = old_plugins[plugin_name]
            new_plugin = new_plugins[plugin_name]
            if new_plugin == old_plugin:
                continue
            central_plugins_to_install.append(new_plugin)
            central_plugins_to_uninstall.append(old_plugin)
        return central_plugins_to_install, central_plugins_to_uninstall
    def _extract_schedules_changes(self, dep_update):
        """Compute execution-schedule changes implied by a deployment update.

        Compares the old blueprint's default schedules against the new
        plan's, and raises if a new default schedule name collides with a
        pre-existing, non-default deployment schedule.

        :param dep_update: a DeploymentUpdate object.
        :return: a (schedules_to_create, schedules_to_delete) tuple.
        :raises InvalidBlueprintError: on a schedule name conflict.
        """
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        old_settings = deployment.blueprint.plan.get('deployment_settings')
        new_settings = dep_update.deployment_plan.get('deployment_settings')
        schedules_to_delete = []
        schedules_to_create = {}
        if old_settings:
            # Any default schedule from the old blueprint that still exists
            # on this deployment is slated for deletion.
            for schedule_id in old_settings.get('default_schedules', {}):
                try:
                    schedule = self.sm.get(
                        models.ExecutionSchedule,
                        None,
                        filters={'id': schedule_id,
                                 'deployment_id': deployment.id})
                    if schedule.deployment_id == deployment.id:
                        schedules_to_delete.append(schedule_id)
                except manager_exceptions.NotFoundError:
                    # Schedule no longer exists; nothing to delete.
                    continue
        if new_settings:
            name_conflict_error_msg = \
                'The Blueprint used for the deployment update contains a ' \
                'default schedule `{0}`, but a deployment schedule `{0}` ' \
                'already exists for the deployment `{1}` . Please either ' \
                'delete the existing schedule or fix the blueprint.'
            schedules_to_create = new_settings.get('default_schedules', {})
            for schedule_id in schedules_to_create:
                try:
                    # An existing schedule with the same id is only OK if it
                    # came from the old blueprint (i.e. will be deleted).
                    self.sm.get(models.ExecutionSchedule,
                                None,
                                filters={'id': schedule_id,
                                         'deployment_id': deployment.id})
                    if schedule_id not in schedules_to_delete:
                        raise manager_exceptions.InvalidBlueprintError(
                            name_conflict_error_msg.format(schedule_id,
                                                           deployment.id))
                except manager_exceptions.NotFoundError:
                    # No conflicting schedule; this one can be created.
                    continue
        return schedules_to_create, schedules_to_delete
def _get_deployment_labels_to_create(self, dep_update):
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
new_labels = get_labels_from_plan(dep_update.deployment_plan,
constants.LABELS)
return get_resource_manager().get_labels_to_create(deployment,
new_labels)
def _delete_single_label_from_deployment(self,
label_key,
label_value,
deployment):
dep_label = self.sm.get(
models.DeploymentLabel,
None,
filters={
'_labeled_model_fk': deployment._storage_id,
'key': label_key,
'value': label_value
}
)
self.sm.delete(dep_label)
# What we need to access this manager in Flask
def get_deployment_updates_manager(preview=False):
    """Return the current Flask app's deployment updates manager.

    The manager is cached on the app config and created on first use.
    A preview manager is backed by a read-only storage manager.
    """
    if preview:
        cache_key = 'deployment_updates_preview_manager'
        storage = get_read_only_storage_manager()
    else:
        cache_key = 'deployment_updates_manager'
        storage = get_storage_manager()
    return current_app.config.setdefault(
        cache_key, DeploymentUpdateManager(storage))
def _map_execution_to_deployment_update_status(execution_status: str) -> str:
    """Translate an execution status to a deployment-update state.

    :param execution_status: one of the ExecutionState constants.
    :return: STATES.SUCCESSFUL for a terminated execution, STATES.FAILED
        for a failed/cancelled one, or None for any other (in-progress)
        status, meaning the update state should not change yet.
        NOTE(review): the ``-> str`` annotation is too narrow — this can
        return None; previously it did so via an implicit fall-through.
    """
    if execution_status == ExecutionState.TERMINATED:
        return STATES.SUCCESSFUL
    if execution_status in (ExecutionState.FAILED,
                            ExecutionState.CANCELLED,
                            ExecutionState.CANCELLING,
                            ExecutionState.FORCE_CANCELLING,
                            ExecutionState.KILL_CANCELLING):
        return STATES.FAILED
    # In-progress statuses: deliberately no state change (explicit None
    # instead of the previous silent fall-through).
    return None
| 36,550 | 9,304 |
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt

# Toy data: two visually separable clusters of three points each.
x = np.array([1, 2, 3, 7, 8, 9])
y = np.array([7, 6, 7, 3, 2, 2])

# Show the raw points first.
plt.scatter(x, y)
plt.show()

# Build an (n_samples, 2) feature matrix.
# BUGFIX: np.array(zip(x, y)) is broken on Python 3 — zip() is a lazy
# iterator, so it produced a 0-d object array; materialize the pairs.
X = np.column_stack((x, y))

kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_

# One colour/marker format string per cluster label (wraps around when
# there are more clusters than styles).
colours = ['g.', 'r.', 'c.', 'y.', 'm.', 'k.', 'w.', 'go']
for i in range(len(X)):
    colour = colours[labels[i] % len(colours)]
    # BUGFIX: the stray positional '10' was not a valid plot() argument;
    # the intent was a marker size of 10.
    plt.plot(X[i][0], X[i][1], colour, markersize=10)

# Display centroids as well
plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=150, linewidths=4)
plt.show()
import logging
from .processors import JsonProcessor
from . import Config
from .stores import IncidentFileStore, RawStore, ProcessedFileStore
from .routes.push import PushReceiver
import json
from bos_incidents.exceptions import DuplicateIncidentException
import threading
import time
from strict_rfc3339 import InvalidRFC3339Error
from . import utils
from .utils import slugify
from datetime import timedelta
from bos_incidents.format import string_to_incident
from bos_incidents import factory
from dataproxy.utils import CommonFormat
incidents_storage = factory.get_incident_storage()
def _send_to_witness(processor, incident, targets=None):
    """Push a single incident to the subscribed witnesses.

    Optionally sleeps first if a per-call initial delay is configured
    under ``subscriptions.delay_before_initial_sending_in_seconds``.

    :param processor: processor whose ``send_to_witness`` performs the push.
    :param incident: incident dict; must contain "call" and "unique_string".
    :param targets: optional restriction of witnesses to send to.
    :return: number of witnesses that answered "ok", or None if the push
        failed (the exception is logged and deliberately swallowed).
    """
    try:
        # Configurable delay before the first send, keyed by incident call.
        initial_delay = Config.get("subscriptions",
                                   "delay_before_initial_sending_in_seconds",
                                   incident["call"],
                                   0)
        if initial_delay > 0:
            logging.getLogger(__name__).info("Incident " + incident["unique_string"] + ": Waiting before sending " + incident["call"])
            time.sleep(initial_delay)
        logging.getLogger(__name__).info("Incident " + incident["unique_string"] + ": Sending result now")
        # The per-witness delivery status is stored globally on PushReceiver.
        PushReceiver.subscribed_witnesses_status = processor.send_to_witness(
            incident,
            targets=targets
        )
        received_witnesses = len([key for key, value in PushReceiver.subscribed_witnesses_status.items() if value == "ok"])
        logging.getLogger(__name__).debug("Incident " + incident["unique_string"] + ": Successfully sent to " + str(received_witnesses) + " witnesses")
        return received_witnesses
    except Exception as e:
        # Best-effort delivery: log and continue without re-raising.
        logging.getLogger(__name__).info("Incident " + incident["unique_string"] + ": PUSH to witness failed, continueing anyways, exception below")
        logging.getLogger(__name__).exception(e)
def _send_list_to_witness(processor, incident_list, targets=None, async_queue=True):
for incident in incident_list:
logging.getLogger(__name__).info("Trigger sending " + incident["unique_string"])
if async_queue:
# send to witnesses
thr = threading.Thread(target=_send_to_witness,
args=(processor, incident, targets,))
thr.start() # we dont care when it finishes
else:
_send_to_witness(processor, incident, targets=targets)
def process_content(provider_name,
                    processor,
                    processed_store,
                    incident_store,
                    file_content,
                    file_ending,
                    restrict_witness_group=None, # deprecated, use target
                    async_queue=True,
                    target=None):
    """Process one fetched source file: store it, extract incidents,
    persist new incidents (file + database) and push them to witnesses.

    :param provider_name: name of the data provider the content came from.
    :param processor: processor that filters and parses the content
        (may be falsy, in which case nothing is extracted).
    :param processed_store: store for raw source files deemed interesting.
    :param incident_store: file store used to deduplicate/persist incidents.
    :param file_content: the raw fetched content (may be None).
    :param file_ending: file extension used when saving the source file.
    :param restrict_witness_group: deprecated alias for *target*.
    :param async_queue: if True, pushes to witnesses on background threads.
    :param target: witness group/url/name to restrict sending to.
    :return: summary dict with the saved file name, the extracted
        incidents, and flags describing what happened.
    """
    file_name = None
    if restrict_witness_group is None and target is not None:
        restrict_witness_group = target
    # before storing, check if its worth processing
    is_interesting = file_content is not None
    if is_interesting and processor:
        is_interesting = processor.source_of_interest(
            file_content
        )
    incidents = []
    do_not_send_to_witness = True
    if is_interesting:
        # store found file again
        file_name = processed_store.save(
            provider_name,
            file_content,
            file_ext=file_ending)
        try:
            # process content (should be asynchronous)
            if processor:
                for incident in processor.process(file_content):
                    logging.getLogger(__name__ + "_" + provider_name).debug("Postprocessing " + incident["unique_string"])
                    incident["provider_info"]["source_file"] = file_name
                    incidents.append(incident)
                    # only send if its a new incident
                    logging.getLogger(__name__ + "_" + provider_name).debug(" ... exists")
                    do_not_send_to_witness = incident_store.exists(
                        provider_name,
                        file_ext=".json",
                        file_name=incident["unique_string"])
                    if not do_not_send_to_witness:
                        logging.getLogger(__name__ + "_" + provider_name).debug(" ... save in incidents folder")
                        # save locally
                        incident_file = incident_store.save(
                            provider_name,
                            json.dumps(incident),
                            file_ext=".json",
                            file_name=incident["unique_string"])
                        try:
                            logging.getLogger(__name__ + "_" + provider_name).debug(" ... save in incidents database")
                            incidents_storage.insert_incident(incident)
                        except DuplicateIncidentException:
                            # Already in the database; file store was stale.
                            pass
                        except Exception as e:
                            # Database write is best-effort; keep going.
                            logging.getLogger(__name__ + "_" + provider_name).info(provider_name + ": INSERT INTO stats failed, continueing anyways, incident file is " + incident_file + ", exception below")
                            logging.getLogger(__name__ + "_" + provider_name).exception(e)
                        # Drop the MongoDB id the insert may have added.
                        incident.pop("_id", None)
                        try:
                            logging.getLogger(__name__ + "_" + provider_name).debug(" ... sending to witnesses (" + str(restrict_witness_group) + ", async_queue=" + str(async_queue) + ")")
                            if async_queue:
                                # send to witnesses
                                thr = threading.Thread(target=_send_to_witness,
                                                       args=(processor, incident, _find_targets(restrict_witness_group),))
                                thr.start()  # we dont care when it finishes
                            else:
                                _send_to_witness(processor, incident, targets=_find_targets(restrict_witness_group))
                        except Exception as e:
                            # Push is best-effort; the incident is persisted.
                            logging.getLogger(__name__ + "_" + provider_name).info(provider_name + ": PUSH to witness failed, continueing anyways, incident file is " + incident_file + ", exception below")
                            logging.getLogger(__name__ + "_" + provider_name).exception(e)
        except Exception as e:
            # Parsing failure of one source file must not kill the caller.
            logging.getLogger(__name__ + "_" + provider_name).info(provider_name + ": Processing failed, continueing anyways. Source file is " + file_name + ", exception below")
            logging.getLogger(__name__ + "_" + provider_name).exception(e)
    return {
        "file_name": file_name,
        "amount_incidents": len(incidents),
        "incidents": incidents,
        "do_not_send_to_witness": do_not_send_to_witness,
        "is_interesting": is_interesting
    }
def _find_targets(target):
    """Select the configured witnesses matching *target*.

    *target* may be a witness group, url or name; ``None`` matches every
    configured witness.
    """
    witnesses = Config.get("subscriptions", "witnesses")
    if target is None:
        return [witness for witness in witnesses]
    # ``or`` short-circuits, so witness["url"] is only read when the
    # group did not match (a witness without "url" can still match by group).
    return [
        witness for witness in witnesses
        if target == witness.get("group", None)
        or target == witness["url"]
        or target == witness.get("name", None)
    ]
def replay(restrict_witness_group=None,
           providers=None,
           received=None,
           processor=None,
           name_filter=None,
           incidents=None,
           async_execution=None,
           async_queue=None,
           only_report=None,
           target=None):
    """Re-send previously stored incidents to the witnesses.

    Incidents are collected from the file dump (filtered by provider,
    received-date folder and name), merged with matches from the local
    incident database, sorted by push time and re-sent.

    :param restrict_witness_group: deprecated alias for *target*.
    :param providers: provider name or list of names (default: all
        configured providers).
    :param received: date-prefix string(s) narrowing the dump folders.
    :param processor: processor used for parsing/sending (default:
        JsonProcessor).
    :param name_filter: string or list of substrings an incident's name
        must contain; required unless *incidents* is given.
    :param incidents: explicit incidents (dicts or unique strings) to
        replay instead of searching for them.
    :param async_execution: send the whole list on a background thread.
    :param async_queue: send each incident on its own thread.
    :param only_report: collect and report, but do not send.
    :param target: witness group/url/name to restrict sending to.
    :return: a stats dict describing what was found and sent.
    """
    if name_filter is None and (incidents is None or incidents == []):
        report = {"name_filter": "Name filter must not be empty"}
        return report
    if async_execution is None:
        async_execution = False
    if only_report is None:
        only_report = False
    logging.getLogger(__name__).info("Replay: Collecting configuration ...")
    replay_stats = {}
    replay_stats["async_execution"] = async_execution
    replay_stats["target"] = target
    if providers is None:
        providers = list(Config.get("providers").keys())
    if type(providers) == str:
        providers = [providers]
    replay_stats["providers"] = providers
    if processor is None:
        processor = JsonProcessor()
    replay_stats["processor"] = processor.__class__.__name__
    if restrict_witness_group is not None:
        target = restrict_witness_group
    matched_targets = _find_targets(target)
    replay_stats["matched_targets"] = len(matched_targets)
    if len(matched_targets) == 0:
        logging.getLogger(__name__).info("Replay: No matched witnesses found for target " + target)
        return replay_stats
    if incidents is None:
        incidents = []
        if type(name_filter) == str:
            name_filter = name_filter.split(",")
        if name_filter is not None:
            # Try to recognize a date inside the name filter so the search
            # can be narrowed to a window of received-date folders.
            offset_left = 3
            offset_right = 3
            match_date = None
            for tmp in name_filter:
                tmp = slugify(tmp)
                try:
                    match_date = utils.string_to_date(tmp[0:20])
                    break
                except InvalidRFC3339Error:
                    pass
                try:
                    match_date = utils.string_to_date(tmp[0:8])
                    break
                except InvalidRFC3339Error:
                    pass
                try:
                    match_date = utils.string_to_date(tmp[0:10])
                    break
                except InvalidRFC3339Error:
                    pass
            if "create" in name_filter:
                # "create" incidents can arrive long before the event date.
                offset_left = 28
            if match_date and received is None:
                received = []
                for i in range(-offset_left, offset_right):
                    _date = utils.date_to_string(match_date + timedelta(days=i))
                    received.append(_date[0:4] + _date[5:7] + _date[8:10])
        folder_filter = []
        for provider in providers:
            folder_filter.append(provider)
        if received is None:
            # Fall back to a very broad date prefix (2018/2019).
            received = ["20181", "2019"]
        if type(received) == str:
            received = [received]
        for tmp in received:
            folder_filter.append(tmp)
        replay_stats["folder_filter"] = folder_filter
        replay_stats["name_filter"] = name_filter
        logging.getLogger(__name__).info("Replay: Finding all incidents in file dump with configuration " + str(replay_stats))
        for incident in processor.process_generic(
                folder="dump/d_incidents",
                folder_filter=folder_filter,
                name_filter=name_filter):
            incidents.append(incident)
        if len(received) == 2:
            # Broad date window: also query the incident database.
            logging.getLogger(__name__).info("Replay: Querying local database for incidents")
            regex_filter = ".*".join(name_filter) + ".*"
            # Only prepend ".*" if there expected to be anything beforehand
            if not regex_filter.startswith("201"):
                # cover all years 2010-2029
                regex_filter = ".*" + regex_filter
            try:
                #if len(received) == 1:
                #    if len(received[0]) == 8:
                #        _from = datetime(received[0][0:4], received[0][4:6], received[0][6:8], 0, 0, tzinfo=tzutc())
                #        _till = datetime(received[0][0:4], received[0][4:6], received[0][6:8], 23, 59, tzinfo=tzutc())
                #    elif len(received[0]) == 6:
                #        _from = datetime(received[0][0:4], received[0][4:6], 1, 0, 0, tzinfo=tzutc())
                #        _till = datetime(received[0][0:4], received[0][4:6], 28, 23, 59, tzinfo=tzutc())
                #else:
                #    _from = None
                for incident in incidents_storage.get_incidents(
                    dict(
                        unique_string={"$regex": regex_filter, "$options": "i"}#,
                        #timestamp={"$lt": float(_till.timestamp()), "$gt": float(_from.timestamp())}
                    )
                ):
                    # don't add duplicates
                    if incident["provider_info"]["name"] + "-" + incident["unique_string"] not in [x["provider_info"]["name"] + "-" + x["unique_string"] for x in incidents]:
                        incidents.append(incident)
            except Exception as e:
                logging.getLogger(__name__).warning("MongoDB not reachable, continueing anyways" + str(e))
                pass
    else:
        # Explicit incidents were given; unique strings are expanded into
        # one manufactured incident per provider.
        if type(incidents) == str:
            incidents = [incidents]
        if type(incidents) == list and len(incidents) > 0 and type(incidents[0]) == str:
            manufactured = []
            for item in incidents:
                for provider in providers:
                    manufactured.append(string_to_incident(item, provider_info=provider))
            incidents = manufactured
    replay_stats["amount_incidents"] = len(incidents)
    incident_ids = []
    for incident in incidents:
        incident_ids.append(incident["unique_string"])
    if replay_stats["amount_incidents"] == 1:
        replay_stats["incidents"] = incidents
    else:
        replay_stats["incidents"] = incident_ids
    logging.getLogger(__name__).info("Found " + str(len(incident_ids)) + " incidents.")
    if not only_report:
        # Send in original push order.
        sorted_list = sorted(incidents, key=lambda k: k['provider_info']['pushed'])
        logging.getLogger(__name__).info("Replay: Sorted " + str(len(sorted_list)) + " incidents ...")
        if async_execution:
            # send to witnesses
            thr = threading.Thread(target=_send_list_to_witness,
                                   args=(processor, sorted_list, matched_targets, async_queue))
            thr.start()  # we dont care when it finishes
            replay_stats["incidents_sent"] = True
        else:
            number = _send_list_to_witness(processor,
                                           sorted_list,
                                           targets=matched_targets,
                                           async_queue=async_queue)
            replay_stats["incidents_sent"] = number
    else:
        replay_stats["incidents_sent"] = False
    return replay_stats
| 14,288 | 3,880 |
from pprint import pprint
from st2common.runners.base_action import Action
class PrintConfigAction(Action):
    """Debug action that dumps the pack configuration to stdout."""

    def run(self):
        """Print the action's config dict between separator lines."""
        separator = "========="
        print(separator)
        pprint(self.config)
        print(separator)
| 212 | 63 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: rviz.proto
# NOTE(review): hand-edits will be lost — change rviz.proto and rerun
# protoc instead.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor holding the serialized rviz.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='rviz.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\nrviz.proto\"\x07\n\x05\x45mpty\"\x1b\n\x0b\x43odeRequest\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x0c\"3\n\x15\x43reateInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"P\n\x06Status\x12\"\n\x05value\x18\x01 \x01(\x0e\x32\x13.Status.StatusValue\"\"\n\x0bStatusValue\x12\t\n\x05\x45RROR\x10\x00\x12\x08\n\x04\x44ONE\x10\x01\x32\x61\n\x04RViz\x12#\n\x08run_code\x12\x0c.CodeRequest\x1a\x07.Status\"\x00\x12\x34\n\x0f\x63reate_instance\x12\x16.CreateInstanceRequest\x1a\x07.Status\"\x00\x62\x06proto3'
)
# Enum descriptor for Status.StatusValue (ERROR=0, DONE=1).
_STATUS_STATUSVALUE = _descriptor.EnumDescriptor(
  name='StatusValue',
  full_name='Status.StatusValue',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ERROR', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='DONE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=151,
  serialized_end=185,
)
_sym_db.RegisterEnumDescriptor(_STATUS_STATUSVALUE)
# Message descriptors below are generated from rviz.proto — do not hand-edit.
_EMPTY = _descriptor.Descriptor(
  name='Empty',
  full_name='Empty',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=14,
  serialized_end=21,
)
# CodeRequest: single bytes field `code`.
_CODEREQUEST = _descriptor.Descriptor(
  name='CodeRequest',
  full_name='CodeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='CodeRequest.code', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=23,
  serialized_end=50,
)
# CreateInstanceRequest: string `name` plus bytes `data`.
_CREATEINSTANCEREQUEST = _descriptor.Descriptor(
  name='CreateInstanceRequest',
  full_name='CreateInstanceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='CreateInstanceRequest.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='data', full_name='CreateInstanceRequest.data', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=52,
  serialized_end=103,
)
# Status: single enum field `value` of type Status.StatusValue.
_STATUS = _descriptor.Descriptor(
  name='Status',
  full_name='Status',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='Status.value', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _STATUS_STATUSVALUE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=105,
  serialized_end=185,
)
# Wire up cross-references and register everything with the symbol db.
_STATUS.fields_by_name['value'].enum_type = _STATUS_STATUSVALUE
_STATUS_STATUSVALUE.containing_type = _STATUS
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
DESCRIPTOR.message_types_by_name['CodeRequest'] = _CODEREQUEST
DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
  'DESCRIPTOR' : _EMPTY,
  '__module__' : 'rviz_pb2'
  # @@protoc_insertion_point(class_scope:Empty)
  })
_sym_db.RegisterMessage(Empty)
CodeRequest = _reflection.GeneratedProtocolMessageType('CodeRequest', (_message.Message,), {
  'DESCRIPTOR' : _CODEREQUEST,
  '__module__' : 'rviz_pb2'
  # @@protoc_insertion_point(class_scope:CodeRequest)
  })
_sym_db.RegisterMessage(CodeRequest)
CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), {
  'DESCRIPTOR' : _CREATEINSTANCEREQUEST,
  '__module__' : 'rviz_pb2'
  # @@protoc_insertion_point(class_scope:CreateInstanceRequest)
  })
_sym_db.RegisterMessage(CreateInstanceRequest)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
  'DESCRIPTOR' : _STATUS,
  '__module__' : 'rviz_pb2'
  # @@protoc_insertion_point(class_scope:Status)
  })
_sym_db.RegisterMessage(Status)
# RViz service: run_code(CodeRequest) -> Status,
# create_instance(CreateInstanceRequest) -> Status.
_RVIZ = _descriptor.ServiceDescriptor(
  name='RViz',
  full_name='RViz',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=187,
  serialized_end=284,
  methods=[
  _descriptor.MethodDescriptor(
    name='run_code',
    full_name='RViz.run_code',
    index=0,
    containing_service=None,
    input_type=_CODEREQUEST,
    output_type=_STATUS,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='create_instance',
    full_name='RViz.create_instance',
    index=1,
    containing_service=None,
    input_type=_CREATEINSTANCEREQUEST,
    output_type=_STATUS,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_RVIZ)
DESCRIPTOR.services_by_name['RViz'] = _RVIZ
# @@protoc_insertion_point(module_scope)
import bottle
from bottle import request, route, template, auth_basic, redirect
from utils.db_helper import create_blank
from utils.db_manage import get_all_blanks, get_all_questions_by_token, \
get_blank_id_by_token, add_new_users_answers, \
get_blank_info_by_token, get_report_by_token, get_all_standards_by_token, \
get_all_users_by_token, mark_competences_as_used, get_competences_by_token, \
change_blank_state_by_token, get_user_answers_by_user_id, \
change_user_state_by_user_id, get_user_state_by_user_id
import json
from utils.config import *
from utils.notify import send_email
from utils.analysis import get_analysis_by_blank_id
import pathlib
def check(user, password):
    """Basic-auth credential check against the configured admin account."""
    expected = (CONFIG["ADMIN_LOGIN"], CONFIG["ADMIN_PASSWORD"])
    return (user, password) == expected
@route('/admin')
@auth_basic(check)
def get_admin_page():
    """Render the admin dashboard listing all blanks and their standards."""
    blanks = get_all_blanks()
    moderating_blanks = get_all_blanks('moderating')
    tokens = [blank['token'] for blank in blanks]
    # Pair each blank's token with its evaluation standards for the template.
    standards = [
        {
            'standards': get_all_standards_by_token(token),
            'token': token
        }
        for token in tokens
    ]
    # NOTE(review): _path is only assigned in the __main__ block, so these
    # handlers assume the module is run as a script — confirm.
    return template('{}/assets/admin.tpl'.format(_path), blanks=blanks, moderatingBlanks=moderating_blanks, standards=standards)
@route('/admin/new_blank', method="POST")
@auth_basic(check)
def create_new_blank():
    """Create a new blank from the posted form/files and redirect to it.

    SECURITY FIX: this admin endpoint previously lacked @auth_basic, so
    anyone could create blanks; it is now protected like the other
    /admin routes.
    """
    data = {
        'forms': request.forms,
        'files': request.files
    }
    token = create_blank(data)
    redirect('/admin/blank/{}'.format(token))
@route('/admin/blank/<token>')
@auth_basic(check)
def get_admin_blank_page(token):
    """Render the admin editing page for a single blank."""
    context = {
        'blank': get_blank_info_by_token(token),
        'questions': get_all_questions_by_token(token),
        'standards': get_all_standards_by_token(token),
        'token': token,
        'hostname': CONFIG["HOSTNAME"],
        'port': CONFIG["PORT"],
    }
    return template('{}/assets/blank.tpl'.format(_path), **context)
@route('/admin/save_blank/<token>', method="POST")
@auth_basic(check)
def save_blank(token):
    """Persist the posted questions (a JSON array of JSON-encoded strings)."""
    payload = json.loads(request.body.read().decode('utf-8'))
    questions = [json.loads(item) for item in payload]
    mark_competences_as_used(questions, token)
@route('/admin/send_email/<token>')
@auth_basic(check)
def send_notification(token):
    """E-mail every participant of the blank and mark the blank as sent."""
    recipients = get_all_users_by_token(token)
    send_email(recipients, token)
    change_blank_state_by_token("sent", token)
    redirect('/admin/blank/{}'.format(token))
@route('/admin/report/<token>')
@auth_basic(check)
def get_admin_report_page(token):
    """Render the aggregated report page for a blank."""
    reports = get_report_by_token(token)
    blank = get_blank_info_by_token(token)
    blank_id = blank["id"]
    users = get_all_users_by_token(token)
    total_questions = len(get_competences_by_token(token))
    # Per-user completion percentage: answered / total questions.
    user_stat = []
    for user in users:
        answered = len(get_user_answers_by_user_id(user['user_id']))
        user_stat.append({
            'user_id': user['user_id'],
            'user_email': user['user_email'],
            'stat': int(answered / total_questions * 100)
        })
    analysis = get_analysis_by_blank_id(blank_id)
    return template('{}/assets/report.tpl'.format(_path), reports=reports, blank=blank, users=users, user_stat=user_stat, analysis=analysis)
@route('/quiz/<token>/<user_id>')
def get_quiz_page(token, user_id):
    """Render the quiz page for one participant of a blank."""
    context = dict(
        questions=get_competences_by_token(token),
        blank=get_blank_info_by_token(token),
        state=get_user_state_by_user_id(user_id),
        token=token,
        user_id=user_id,
    )
    return template('{}/assets/quiz.tpl'.format(_path), **context)
@route('/quiz/<token>/<user_id>', method='POST')
def write_new_user_answers(token, user_id):
    """Store a user's posted answers; mark the user finished when done.

    The request body is a JSON array of JSON-encoded answer strings.
    """
    blank_id = get_blank_id_by_token(token)
    answers = request.body.read().decode('utf-8')
    answers = json.loads(answers)
    answers = [json.loads(answer) for answer in answers]
    add_new_users_answers(answers, blank_id, user_id)
    questions_amount = len(get_competences_by_token(token))
    user_answers_amount = len(get_user_answers_by_user_id(user_id))
    # BUGFIX: was `user_answers_amount // questions_amount == 1`, which no
    # longer marks users finished once their answer count reaches twice
    # the question count; compare directly instead.
    if user_answers_amount >= questions_amount:
        change_user_state_by_user_id("finished", user_id)
def main(_host="localhost"):
    """Start the bottle app on *_host* and the configured port."""
    app = bottle.app()
    bottle.run(app=app, host=_host, port=CONFIG["PORT"])
if __name__ == "__main__":
    # NOTE(review): _path is only defined here, but the route handlers
    # above read it at request time — importing this module and serving
    # it through another entry point would raise NameError; confirm.
    _path = pathlib.Path().absolute()
    main(CONFIG["HOSTNAME"])
| 4,418 | 1,540 |
from django import forms
from .models import Contact
#class ContactForm(forms.Form):
#name = forms.CharField(max_length=50)
class ContactForm(forms.Form):
    """Plain (non-model) contact form; fields validated by Django."""
    name = forms.CharField()  # free-text name (required by default)
    email = forms.EmailField()  # validated e-mail address
    age = forms.IntegerField()  # whole-number age
class ContactModelForm(forms.ModelForm):
    """Form generated from the Contact model, exposing every model field."""
    class Meta:
        model = Contact
        fields = '__all__'
| 362 | 107 |
#encoding:utf-8
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the input image.
src = cv2.imread('test01.jpg')
# Image size (rows, cols).
rows, cols = src.shape[:2]
# Gaussian-blur the source to suppress noise before edge detection.
img = cv2.GaussianBlur(src, (3,3), 0)
# Convert to grayscale.
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Canny edge detection (extract the image's edge information).
edges = cv2.Canny(gray,50,250,apertureSize = 3)
cv2.imwrite("canny.jpg", edges)
# Probabilistic Hough transform to find the A4 sheet's edges.
lines = cv2.HoughLinesP(edges,1,np.pi/180,50,minLineLength=90,maxLineGap=10)
# Print the endpoints of the two strongest segments.
# BUGFIX: `print(x1,y1),(x2,y2)` printed only the first endpoint and
# built a throwaway tuple; print both endpoints instead.
for x1,y1,x2,y2 in lines[0]:
    print((x1,y1),(x2,y2))
for x1,y1,x2,y2 in lines[1]:
    print((x1,y1),(x2,y2))
# Draw the detected edge on the grayscale image.
for x1,y1,x2,y2 in lines[0]:
    cv2.line(gray, (x1,y1), (x2,y2), (0,0,255), 1)
# Perspective transform matrix from four hard-coded corner points.
pos1 = np.float32([[114, 82], [287, 156], [8, 322], [216, 333]])
pos2 = np.float32([[0, 0], [188, 0], [0, 262], [188, 262]])
M = cv2.getPerspectiveTransform(pos1, pos2)
# Apply the perspective warp to the original image.
result = cv2.warpPerspective(src, M, (190, 272))
# Show original and rectified result.
cv2.imshow("original", src)
cv2.imshow("result", result)
# Wait for a key press, then close the windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
from os import listdir
from os.path import isfile, join
from videoprops import get_video_properties
import math
import subprocess
import time

input_path = 'input/Test_Set/'
output_path = 'output/Test_Set/'

# Convert every video file in the input folder via run_v2e.sh,
# rescaled to a fixed 346-px width with preserved aspect ratio.
files = [f for f in listdir(input_path) if isfile(join(input_path, f))]
for file in files:
    input_full_path = input_path + file
    output_full_path = output_path + file[:-4]  # strip the 3-char extension
    props = get_video_properties(input_full_path)
    print(f'''Resolution: {props['width']}×{props['height']}''')
    new_w = 346
    new_h = math.ceil((new_w * props['height'])/props['width'])
    print(f'''New resolution: {new_w}×{new_h}''')
    start_time = time.time()
    # HARDENING: pass an argument list with shell=False instead of a
    # concatenated shell string, so file names containing spaces or shell
    # metacharacters cannot break (or inject into) the command.
    subprocess.call(['./run_v2e.sh', input_full_path, output_full_path,
                     str(new_w), str(new_h)])
    print("--- %s seconds ---" % (time.time() - start_time))
    print('\n')
#!/usr/bin/python
config_exp = '''
robots:
1:
id: 1
ip: 192.168.0.1
macaddress: ec:08:6b:0d:68:ef
cable_ip: 150.164.212.43
configs:
map: DIR_/map/ambiente.png
treefile: DIR_/steinerData1.dat
resolution: 0.05
exit: 1
simulation: 1
broadcast_address: 127.255.255.255
algorithm_port: 39988
configuration_port: 46544
'''
import sys
import os
import math
class Create():
    """Renders the config_exp template into config_sim.yaml."""

    def create(self, dir_):
        """Substitute *dir_* for DIR_ in the template and write the file."""
        rendered = config_exp.replace('DIR_', dir_)
        with open("config_sim.yaml", 'w') as out:
            out.write(rendered)
if __name__ == "__main__":
    # Usage: python <script> <experiment_dir>
    dir_ = sys.argv[1]
    exp = Create()
    exp.create(dir_)
| 610 | 289 |
import pytest
from graphql_relay import to_global_id
from rest_framework.utils import json
@pytest.mark.django_db()
def test_node_permission_classes_without_authentication(book_factory, graphql_client):
    """Anonymous access to the admin-only book node must be denied."""
    book = book_factory()
    query = """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }"""
    variables = {"id": to_global_id("BookType", book.pk)}
    response = graphql_client.execute(query, variables=variables)
    expected = {
        "data": {"bookAsAdmin": None},
        "errors": [
            {
                "locations": [{"column": 13, "line": 3}],
                "message": "You do not have permission to perform this action.",
                "path": ["bookAsAdmin"],
            }
        ],
    }
    assert response.status_code == 200
    assert json.loads(response.content) == expected
@pytest.mark.django_db()
def test_node_permission_classes_without_permission(
    user_factory, book_factory, graphql_client
):
    """An authenticated but non-staff user must also be denied."""
    regular_user = user_factory()
    book = book_factory()
    graphql_client.force_authenticate(regular_user)
    query = """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }"""
    variables = {"id": to_global_id("BookType", book.pk)}
    response = graphql_client.execute(query, variables=variables)
    expected = {
        "data": {"bookAsAdmin": None},
        "errors": [
            {
                "locations": [{"column": 13, "line": 3}],
                "message": "You do not have permission to perform this action.",
                "path": ["bookAsAdmin"],
            }
        ],
    }
    assert response.status_code == 200
    assert json.loads(response.content) == expected
@pytest.mark.django_db()
def test_node_permission_classes_with_permission(
    user_factory, book_factory, graphql_client
):
    """A staff user may fetch the admin-only book node."""
    staff_user = user_factory(is_staff=True)
    book = book_factory()
    graphql_client.force_authenticate(staff_user)
    query = """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }"""
    book_gid = to_global_id("BookType", book.pk)
    response = graphql_client.execute(query, variables={"id": book_gid})
    assert response.status_code == 200
    assert json.loads(response.content) == {
        "data": {"bookAsAdmin": {"id": book_gid}}
    }
| 2,309 | 708 |
from __future__ import with_statement
import datetime
import firebirdsql
from firebirdsql.tests.base import * # noqa
from firebirdsql.consts import * # noqa
class TestProc(TestBase):
    """Stored-procedure tests: callproc(), selectable procedures, and RETURNING."""

    def setUp(self):
        """Create a sample table, three procedures, and three rows of data."""
        TestBase.setUp(self)
        cur = self.connection.cursor()
        cur.execute('''
            CREATE TABLE foo_table (
                a INTEGER NOT NULL,
                b VARCHAR(30) NOT NULL UNIQUE,
                c VARCHAR(1024),
                d DECIMAL(16,3) DEFAULT -0.123,
                e DATE DEFAULT '1967-08-11',
                f TIMESTAMP DEFAULT '1967-08-11 23:45:01',
                g TIME DEFAULT '23:45:01',
                h BLOB SUB_TYPE 1,
                i DOUBLE PRECISION DEFAULT 0.0,
                j FLOAT DEFAULT 0.0,
                PRIMARY KEY (a),
                CONSTRAINT CHECK_A CHECK (a <> 0)
            )
        ''')
        # foo_proc: parameterless, always returns (1, 'ABC').
        cur.execute('''
            CREATE PROCEDURE foo_proc
              RETURNS (out1 INTEGER, out2 VARCHAR(30))
              AS
              BEGIN
                out1 = 1;
                out2 = 'ABC';
              END
        ''')
        # bar_proc: echoes its two input parameters back as outputs.
        cur.execute('''
            CREATE PROCEDURE bar_proc (param_a INTEGER, param_b VARCHAR(30))
              RETURNS (out1 INTEGER, out2 VARCHAR(30))
              AS
              BEGIN
                out1 = param_a;
                out2 = param_b;
              END
        ''')
        # baz_proc: selectable procedure (SUSPEND) looking up a row by key.
        cur.execute('''
            CREATE PROCEDURE baz_proc(param_a INTEGER)
              RETURNS (out1 INTEGER, out2 VARCHAR(30))
              AS
              BEGIN
                SELECT a, b FROM foo_table
                    WHERE a= :param_a
                    INTO :out1, :out2;
                SUSPEND;
              END
        ''')
        self.connection.commit()
        # Insert three sample rows (column defaults fill the rest).
        cur.execute("""
            insert into foo_table(a, b, c,h)
                values (1, 'a', 'b','This is a memo')""")
        cur.execute("""
            insert into foo_table(a, b, c, e, g, i, j)
                values (2, 'A', 'B', '1999-01-25', '00:00:01', 0.1, 0.1)""")
        cur.execute("""
            insert into foo_table(a, b, c, e, g, i, j)
                values (3, 'X', 'Y', '2001-07-05', '00:01:02', 0.2, 0.2)""")
        self.connection.commit()

    def test_call_proc(self):
        """Exercise procedures via callproc() and via SELECT ... FROM proc."""
        cur = self.connection.cursor()
        r = cur.callproc("foo_proc")
        # callproc() returns the output row; fetchone() must agree with it.
        self.assertEqual(cur.fetchone(), r)
        cur.close()
        cur = self.connection.cursor()
        try:
            # Non-selectable procedures fail differently per server version.
            rs = cur.execute("select out1, out2 from foo_proc")
            if rs is None:
                # foo_proc not selectable with Firebird 1.5
                pass
            else:
                pass
        except firebirdsql.OperationalError:
            # foo_proc not selectable with Firebird 2.x
            pass
        finally:
            cur.close()
        cur = self.connection.cursor()
        cur.callproc("bar_proc", (1, "ABC"))
        rs = cur.fetchallmap()
        self.assertEqual(len(rs), 1)
        self.assertEqual(rs[0]['OUT1'], 1)
        self.assertEqual(rs[0]['OUT2'], 'ABC')
        cur.close()
        # baz_proc is selectable, so plain SELECT with a parameter works.
        cur = self.connection.cursor()
        cur.execute("select out1, out2 from baz_proc(?)", (1, ))
        rs = cur.fetchall()
        self.assertEqual(len(rs), 1)
        self.assertEqual((1, 'a'), rs[0])
        cur.close()

    def test_insert_returning(self):
        """INSERT ... RETURNING yields the defaulted column value."""
        cur = self.connection.cursor()
        cur.execute("insert into foo_table(a, b) values (4, 'b') returning e")
        self.assertEqual(cur.rowcount, 1)
        self.assertEqual(cur.fetchone()[0], datetime.date(1967, 8, 11))
        cur.close()

    def test_prep_insert_returning(self):
        """Prepared INSERT ... RETURNING behaves like the direct form."""
        cur = self.connection.cursor()
        prep = cur.prep("insert into foo_table(a, b) values (?, 'b') returning e")
        cur.execute(prep, (5, ))
        self.assertEqual(cur.fetchone()[0], datetime.date(1967, 8, 11))
        cur.close()
| 3,960 | 1,285 |
"""
Name: Christopher Weaver
Date: 11/07/2021
Project: USW Coursework 2 Remake with Python and MySQL
Additional lib Used: black, pylint, sqlacodegen.
Description: Log in methods associated with log in class.
"""
# coding=utf-8
# Libraries
import pyinputplus as pyin
from log_in_class import LogInObject
from user import User
from base import create_session
from sqlalchemy.orm.exc import NoResultFound
# Functions
def query_user():
    """Prompt for a username and print the matching user record, if any."""
    db_session = create_session()
    search_name = pyin.inputStr(" Insert Search User: ")
    match = db_session.query(User).filter_by(username=search_name).first()
    if not match:
        print("Username Not Found")
        print("\nOur User:")
        print(match)
    else:
        print("Username Found")
        print("\nOur User:")
        print("Username: ", match.username, "Firstname: ", match.firstname)
    db_session.close()
def log_in_user():
    """Run the interactive log-in flow and return the resulting LogInObject.

    Fix: the original discarded check_username_match()'s return value, so
    callers had no way to get the logged-in user object; it is now returned
    (None on failure). Existing callers that ignore the return are unaffected.
    """
    return check_username_match()
def pin_check(pin_in):
    """Prompt for a PIN and report whether it equals *pin_in*."""
    attempt = pyin.inputStr("Insert Pin: ")
    matched = attempt == pin_in
    print("Pin OK" if matched else "Pin Bad")
    return matched
def check_password_match(pw_in):
    """Prompt for a password and compare it with *pw_in*.

    Security fix: the original started with a debug ``print(pw_in)`` that
    echoed the stored password to the console; that leak is removed.
    NOTE(review): comparison is against a plain-text stored password —
    hashed storage (e.g. hashlib/secrets comparison) would be safer.
    Returns True on a match, False otherwise.
    """
    pwd_entry = pyin.inputStr("Insert Password: ")
    if pw_in == pwd_entry:
        print("Password OK!")
        return True
    print("BAD PASSWORD!")
    return False
def is_logged_in(pin_match, pw_match):
    """Return True only when both the PIN and the password checks passed.

    Fix: uses logical ``and`` instead of bitwise ``&`` — ``&`` does not
    short-circuit and raises TypeError for operands (e.g. None) that do not
    implement it.  ``bool(...)`` keeps the return a strict boolean.
    """
    return bool(pin_match and pw_match)
def user_name_entry():
    """Prompt the operator for a username and return it."""
    return pyin.inputStr("Insert Username: ")
def check_username_match():
    """Interactively authenticate a user and return a populated LogInObject.

    Fixes:
    - The ``session.query(...).one()`` call now sits *inside* the try block,
      so NoResultFound for an unknown username is actually caught; previously
      the lookup ran before the try and crashed the program uncaught.
    - The session is closed in a ``finally`` clause (it previously leaked).

    Returns the LogInObject with ``logged_in=True`` when both password and
    PIN match; returns None on lookup failure or failed credentials.
    """
    session = create_session()
    unl = user_name_entry()
    try:
        # .one() raises NoResultFound when no row matches the username.
        our_user = session.query(User).filter(User.username == unl).one()
        print(our_user)
        user_obj = LogInObject(
            username=our_user.username,
            password=our_user.password,
            firstname=our_user.firstname,
            lastname=our_user.lastname,
            pin=our_user.pin,
            usertype=our_user.usertype,
            is_admin=our_user.is_admin,
            is_lecturer=our_user.is_lecturer,
            is_student=our_user.is_student,
            pin_match=False,
            logged_in=False,
            pw_match=False,
        )
        if check_password_match(our_user.password) and pin_check(our_user.pin):
            print("Username Found")
            # debugging
            print("Username: ", our_user.username, "Firstname: ", our_user.firstname)
            print("Password Match")
            print("Pin match")
            # set condition args to true
            user_obj.pw_match = True
            user_obj.pin_match = True
            user_obj.logged_in = is_logged_in(user_obj.pin_match, user_obj.pw_match)
            print(user_obj)
            return user_obj
        print("Username Not Found")
        print("\nOur User:")
        print(our_user)
    except NoResultFound as e:
        print(e)
    finally:
        session.close()
| 3,149 | 1,000 |
from utils.api import APIView
from utils.decorators import login_required
from course.models import Course, Registration
from ..models import Assignment
from ..serializers import AssignmentSerializer
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
class AssignmentAPI(APIView):
    """Read-only endpoint exposing the visible assignments of a course."""

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="course_id",
                in_=openapi.IN_QUERY,
                description="Unique ID of a course",
                required=True,
                type=openapi.TYPE_INTEGER,
            ),
            openapi.Parameter(
                name="assignment_id",
                in_=openapi.IN_QUERY,
                # Fixed grammar in the API docs ("a assignment").
                description="Unique ID of an assignment",
                type=openapi.TYPE_INTEGER,
            ),
            openapi.Parameter(
                name="limit",
                in_=openapi.IN_QUERY,
                description="Number of assignments to show",
                type=openapi.TYPE_STRING,
                default=10,
            ),
            openapi.Parameter(
                name="offset",
                in_=openapi.IN_QUERY,
                description="ID of the first assignment of list",
                type=openapi.TYPE_STRING,
                default=0,
            ),
        ],
        operation_description="Get assignment list of the course",
        responses={200: AssignmentSerializer},
    )
    @login_required
    def get(self, request):
        """Return one assignment (when assignment_id is given) or a paginated list.

        The requester must be registered in the course; only assignments
        flagged ``visible`` are exposed.
        """
        assignment_id = request.GET.get("assignment_id")
        course_id = request.GET.get("course_id")
        if not course_id:
            return self.error("Invalid parameter, course_id is required")
        try:
            # Both must hold: the course exists AND the user is registered in it.
            Course.objects.get(id=course_id)
            Registration.objects.get(user_id=request.user.id, course_id=course_id)
        except Course.DoesNotExist:
            return self.error("Course does not exist")
        except Registration.DoesNotExist:
            return self.error("Invalid access, not registered user")
        context = {"request": request}
        if assignment_id:
            try:
                assignment = Assignment.objects.get(id=assignment_id, course_id=course_id, visible=True)
                return self.success(AssignmentSerializer(assignment, context=context).data)
            except Assignment.DoesNotExist:
                # Fixed grammar in the error message ("does not exists").
                return self.error("Assignment does not exist")
        assignments = Assignment.objects.filter(course_id=course_id, visible=True)
        return self.success(self.paginate_data(request, assignments, AssignmentSerializer, context))
| 2,630 | 684 |
# generated by ModelBuilder
from scielo_migration.iid2json.meta_record import MetaRecord
# generated by ModelBuilder
# Refactored from ModelBuilder output: the repeated get_field_content(...)
# boilerplate is routed through three private helpers so each accessor
# states only its field tag (and, for composites, its subfield map).
class BaseArticleRecord(MetaRecord):
    """Accessors for the numbered (vNNN) fields of a SciELO article record.

    Each property delegates to MetaRecord.get_field_content with the field
    tag and occurrence/subfield flags that ModelBuilder originally emitted.
    """

    def __init__(
            self, record, multi_val_tags=None,
            data_dictionary=None):
        super().__init__(
            record, multi_val_tags, data_dictionary)

    def _single_value(self, tag):
        # Single-occurrence field, plain value (no subfields).
        return self.get_field_content(tag, subfields={}, single=True, simple=True)

    def _multi_value(self, tag):
        # Repeatable field, plain values (no subfields).
        return self.get_field_content(tag, subfields={}, single=False, simple=True)

    def _composite(self, tag, subfields):
        # Repeatable field whose subfield markers are renamed via *subfields*.
        return self.get_field_content(tag, subfields=subfields, single=False, simple=False)

    @property
    def fulltexts(self):
        """Fulltexts (v000)."""
        return self._single_value("v000")

    @property
    def html_url(self):
        """Html Url (v000)."""
        return self._single_value("v000")

    @property
    def is_ahead_of_print(self):
        """Is Ahead Of Print (v000)."""
        return self._single_value("v000")

    @property
    def issue(self):
        """Issue (v000)."""
        return self._single_value("v000")

    @property
    def issue_label(self):
        """Issue Label (v000)."""
        return self._single_value("v000")

    @property
    def issue_url(self):
        """Issue Url (v000)."""
        return self._single_value("v000")

    @property
    def journal(self):
        """Journal (v000)."""
        return self._single_value("v000")

    @property
    def mixed_affiliations(self):
        """Mixed Affiliations (v000)."""
        return self._single_value("v000")

    @property
    def pdf_url(self):
        """Pdf Url (v000)."""
        return self._single_value("v000")

    @property
    def translated_htmls(self):
        """Translated Htmls (v000)."""
        return self._single_value("v000")

    @property
    def assets_code(self):
        """Assets Code (v004)."""
        return self._single_value("v004")

    @property
    def author(self):
        """Author (v010)."""
        return self._composite("v010", {'1': 'xref', 'k': 'orcid', 'l': 'lattes', 'n': 'given_names', 'p': 'prefix', 'r': 'role', 's': 'surname'})

    @property
    def corporative_authors(self):
        """Corporative Authors (v011)."""
        return self._multi_value("v011")

    @property
    def article_titles(self):
        """Article Titles (v012)."""
        return self._composite("v012", {'s': 'subtitle', '_': 'text', 'l': 'language'})

    @property
    def page(self):
        """Page (v014)."""
        return self._composite("v014", {'e': 'elocation', 'f': 'start', 'l': 'end', 's': 'sequence'})

    @property
    def illustrative_material(self):
        """Illustrative Material (v038)."""
        return self._multi_value("v038")

    @property
    def original_language(self):
        """Original Language (v040)."""
        return self._single_value("v040")

    @property
    def original_section(self):
        """Original Section (v049)."""
        return self._single_value("v049")

    @property
    def section(self):
        """Section (v049)."""
        return self._single_value("v049")

    @property
    def section_code(self):
        """Section Code (v049)."""
        return self._single_value("v049")

    @property
    def translated_section(self):
        """Translated Section (v049)."""
        return self._single_value("v049")

    @property
    def thesis_degree(self):
        """Thesis Degree (v051)."""
        return self._single_value("v051")

    @property
    def thesis_organization(self):
        """Thesis Organization (v052)."""
        return self._single_value("v052")

    @property
    def project_sponsor(self):
        """Project Sponsor (v058)."""
        return self._multi_value("v058")

    @property
    def project_name(self):
        """Project Name (v059)."""
        return self._multi_value("v059")

    @property
    def contract(self):
        """Contract (v060)."""
        return self._multi_value("v060")

    @property
    def issue_publication_date(self):
        """Issue Publication Date (v065)."""
        return self._single_value("v065")

    @property
    def publication_date(self):
        """Publication Date (v065)."""
        return self._single_value("v065")

    @property
    def affiliations(self):
        """Affiliations (v070)."""
        return self._composite("v070", {'1': 'div1', '2': 'div2', '3': 'div3', '_': 'orgname', 'c': 'city', 'e': 'email', 'i': 'id', 'p': 'country', 's': 'state'})

    @property
    def article_type(self):
        """Article Type (v071)."""
        return self._single_value("v071")

    @property
    def document_type(self):
        """Document Type (v071)."""
        return self._single_value("v071")

    @property
    def abstracts(self):
        """Abstracts (v083)."""
        return self._composite("v083", {'a': 'text', 'l': 'language'})

    @property
    def keywords(self):
        """Keywords (v085)."""
        return self._composite("v085", {'k': 'text', 'l': 'language'})

    @property
    def processing_date(self):
        """Processing Date (v091)."""
        return self._single_value("v091")

    @property
    def update_date(self):
        """Update Date (v091)."""
        return self._single_value("v091")

    @property
    def creation_date(self):
        """Creation Date (v093)."""
        return self._single_value("v093")

    @property
    def receive_date_iso(self):
        """Receive Date ISO (v112)."""
        return self._single_value("v112")

    @property
    def acceptance_date_iso(self):
        """Acceptance Date ISO (v114)."""
        return self._single_value("v114")

    @property
    def review_date_iso(self):
        """Review Date ISO (v116)."""
        return self._single_value("v116")

    @property
    def data_model_version(self):
        """Data Model Version (v120)."""
        return self._single_value("v120")

    @property
    def internal_sequence_id(self):
        """Internal Sequence Id (v121)."""
        return self._single_value("v121")

    @property
    def order(self):
        """Order (v121)."""
        return self._single_value("v121")

    @property
    def ahead_publication_date(self):
        """Ahead Publication Date (v223)."""
        return self._single_value("v223")

    @property
    def document_publication_date(self):
        """Document Publication Date (v223)."""
        return self._single_value("v223")

    @property
    def doi(self):
        """DOI (v237)."""
        return self._single_value("v237")

    @property
    def normalized_affiliations(self):
        """Normalized Affiliations (v240)."""
        return self._composite("v240", {'i': 'id', 'p': 'country'})

    @property
    def doi_with_lang(self):
        """DOI with language (v337)."""
        return self._composite("v337", {'d': 'doi', 'l': 'language'})

    @property
    def any_issn(self):
        """Any Issn (v435)."""
        return self._composite("v435", {'_': 'value', 't': 'type'})

    @property
    def permissions(self):
        """Permissions (v540)."""
        return self._single_value("v540")

    @property
    def languages(self):
        """Languages (v601)."""
        return self._multi_value("v601")

    @property
    def xml_languages(self):
        """Xml Languages (v601)."""
        return self._multi_value("v601")

    @property
    def scielo_domain(self):
        """Scielo Domain (v690)."""
        return self._single_value("v690")

    @property
    def file_code(self):
        """File Code (v702)."""
        return self._single_value("v702")

    @property
    def original_html(self):
        """Original Html (v702)."""
        return self._single_value("v702")

    @property
    def publisher_id(self):
        """Publisher Id (v880)."""
        return self._single_value("v880")

    @property
    def scielo_pid_v2(self):
        """SciELO PID v2 (v880)."""
        return self._single_value("v880")

    @property
    def publisher_ahead_id(self):
        """Publisher Ahead Id (v881)."""
        return self._single_value("v881")

    @property
    def scielo_pid_v3(self):
        """SciELO PID v3 (v885)."""
        return self._single_value("v885")

    @property
    def collection_acronym(self):
        """Collection Acronym (v992)."""
        return self._single_value("v992")
| 14,380 | 4,445 |
# DNA sequence exercises: split, reverse, and complement a sequence.
seq = "AACTGAGAC"


def split(sequence):
    """Return the characters of *sequence* as a list.

    Bug fix: the original iterated the global ``seq`` and ignored its argument.
    """
    return [char for char in sequence]


word = 'sequence'
# Bug fix: Task 1 is meant to split the DNA sequence, so pass ``seq``.
# The original passed ``word`` and only produced the right output because of
# the global-variable bug inside split(); the printed result is unchanged.
seqlist = split(seq)
print("Task 1: разделена секвенция\n", seqlist)


def revetring(sequence):
    """Return *sequence* reversed, as a list of characters.

    Bug fix: the original reversed the global ``seqlist`` and ignored its argument.
    """
    return list(sequence)[::-1]


print("Task 2: обратна секвенция\n", revetring(seq))


def complimentation(sequence):
    """Return the base complement of *sequence* as a list (A<->T, C<->G).

    Any character other than A/T/C falls into the final branch and maps to
    "C", matching the original behavior for "G".
    Bug fix: iterates *sequence* itself instead of indexing by the length of
    the global ``seqlist``.
    """
    compsequence = []
    for base in sequence:
        if base == "A":
            compsequence.append("T")
        elif base == "T":
            compsequence.append("A")
        elif base == "C":
            compsequence.append("G")
        else:
            compsequence.append("C")
    return compsequence


print("Task 3: комплементарна секвенция\n", complimentation(seq))

compl_rev_seq = revetring(seq)
compl_rev_seq = complimentation(compl_rev_seq)
print("Task 4: комплементарна обратна секвенция\n", compl_rev_seq)
| 913 | 339 |
from .cutom_serializers import HassIoSerializers
from homeassistant.components.http import HomeAssistantView
import homeassistant.core as ha
class FunikiEntitiesView(HomeAssistantView):
    """Unauthenticated HTTP view returning the serialized entity registry."""

    url = "/api/funikientites"
    name = "api:funiki-entites-list"
    requires_auth = False

    @ha.callback
    def get(self, request):
        """Serialize the entity registry and return it as a JSON message."""
        hass = request.app["hass"]
        registry = hass.data['entity_registry']
        serialized = HassIoSerializers.entitySerializers(entityRegistry=registry)
        return self.json_message(serialized)
| 521 | 161 |
import configparser
import subprocess
import sys

import pip
from isee.common import get_env_var, get_file_path
def install_requires(project_dir=None):
    """Install the ``install_requires`` packages declared in setup.cfg.

    :param project_dir: directory containing setup.cfg; defaults to the
        GITHUB_WORKSPACE environment variable.

    Fix: ``pip.main()`` was removed from pip's public API in pip 10; the
    supported way to drive pip programmatically is to run its CLI in a
    subprocess.  ``check_call`` also surfaces failures as an exception
    instead of silently returning a status code.
    """
    if not project_dir:
        project_dir = get_env_var('GITHUB_WORKSPACE')
    path = get_file_path('setup.cfg', project_dir)
    config = configparser.ConfigParser()
    config.read(path)
    # install_requires is a newline-separated list; drop empty entries.
    pkgs = [pkg for pkg in config['options']['install_requires'].split('\n') if pkg]
    subprocess.check_call([sys.executable, '-m', 'pip', 'install'] + pkgs)
def build_dependency_wheels(repository_dir, wheelhouse, requirements_filepath=None):
    """Build wheels for a repository (and optional requirements) into *wheelhouse*.

    :param repository_dir: project installed in editable mode for the build.
    :param wheelhouse: directory that both receives and provides wheels
        (used as --wheel-dir and --find-links).
    :param requirements_filepath: optional requirements file to wheel as well.

    Fix: invokes pip via subprocess instead of the removed ``pip.main()``
    API (gone since pip 10); failures raise CalledProcessError.
    """
    args = ['wheel', '--wheel-dir', wheelhouse, '--find-links', wheelhouse]
    if requirements_filepath:
        args.extend(['--requirement', requirements_filepath])
    args.extend(['--editable', repository_dir])
    subprocess.check_call([sys.executable, '-m', 'pip'] + args)
| 751 | 247 |
"""Constants for the Belkin Wemo component."""
# Integration domain identifier.
DOMAIN = "wemo"
# Names of the custom services this component exposes.
SERVICE_SET_HUMIDITY = "set_humidity"
SERVICE_RESET_FILTER_LIFE = "reset_filter_life"
# Signal name fired when a Wemo subscription update arrives
# (presumably sent through Home Assistant's dispatcher -- verify at call sites).
WEMO_SUBSCRIPTION_EVENT = f"{DOMAIN}_subscription_event"
| 208 | 85 |
import time as t

s = t.time()
# Count the distinct values of a**b for 2 <= a, b <= 100 (Project Euler 29).
# A set comprehension deduplicates in a single pass, replacing the original
# O(n^2) count()/remove() loop, and the variable no longer shadows the
# builtin `set`. The printed count (and trailing elapsed time) is unchanged.
distinct_powers = sorted({a**b for a in range(2, 101) for b in range(2, 101)})
print(len(distinct_powers))
e = t.time()
print(e - s)
| 244 | 114 |
# Smoke-test driver: sample truncated latent vectors with the demo helpers
# and instantiate the ResNet BigGAN-style Generator.
from model_resnet import *
from demo import *
from utils import *
dim_z = 120  # latent vector size (matches Generator code_dim below)
vocab_size = 1000  # number of class labels (matches Generator n_class below)
num_samples = 12 #@param {type:"slider", min:1, max:20, step:1}
truncation = 0.32 #@param {type:"slider", min:0.02, max:1, step:0.02}
noise_seed = 0 #@param {type:"slider", min:0, max:100, step:1}
category = "951"
# truncated_z_sample comes from a star import above; presumably draws
# num_samples latents from a truncated normal -- TODO confirm.
z = truncated_z_sample(num_samples, truncation, noise_seed)
y = int(951)  # NOTE(review): duplicates `category` as an int class label -- verify
# print(z)
feed_dict = sample(z, y, truncation=truncation)
# print(feed_dict['input_y'].shape)
model = Generator(code_dim=120, n_class=1000, chn=6, debug=True)
# inputs = torch.from_numpy(feed_dict['input_z']).float()
# labels = torch.from_numpy(feed_dict['input_y']).float()
# out = model(inputs,labels)
# print(out.size())
# model.apply(weights_init)
print('0,1,2,3'.split(','))
# torch.save(model.state_dict(),'test_model.pth')
| 829 | 369 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .summary_data import SummaryData
class ExtensionSummaryData(SummaryData):
    """ExtensionSummaryData.
    :param assigned: Count of Licenses already assigned.
    :type assigned: int
    :param available: Available Count.
    :type available: int
    :param included_quantity: Quantity
    :type included_quantity: int
    :param total: Total Count.
    :type total: int
    :param assigned_through_subscription: Count of Extension Licenses assigned to users through msdn.
    :type assigned_through_subscription: int
    :param extension_id: Gallery Id of the Extension
    :type extension_id: str
    :param extension_name: Friendly name of this extension
    :type extension_name: str
    :param is_trial_version: Whether its a Trial Version.
    :type is_trial_version: bool
    :param minimum_license_required: Minimum License Required for the Extension.
    :type minimum_license_required: object
    :param remaining_trial_days: Days remaining for the Trial to expire.
    :type remaining_trial_days: int
    :param trial_expiry_date: Date on which the Trial expires.
    :type trial_expiry_date: datetime
    """
    # Maps Python attribute names to their REST wire names and serialized
    # types; consumed by the client's (de)serialization machinery.
    _attribute_map = {
        'assigned': {'key': 'assigned', 'type': 'int'},
        'available': {'key': 'available', 'type': 'int'},
        'included_quantity': {'key': 'includedQuantity', 'type': 'int'},
        'total': {'key': 'total', 'type': 'int'},
        'assigned_through_subscription': {'key': 'assignedThroughSubscription', 'type': 'int'},
        'extension_id': {'key': 'extensionId', 'type': 'str'},
        'extension_name': {'key': 'extensionName', 'type': 'str'},
        'is_trial_version': {'key': 'isTrialVersion', 'type': 'bool'},
        'minimum_license_required': {'key': 'minimumLicenseRequired', 'type': 'object'},
        'remaining_trial_days': {'key': 'remainingTrialDays', 'type': 'int'},
        'trial_expiry_date': {'key': 'trialExpiryDate', 'type': 'iso-8601'}
    }
    def __init__(self, assigned=None, available=None, included_quantity=None, total=None, assigned_through_subscription=None, extension_id=None, extension_name=None, is_trial_version=None, minimum_license_required=None, remaining_trial_days=None, trial_expiry_date=None):
        """See the class docstring for parameter descriptions; the first four are forwarded to SummaryData."""
        super(ExtensionSummaryData, self).__init__(assigned=assigned, available=available, included_quantity=included_quantity, total=total)
        self.assigned_through_subscription = assigned_through_subscription
        self.extension_id = extension_id
        self.extension_name = extension_name
        self.is_trial_version = is_trial_version
        self.minimum_license_required = minimum_license_required
        self.remaining_trial_days = remaining_trial_days
        self.trial_expiry_date = trial_expiry_date
| 3,350 | 959 |
"""
Definition of urls for CreeDictionary.
"""
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
from django_js_reverse.views import urls_js
import CreeDictionary.API.views as api_views
from CreeDictionary.CreeDictionary import views
from CreeDictionary.CreeDictionary.sitemaps import sitemaps
# TODO: use URL namespaces:
# e.g., cree-dictionary:index instead of cree-dictionary-index
# See: https://docs.djangoproject.com/en/2.2/topics/http/urls/#url-namespaces
urlpatterns = [
    ################################# Primary URLs #################################
    path("", views.index, name="cree-dictionary-index"),
    path("search", views.index, name="cree-dictionary-search"),
    # "word" is a user-friendly alternative for the linguistic term "lemma"
    path(
        "word/<str:lemma_text>/",
        views.entry_details,
        name="cree-dictionary-index-with-lemma",
    ),
    path("about", views.about, name="cree-dictionary-about"),
    path("contact-us", views.contact_us, name="cree-dictionary-contact-us"),
    path("query-help", views.query_help, name="cree-dictionary-query-help"),
    path("admin/fst-tool", views.fst_tool, name="cree-dictionary-fst-tool"),
    ################################# Internal API #################################
    # internal use to render boxes of search results
    path(
        "_search_results/<str:query_string>/",
        views.search_results,
        name="cree-dictionary-search-results",
    ),
    # internal use to render paradigm and only the paradigm
    path(
        "_paradigm_details/",
        views.paradigm_internal,
        name="cree-dictionary-paradigm-detail",
    ),
    # POST to this URL to change the display mode:
    path(
        "_change_display_mode",
        views.ChangeDisplayMode.as_view(),
        name="cree-dictionary-change-display-mode",
    ),
    # POST to this URL to change the paradigm label preference:
    path(
        "_change_paradigm_label",
        views.ChangeParadigmLabelPreference.as_view(),
        name="cree-dictionary-change-paradigm-label",
    ),
    ################################ Click in text #################################
    # cree word translation for click-in-text
    path(
        "click-in-text/",
        api_views.click_in_text,
        name="cree-dictionary-word-click-in-text-api",
    ),
    path(
        "click-in-text-embedded-test/",
        api_views.click_in_text_embedded_test,
        name="cree-dictionary-click-in-text-embedded-test",
    ),
    ############################## Other applications ##############################
    path("admin/", admin.site.urls),
    path("search-quality/", include("CreeDictionary.search_quality.urls")),
    path("", include("CreeDictionary.morphodict.urls")),
    path(
        "sitemap.xml",
        sitemap,
        {"sitemaps": sitemaps},
        name="django.contrib.sitemaps.views.sitemap",
    ),
    ################################# Special URLS #################################
    # Reverse URLs in JavaScript: https://github.com/ierror/django-js-reverse
    path("jsreverse", urls_js, name="js_reverse"),
]
# Serve the Google site-verification page only when a verification token is configured.
if hasattr(settings, "GOOGLE_SITE_VERIFICATION"):
    urlpatterns.append(
        path(
            f"google{settings.GOOGLE_SITE_VERIFICATION}.html",
            views.google_site_verification,
        )
    )
if settings.DEBUG:
    # saves the need to `manage.py collectstatic` in development
    urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG and settings.ENABLE_DJANGO_DEBUG_TOOLBAR:
    import debug_toolbar
    # necessary for debug_toolbar to work
    urlpatterns.append(path("__debug__/", include(debug_toolbar.urls)))
| 3,816 | 1,151 |
import datetime
class Token():
    """A bearer-token string with a fixed 10-minute lifetime."""

    def __init__(self, token_string):
        self._token_string = token_string
        # Expire 10 minutes after creation.
        self._expiry_date = datetime.datetime.now() + datetime.timedelta(0, 60 * 10)

    def __str__(self):
        # Bug fix: the original returned the bare name `_token_string`,
        # which raised NameError -- the attribute lives on the instance.
        return self._token_string

    def get_token(self):
        """Return the token string; raise TokenExpiredError once expired."""
        if self._is_expired():
            raise TokenExpiredError()
        return self._token_string

    def _is_expired(self):
        """True once the current time has reached the expiry date."""
        return datetime.datetime.now() >= self._expiry_date

    def to_json(self):
        """Serialize to a JSON object string with `token` and `expiry` keys."""
        return '{"token":"' + self._token_string + '", "expiry":"' + str(self._expiry_date) + '"}'
class TokenExpiredError(Exception):
    """Raised when a token is used after its expiry time has passed."""
| 679 | 257 |
"""A setuptools based setup module.
See:
https://github.com/reecechimento/python-acelerate
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / 'README.md').read_text(encoding='utf-8')
# Template setup() call: every ENTER_* placeholder must be filled in before release.
setup(
    name='ENTER_PKGNAME', # Required
    version='ENTER_VERSION', # Required
    description='ENTER_DESCRIPTION', # NOTE: Optional
    long_description=long_description, # NOTE: Optional
    long_description_content_type='text/markdown', # NOTE: 'text/plain' | 'text/rst' | 'text/markdown'
    url='ENTER_GITURL', # NOTE: Optional
    author='Chimento, Reece',
    author_email='reecechimento@gmail.com',
    classifiers=[
        # How mature is this project? | 3 - Alpha | 4 - Beta | 5 Production/Stable
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.10 :: Only', # NOTE: Specifies the python versions you support.
    ],
    # NOTE: This field adds keywords for your project which will appear on
    # the project page. What does your project relate to?
    keywords='engineering,electrical,energy-storage,test-engineering',
    # NOTE: When your source code is in a subdirectory under the project
    # root, e.g. `src/`, it is necessary to specify the `package_dir`
    # argument.
    package_dir={'': 'src'}, # Optional
    # You can just specify your package directories manually here if your
    # project is simple.
    # NOTE: OTHERWISE you can use find_packages().
    # NOTE: Alternatively, if you just want to distribute a single Python
    # file, use the `py_modules` argument instead as follows, which will
    # expect a file called `my_module.py` to exist:
    # py_modules=["my_module"],
    packages=find_packages(where='src'), # WARN: Required
    # WARN: Specify which Python versions you support. `pip install` will check this
    python_requires='>=3.10, <4',
    # WARN: `install_requires` specifies what a project *minimally* needs to
    # run correctly.
    # NOTE: This is the specification that is used to install its
    # dependencies.
    install_requires=[
        'aiohttp',
        'ruamel.yaml'
    ],
    # List of additional groups of dependencies (e.g. development dependencies)
    # $ pip install python-acelerate[dev]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    # If there are any data files included in your packages that need to be
    # installed, specify them here.
    package_data={
        'config': ['init.yml'],
    },
    # Although 'package_data' is the preferred approach, in some cases you may
    # need to place data files outside of your packages. See:
    # https://docs.python.org/distutils/setupscript.html # installing-additional-files
    #
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])], # Optional
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and
    # allow `pip` to create the appropriate form of executable for the
    # target platform
    #
    #
    # For example, the following would provide a command called `acelerate` which
    # executes the function `main` from this package when invoked:
    entry_points={ # Optional
        'console_scripts': [
            'main = ENTER_PKGNAME:__INIT__.PY_FUNCTION', # NOTE: hook the __init__.py method in main()
        ],
    },
    # List additional URLs that are relevant to your project as a dict.
    #
    # This field corresponds to the "Project-URL" metadata fields:
    # https://packaging.python.org/specifications/core-metadata/ # project-url-multiple-use
    #
    # Examples listed include a pattern for specifying where the package tracks
    # issues, where the source is hosted, where to say thanks to the package
    # maintainers, and where to support the project financially. The key is
    # what's used to render the link text on PyPI.
    project_urls={ # Optional
        'Bug Reports': 'ENTER_GITHUB_BUGREPORTS',
        'Source': 'ENTER_GITHUB_URL',
    },
)
| 5,406 | 1,327 |
# Model taken from https://arxiv.org/pdf/1810.08647.pdf,
# INTRINSIC SOCIAL MOTIVATION VIA CAUSAL
# INFLUENCE IN MULTI-AGENT RL
# model is a single convolutional layer with a kernel of size 3, stride of size 1, and 6 output
# channels. This is connected to two fully connected layers of size 32 each
import tensorflow as tf
from ray.rllib.models.misc import normc_initializer, flatten
from ray.rllib.models.model import Model
import tensorflow.contrib.slim as slim
class ConvToFCNet(Model):
    """Conv(3x3, 6 channels) -> flatten -> two FC-32 layers (Jaques et al. 2018)."""

    def _build_layers_v2(self, input_dict, num_outputs, options):
        obs = input_dict["obs"]
        # The observation may arrive as [obs, smoothed_rewards].
        aux_rewards = None
        if isinstance(obs, list):
            obs, aux_rewards = obs[0], obs[1]
        with tf.name_scope("custom_net"):
            conv_out = slim.conv2d(
                obs,
                6,
                [3, 3],
                1,
                activation_fn=tf.nn.relu,
                scope="conv")
            last_layer = flatten(conv_out)
            for idx, width in enumerate([32, 32], start=1):
                last_layer = slim.fully_connected(
                    last_layer,
                    width,
                    weights_initializer=normc_initializer(1.0),
                    activation_fn=tf.nn.relu,
                    scope="fc{}".format(idx))
            output = slim.fully_connected(
                last_layer,
                num_outputs,
                weights_initializer=normc_initializer(0.01),
                activation_fn=None,
                scope="fc_out")
            # Pass the smoothed rewards through alongside the logits.
            if aux_rewards is not None:
                output = tf.concat([output, aux_rewards], axis=-1)
            return output, last_layer
class ConvToFCNetLarge(Model):
    """Wider variant: conv(3x3, 16 channels) -> flatten -> two FC-64 layers."""

    def _build_layers_v2(self, input_dict, num_outputs, options):
        obs = input_dict["obs"]
        # The observation may arrive as [obs, smoothed_rewards].
        aux_rewards = None
        if isinstance(obs, list):
            obs, aux_rewards = obs[0], obs[1]
        with tf.name_scope("custom_net"):
            conv_out = slim.conv2d(
                obs,
                16,
                [3, 3],
                1,
                activation_fn=tf.nn.relu,
                scope="conv")
            last_layer = flatten(conv_out)
            for idx, width in enumerate([64, 64], start=1):
                last_layer = slim.fully_connected(
                    last_layer,
                    width,
                    weights_initializer=normc_initializer(1.0),
                    activation_fn=tf.nn.relu,
                    scope="fc{}".format(idx))
            output = slim.fully_connected(
                last_layer,
                num_outputs,
                weights_initializer=normc_initializer(0.01),
                activation_fn=None,
                scope="fc_out")
            # Pass the smoothed rewards through alongside the logits.
            if aux_rewards is not None:
                output = tf.concat([output, aux_rewards], axis=-1)
            return output, last_layer
| 3,812 | 1,133 |
import numpy as np
from utils.visualize import view_trajectory
DEFAULT_SCALE = 1.0
def load_trajectory(file_name: str, num_steps: int, save_results: bool = True) -> np.ndarray:
    """Load a hand-painted trajectory and pre-process it.

    Args:
        file_name (string): path to a .npy file of raw 2D coordinates
        num_steps (int): number of points after resampling
        save_results (bool): if True, plot the original and the resampled
            trajectories via ``view_trajectory``
    Returns:
        trajectory with coordinates in shape (num_steps, 2)
    """
    trajectory = np.load(file_name).astype(np.float32)
    # 1. normalization: divide by the longer side so the aspect ratio is kept
    w, h = (trajectory.max(axis=0) - trajectory.min(axis=0)).tolist()
    ratio = w / h
    if ratio >= 1:
        trajectory = (trajectory - trajectory.min(axis=0)) / w
        scales = np.array([[DEFAULT_SCALE, h / w]])
    else:
        trajectory = (trajectory - trajectory.min(axis=0)) / h
        scales = np.array([[w / h, DEFAULT_SCALE]])
    # recenter into [-DEFAULT_SCALE, DEFAULT_SCALE], ratio kept
    trajectory = (trajectory - scales / 2) * 2 * DEFAULT_SCALE
    if save_results:
        view_trajectory(trajectory, title="original_trajectory")
    # 2. resampling / interpolation
    # Human hardly draw the curve in the constant speed, so a rough resampling (linear interpolation)
    # should be used for pre-processing
    # Indices where the curve touches the right/left x-extremes; these split
    # the stroke into a left-to-right half and a right-to-left half.
    margin_right = sorted(np.where(trajectory[:, 0] == scales[0, 0])[0])
    margin_left = sorted(np.where(trajectory[:, 0] == -scales[0, 0])[0])
    assert len(margin_right) >= 1 and len(margin_left) >= 1
    # Resample x on a sinusoidal schedule, then interpolate y for each half.
    ts = np.linspace(0, 1, num_steps)
    xs = scales[0, 0] * np.sin(2 * np.pi * ts)
    ys = np.zeros_like(xs)
    # from left to right
    ids = np.arange(num_steps // 2 + 1) - num_steps // 4
    ids_traj = np.arange(trajectory.shape[0] - margin_left[0] + margin_right[-1]) - (
        trajectory.shape[0] - margin_left[0]
    )
    ys[ids] = np.interp(xs[ids], trajectory[ids_traj, 0], trajectory[ids_traj, 1])
    # from right to left
    ys[-num_steps // 4 - 1 : num_steps // 4 : -1] = np.interp(
        xs[-num_steps // 4 - 1 : num_steps // 4 : -1],
        trajectory[margin_left[-1] : margin_right[0] : -1, 0],
        trajectory[margin_left[-1] : margin_right[0] : -1, 1],
    )
    trajectory_interp = np.zeros([num_steps, 2])
    trajectory_interp[:, 0], trajectory_interp[:, 1] = xs, ys
    if save_results:
        view_trajectory(trajectory_interp, title="interp_trajectory")
    return trajectory_interp
| 2,348 | 889 |
from fastapi import APIRouter
from ....core.jobs import JobBase
router = APIRouter()
@router.get("")
async def get_job_classes():
    """Return the metadata of every registered JobBase subclass."""
    job_classes = JobBase.__subclasses__()
    return [job_cls.meta_info() for job_cls in job_classes]
| 193 | 63 |
"""
115. 不同的子序列
https://leetcode-cn.com/problems/distinct-subsequences/
"""
def numDistinct(s: str, t: str):
    """Count distinct subsequences of s equal to t (suffix DP, bottom-up).

    dp[i][j] = number of ways to build t[j:] out of s[i:].
    """
    m, n = len(s), len(t)
    if n > m:
        return 0
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    # An empty target can always be built in exactly one way.
    for row in dp:
        row[n] = 1
    for i in reversed(range(m)):
        for j in reversed(range(n)):
            # Skip s[i], plus (when characters match) consume both.
            dp[i][j] = dp[i + 1][j]
            if s[i] == t[j]:
                dp[i][j] += dp[i + 1][j + 1]
    return dp[0][0]
def numDistinctOther(s: str, t: str):
    """Count distinct subsequences of s equal to t (prefix DP variant).

    dp[j][i] = number of ways to build t[:i] out of s[:j].
    """
    m, n = len(s), len(t)
    if n > m:
        return 0
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    # The empty target is always reachable exactly once.
    for row in dp:
        row[0] = 1
    for i in range(1, n + 1):
        for j in range(i, m + 1):
            # Skip s[j-1], plus (on a match) pair it with t[i-1].
            dp[j][i] = dp[j - 1][i] + (dp[j - 1][i - 1] if t[i - 1] == s[j - 1] else 0)
    return dp[m][n]
"""
babgbag
bag
"""
# Quick sanity checks for the prefix-DP variant (expected: 3, 5, 1).
for sample, pattern in (("rabbbit", "rabbit"), ("babgbag", "bag"), ("babb", "bbb")):
    print("numDistinct: ", numDistinctOther(sample, pattern))
from django.contrib.auth.backends import ModelBackend
from hub.models import OneallToken
class OneallBackend(ModelBackend):
    """Authenticate a user from a OneAll single sign-on token."""

    def authenticate(self, request=None, oneall_token=None):
        """Return the user owning *oneall_token*, or None if unknown.

        Fix: Django >= 1.11 calls ``authenticate(request, **credentials)``,
        so a backend without a ``request`` parameter is silently skipped.
        Accepting ``request=None`` keeps older keyword-only callers working.
        """
        if oneall_token is None:
            return None
        try:
            token = OneallToken.objects.get(token=oneall_token)
        except OneallToken.DoesNotExist:
            return None
        return token.user
| 360 | 102 |
#!/usr/bin/env python
"""
update_dreqs_0183.py
ESGF AttributeUpdate
Called from a Rose suite to update the checksums in MPI AMIP files that have
already been updated with the first version that didn't preserve checksums.
"""
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import Checksum, DataRequest, TapeChecksum
from pdata_app.utils.common import adler32
__version__ = '0.1.0b1'
# Logging defaults used when no --log-level argument is supplied.
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Add additional data requests')
    arg_parser.add_argument(
        '-l', '--log-level',
        help='set logging level to one of debug, info, '
             'warn (the default), or error')
    arg_parser.add_argument('request_id', help='to request id to update')
    arg_parser.add_argument('--version', action='version',
                            version='%(prog)s {}'.format(__version__))
    return arg_parser.parse_args()
def main(args):
    """
    Main entry point
    """
    # request_id looks like "<model>_<experiment>_<variant-label>_<table>_<variable>".
    model, expt, var_lab, table, var = args.request_id.split('_')
    # Map the underscore-free source_id spellings back to the canonical names.
    if model == 'MPIESM-1-2-HR':
        new_model = 'MPI-ESM1-2-HR'
    elif model == 'MPIESM-1-2-XR':
        new_model = 'MPI-ESM1-2-XR'
    else:
        raise ValueError('Unknown source_id {}'.format(model))
    dreq = DataRequest.objects.get(
        climate_model__short_name=new_model,
        experiment__short_name=expt,
        rip_code=var_lab,
        variable_request__table_name=table,
        variable_request__cmor_name=var
    )
    logger.debug('DataRequest is {}'.format(dreq))
    for data_file in dreq.datafile_set.order_by('name'):
        logger.debug('Processing {}'.format(data_file.name))
        file_path = os.path.join(data_file.directory, data_file.name)
        cs = data_file.checksum_set.first()
        if not cs:
            logger.error('No checksum for {}'.format(data_file.name))
        else:
            # Preserve the existing checksum as the tape checksum before
            # it is replaced by a freshly computed one below.
            TapeChecksum.objects.create(
                data_file=data_file,
                checksum_value=cs.checksum_value,
                checksum_type=cs.checksum_type
            )
            # Remove the original checksum now that the tape checksum's
            # been created
            cs.delete()
        Checksum.objects.create(
            data_file=data_file,
            checksum_type='ADLER32',
            checksum_value=adler32(file_path)
        )
        # Update the file's size, keeping the old size as the tape size.
        data_file.tape_size = data_file.size
        data_file.size = os.path.getsize(file_path)
        # Save all of the changes
        data_file.save()
if __name__ == "__main__":
    cmd_args = parse_args()
    # determine the log level; reject anything that is not a logging level name
    if cmd_args.log_level:
        try:
            log_level = getattr(logging, cmd_args.log_level.upper())
        except AttributeError:
            logger.setLevel(logging.WARNING)
            logger.error('log-level must be one of: debug, info, warn or error')
            sys.exit(1)
    else:
        log_level = DEFAULT_LOG_LEVEL
    # configure the logger: a single stream handler on the root logger
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': DEFAULT_LOG_FORMAT,
            },
        },
        'handlers': {
            'default': {
                'level': log_level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': log_level,
                'propagate': True
            }
        }
    })
    # run the code
    main(cmd_args)
| 3,902 | 1,206 |
from discord.ext import commands
from Util import Configuration
def is_owner():
    """Command check that passes only for the bot owner.

    Fix: ``Bot.is_owner`` is a coroutine; the original returned the
    coroutine object itself, which is always truthy, so the check passed
    for every user. Awaiting it yields the real boolean.
    """
    async def predicate(ctx):
        return await ctx.bot.is_owner(ctx.author)
    return commands.check(predicate)
def is_trusted(ctx):
    """True for holders of a TRUSTED role, or anyone who is a mod."""
    if is_user("TRUSTED", ctx):
        return True
    return is_mod(ctx)
def is_mod(ctx:commands.Context):
    """True for MOD-role holders, members allowed to ban, or any admin."""
    if is_user("MOD", ctx):
        return True
    if hasattr(ctx.author, "roles") and ctx.channel.permissions_for(ctx.author).ban_members:
        return True
    return is_admin(ctx)
def is_admin(ctx:commands.Context):
    """True for ADMIN-role holders or members with administrator permission."""
    if is_user("ADMIN", ctx):
        return True
    return hasattr(ctx.author, "roles") and ctx.channel.permissions_for(ctx.author).administrator
def is_server_owner(ctx):
    """True when invoked inside a guild by that guild's owner."""
    if ctx.guild is None:
        return False
    return ctx.author == ctx.guild.owner
def is_user(perm_type, ctx):
    """True when the author holds one of the guild's <perm_type>_ROLES roles."""
    # DMs have no guild and webhook authors have no roles: never privileged.
    if ctx.guild is None or not hasattr(ctx.author, "roles"):
        return False
    allowed_role_ids = Configuration.getConfigVar(ctx.guild.id, f"{perm_type}_ROLES")
    return any(role.id in allowed_role_ids for role in ctx.author.roles)
def mod_only():
    """Command check that only lets mods through."""
    async def _check(ctx):
        return is_mod(ctx)
    return commands.check(_check)
def is_dev(ctx:commands.Context):
if ctx.guild is None:
return False
devrole = Configuration.getConfigVar(ctx.guild.id, "DEV_ROLE")
if devrole != 0:
for role in ctx.author.roles:
if role.id == devrole:
return True
return is_admin(ctx)
def devOnly():
    """Command check that only lets devs (or admins) through."""
    async def _check(ctx):
        return is_dev(ctx)
    return commands.check(_check)
def is_server(ctx, id):
    """True when the context comes from the guild with the given id."""
    if ctx.guild is None:
        return False
    return ctx.guild.id == id
def bc_only():
    """Command check restricting a command to the BC guild."""
    async def _check(ctx):
        return is_server(ctx, 309218657798455298)
    return commands.check(_check)
def no_testers():
    """Command check that excludes the tester guild."""
    async def _check(ctx):
        return not is_server(ctx, 197038439483310086)
    return commands.check(_check)
def check_permission(ctx:commands.Context, default):
    """Resolve a command's effective permission, honouring per-guild
    command-level and cog-level overrides; fall back to *default*."""
    root_name = ctx.command.qualified_name.split(" ")[0]
    if ctx.guild is None:
        return default
    guild_id = ctx.guild.id
    command_overrides = Configuration.getConfigVar(guild_id, "COMMAND_OVERRIDES")
    cog_overrides = Configuration.getConfigVar(guild_id, "COG_OVERRIDES")
    cog_name = type(ctx.cog).__name__
    # Command override wins over cog override.
    if root_name in command_overrides:
        return check_perm_lvl(ctx, command_overrides[root_name])
    if cog_name in cog_overrides:
        return check_perm_lvl(ctx, cog_overrides[cog_name])
    return default
def public(ctx):
    """Permission level 0: everyone may use the command."""
    return True


def disabled(ctx):
    """Permission level 5: nobody may use the command."""
    return False
# Permission checks indexed by level:
# 0=public, 1=trusted, 2=mod, 3=admin, 4=server owner, 5=disabled.
perm_checks = [
    public,
    is_trusted,
    is_mod,
    is_admin,
    is_server_owner,
    disabled
]
def check_perm_lvl(ctx, lvl):
    """Run the permission check registered for level *lvl* against *ctx*."""
    check = perm_checks[lvl]
    return check(ctx)
| 2,709 | 942 |
from data_cfg import project_dataset
from tensorboardX import SummaryWriter
from utils import init_folder
from options import ProjectOptions
from model import create_model
import time
import os
import torch
if __name__ == '__main__':
    # Parse options and create the checkpoint / image output folders.
    opt=ProjectOptions().get_opt()
    ProjectOptions.print_options(opt)
    nets_path=os.path.join(opt.checkpoints_dir,opt.model+'_'+opt.name)
    init_folder(nets_path,opt.im_save_dir)
    # Build the dataset, loader and model; resume from saved weights if any.
    data_set = project_dataset(opt)
    data_loader = torch.utils.data.DataLoader(data_set, batch_size=opt.batch_size, shuffle=True)
    length=len(data_loader)
    model=create_model(opt)
    model.load_networks()
    if opt.print_net:
        model.print_networks()
    if opt.phase=='train':
        log_dir=os.path.join('./log',opt.model+'_'+opt.name)
        init_folder( './log')
        print('Start training....')
        writer = SummaryWriter(log_dir)
        for e in range(opt.epochs):
            epoch=e+1
            model.clear_sumloss()
            for i,data in enumerate(data_loader,0):
                model.set_input(data)
                model.train()
                # Log to TensorBoard against the global step (e*length+i+1).
                model.write(writer,e*length+i+1)
                if (i+1)%opt.print_freq==0:
                    model.print_loss(opt.name,epoch,i,length)
                if (e*length+i+1)%opt.im_save_freq==0:
                    print('save img')
                    model.save_results()
            # Checkpoint the networks every net_save_freq epochs.
            if epoch%opt.net_save_freq==0:
                model.save_networks()
        print('Training over')
    if opt.phase=='test':
        print('Current: ', time.asctime(time.localtime(time.time())))
        print('Start testing')
        # t accumulates pure forward time; l counts processed samples.
        t=0.0
        l=0.0
        for i,data in enumerate(data_loader,0):
            l+=data['test'].size()[0]
            model.set_input(data)
            f1t=time.time()
            model.forward()
            f2t=time.time()
            t+=(f2t-f1t)
            model.save_results()
        print('Testing over')
        print('Current: ', time.asctime(time.localtime(time.time())))
        print('Average inference latency for one frame: %.4fs'%(t/l))
| 2,149 | 712 |
import pandas
from openpyxl import load_workbook
from openpyxl.styles import Font
# Merge the three shift sheets into one workbook and append a per-row
# "Total" column (column G = column E * column F).
df1 = pandas.read_excel('data/shifts.xlsx', sheet_name='Sheet')
df2 = pandas.read_excel('data/shifts.xlsx', sheet_name='Sheet1')
df3 = pandas.read_excel('data/shift_3.xlsx')
df_all = pandas.concat([df1, df2, df3], sort=False)
# to_excel returns None; no point binding it.
df_all.to_excel('output/allshifts.xlsx', index=None)
wb = load_workbook('output/allshifts.xlsx')
ws = wb.active
total_col = ws['G1']
total_col.font = Font(bold=True)
total_col.value = 'Total'
# Fix: the original iterated the hard-coded range(2, 300), which raised
# TypeError (None * None) on sheets shorter than 299 data rows and silently
# truncated longer ones. Iterate the rows that actually exist and skip
# rows with missing values instead.
for row in range(2, ws.max_row + 1):
    e_value = ws['E{}'.format(row)].value
    f_value = ws['F{}'.format(row)].value
    if e_value is None or f_value is None:
        continue
    ws['G{}'.format(row)] = e_value * f_value
wb.save('output/totalled.xlsx')
| 752 | 312 |
import pytest
import pandas as pd
pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
def test_column_types(alltypes):
    """Check that executed ClickHouse columns map to the expected dtypes."""
    df = alltypes.execute()
    assert df.tinyint_col.dtype.name == 'int8'
    assert df.smallint_col.dtype.name == 'int16'
    assert df.int_col.dtype.name == 'int32'
    assert df.bigint_col.dtype.name == 'int64'
    assert df.float_col.dtype.name == 'float32'
    assert df.double_col.dtype.name == 'float64'
    # Fix: pd.core.common.is_datetime64_dtype was a private location and has
    # been removed; pandas.api.types is the supported public API.
    assert pd.api.types.is_datetime64_dtype(df.timestamp_col.dtype)
| 530 | 202 |
import fractal
class Mandelbrot(fractal.Fractal):
    """Mandelbrot set rendered over a complex grid, colored by escape time."""
    __slots__ = ('__w', '__h', '__max_iter', '__grid')

    def __init__(self, re_start, re_end, im_start, im_end, max_iter=100, w=600, h=400):
        self.__max_iter = max_iter
        self.__w = w
        self.__h = h
        self.__grid = self.make_grid(re_start, re_end, im_start, im_end)

    def __compute(self, c):
        """Iterate z -> z*z + c and return the iteration count at escape."""
        z, n = 0, 0
        while n < self.__max_iter and abs(z) <= 4:
            z = z * z + c
            n += 1
        return n

    def evaluate(self):
        import numpy as np
        return np.vectorize(self.__compute)(self.__grid)

    def get_color(self, pt):
        # Smooth coloring scheme, others exist.
        inside = pt >= self.__max_iter
        hue = int(255 * pt / self.__max_iter)
        return hue, 255, 0 if inside else 255

    @property
    def height(self):
        return self.__h

    @property
    def width(self):
        return self.__w

    def make_image(self, evaluated):
        from PIL import Image, ImageDraw
        canvas = Image.new('HSV', (self.__w, self.__h), (0, 0, 0))
        draw = ImageDraw.Draw(canvas)
        for col in range(self.__w):
            for row in range(self.__h):
                draw.point([col, row], self.get_color(evaluated[col, row]))
        return canvas.convert('RGB')
class Newton(fractal.Fractal):
    """Newton fractal: each grid point is colored by the polynomial root
    that Newton's method converges to when started from that point."""
    __slots__ = ('__re_start', '__re_end', '__im_start', '__im_end', '__w', '__h',
                 '__max_err', '__max_iter', '__decimals', '__grid', '__palette', '__color_dict')

    def __init__(self, re_start, re_end, im_start, im_end, palette, w=600, h=400,
                 max_err=1e-5, max_iter=1e4, decimals=8):
        # Complex-plane window, raster size, and convergence limits.
        self.__re_start = re_start
        self.__re_end = re_end
        self.__im_start = im_start
        self.__im_end = im_end
        self.__w = w
        self.__h = h
        self.__max_err = max_err
        self.__max_iter = max_iter
        # Roots are rounded to this many decimals so nearly-equal limits
        # collapse onto a single dictionary key.
        self.__decimals = decimals
        self.__grid = self.make_grid(re_start, re_end, im_start, im_end)
        self.__palette = palette
        self.__color_dict = {}

    def __compute(self, c, func, func_der):
        """Run Newton's method from *c*; return the rounded root reached,
        or the sentinel -1e5 when the iteration does not converge."""
        f_c = func(c)
        count = 0
        output = c
        while abs(f_c.real) >= self.__max_err or abs(f_c.imag) >= self.__max_err:
            f_prime_c = func_der(c)
            if f_prime_c == 0:
                # Zero derivative: the Newton step is undefined, stop here.
                output = c
                break
            c -= f_c / f_prime_c
            f_c = func(c)
            count += 1
            if count >= self.__max_iter:
                # Algorithm did not converge, input default val which should be outside of normal evaluation ranges.
                output = -1e5
                break
            output = c
        return complex(round(output, self.__decimals), 0) if isinstance(output, float) else \
            complex(round(output.real, self.__decimals), round(output.imag, self.__decimals))

    def evaluate(self, func, func_der):
        """Evaluate the grid and build the root -> palette-color mapping."""
        import warnings
        import numpy as np
        compute_v = np.vectorize(self.__compute)
        computed = compute_v(self.__grid, func, func_der)
        roots = np.unique(computed.flatten())
        roots_len = len(roots)
        palette_len = len(self.__palette)
        # Pad the palette with black if there are more roots than colors.
        if palette_len < roots_len:
            print(roots)
            pad_len = roots_len - palette_len
            self.__palette += ['#000000'] * pad_len
            warnings.warn(f'Palette provided had length {palette_len}, but there were {roots_len} roots. ' +
                          f'Palette was padded with {pad_len} times black.')
        # If the algorithm didn't converge the point will be colored black
        self.__color_dict = {roots[i]: '#000000' if roots[i].real == -1e5 else self.__palette[i] for i in range(roots_len)}
        return computed

    def get_color(self, pt):
        # Color assigned to the root *pt* by evaluate().
        return self.__color_dict[pt]

    def get_root_adjacent_pts(self, nr_pts=5):
        """
        Method will return the pixel coordinates of the points nearest to each of the computed roots from __color_dict
        :param nr_pts: Pixel window around the pixel nearest to the root.
        :return: A list of lists, each representing a pixel coordinate.
        """
        # Size of one pixel in complex-plane units.
        re_step = (self.__re_end - self.__re_start) / self.__w
        im_step = (self.__im_end - self.__im_start) / self.__h
        iter_range = range(- nr_pts//2, 1 + nr_pts//2)
        out = []
        for root in self.__color_dict:
            out += self.__nearest_pts(root, re_step, im_step, iter_range)
        return out

    def __nearest_pts(self, root, re_step, im_step, iter_range):
        # Integer pixel nearest to the root, plus the surrounding window.
        re_nearest = (root.real - self.__re_start) // re_step
        im_nearest = (root.imag - self.__im_start) // im_step
        return [[re_nearest + i, im_nearest + j] for i in iter_range for j in iter_range]

    @property
    def height(self):
        # Raster height in pixels.
        return self.__h

    @property
    def width(self):
        # Raster width in pixels.
        return self.__w
def func(x):
    """Polynomial whose Newton fractal is rendered: f(x) = x^3 - 2x + 2.

    (Earlier experiments used other polynomials; see the matching
    derivative in func_der below.)
    """
    return x ** 3 - 2 * x + 2
def func_der(x):
    """Derivative of func: f'(x) = 3x^2 - 2."""
    return 3 * x ** 2 - 2
if __name__ == '__main__':
    # Blue hueues: ['#023E8A', '#0077B6', '#90E0EF', '#CAF0F8', '#03045E']
    from PIL import Image, ImageDraw, ImageColor
    # mdb = Mandelbrot(-2, 1, -1, 1, w=int(1024 * 3/2), h=1024)
    # evaluated = mdb.evaluate()
    # im = mdb.make_image(evaluated)
    # im.save('images/mdb.jpg', 'JPEG')
    # Render the Newton fractal of the cubic defined above and mark the
    # pixels adjacent to each root in red.
    newt = Newton(-4, 4, -4, 4, w=600, h=600, max_err=1e-10, max_iter=1e2, decimals=9,
                  palette=['#023E8A', '#0077B6', '#90E0EF', '#CAF0F8', '#03045E'])
    evaluated = newt.evaluate(func, func_der)
    near_roots = newt.get_root_adjacent_pts()
    # NOTE(review): make_image is presumably inherited from fractal.Fractal
    # (Newton does not define one) — confirm against the base class.
    im = newt.make_image(evaluated)
    draw = ImageDraw.Draw(im)
    for pt in near_roots:
        draw.point(pt, ImageColor.getrgb("#FF0000"))
    im.save('images/cubic_w_roots.jpg', 'JPEG')
| 6,141 | 2,224 |
"""
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to.
If pos is -1, then there is no cycle in the linked list.
Note: Do not modify the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Follow-up:
Can you solve it without using extra space?
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def detectCycle(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Floyd's cycle detection: after the slow and fast pointers meet,
        slow has traveled a multiple of the cycle length, so restarting
        one pointer from head and stepping both by one makes them meet
        exactly at the cycle's entry node. O(n) time, O(1) space.
        """
        # Phase 1: slow advances one step per iteration, fast advances two.
        slow = fast = head
        met = False
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            # Fast catching slow proves a cycle exists.
            if slow == fast:
                met = True
                break
        if not met:
            # Fast reached the end of the list: no cycle.
            return None
        # Phase 2: restart from head; both pointers now step one at a time
        # and meet at the node where the cycle begins.
        probe = head
        while probe != slow:
            probe = probe.next
            slow = slow.next
        return probe
"""
思路:快慢指针法,当快慢指针第一次相遇时,那就说明链表中有环,而且fast - slow = nb(b为环的长度)。而fast = 2 * slow ,因此此时slow走了nb步,其中n未定,但为一个正整数。让fast从head开始,
并且fast也改为每次走一步,这样当fast和slow再次相遇时,fast应该走了a步到达环的开始,slow走了a+nb步也到了环的开始,此时两个指针所在的结点就是所求(其中a为从头结点到环的开始的长度)。算法时间
复杂度为O(n),因为第二次相遇中,慢指针须走步数 a<a+b;第一次相遇中,慢指针须走步数 a+b−x<a+b,其中x为双指针重合点与环入口距离;因此总体为线性复杂度。空间复杂度为O(1),因为双指针使用常数大
小的额外空间。
"""
| 2,204 | 1,134 |
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
# Minimal Django settings used by the ajax_datatable test suite.
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "77777777777777777777777777777777777777777777777777"
# Local PostgreSQL instance expected to be running during tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'postgres', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': 'postgres', # Not used with sqlite3.
        'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
    }
}
# ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
    "user",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sites",
    "ajax_datatable",
]
# Custom user model supplied by the local "user" test app.
AUTH_USER_MODEL = "user.TestUser"
SITE_ID = 1
MIDDLEWARE = ()
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
    },
]
| 1,161 | 442 |
# Read three integers and report whether they form an arithmetic progression.
nums = sorted(map(int, input().split()))
print('Yes' if nums[2] - nums[1] == nums[1] - nums[0] else 'No')
| 117 | 57 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from app.models import AreaCodeLookup
from pyzipcode import ZipCodeDatabase
from app import db
import us
from geopy.geocoders import Nominatim
from easydict import EasyDict as edict
# Scrape the area-code list from allareacodes.com and store one
# AreaCodeLookup row (area code, city, state, lat/lon) per entry.
print("starting webdriver")
driver = webdriver.Firefox()
print("getting webpage")
driver.get("https://www.allareacodes.com/")
result = driver.find_elements(By.XPATH, "//select[@style='width: 100%; margin-right: 2px']")
area_code_and_place = result[0].text.split("\n")
# First words of multi-word city names that must not be truncated.
prefixes = [
    "New", "Los", "San", "Baton", "Fort",
    "Bowling", "Lake", "Grand", "Saint",
    "Charlotte"
]
zcdb = ZipCodeDatabase()
geolocator = Nominatim()  # NOTE(review): never used below — confirm whether it can be removed
for area_code in area_code_and_place:
    # Entries look like "<code> - <STATE> (<City[, ...]>)".
    state = area_code.split("-")[1].split("(")[0].strip()
    if "DC" in state:
        state = us.states.lookup("DC").abbr
    else:
        state = us.states.lookup(state).abbr
    city = area_code.split("-")[1].split("(")[1].rstrip(")")
    city = city.strip()
    if "," in city:
        city = city.split(",")[0]
    if " " in city:
        # Keep multi-word names only when they start with a known prefix.
        if [prefix for prefix in prefixes if prefix in city] == []:
            city = city.split(" ")[0]
    # find_zip may return a list or a single record; call it once.
    found = zcdb.find_zip(city=city, state=state)
    zip_code = found[0] if isinstance(found, list) else found
    if zip_code is None:
        try:
            # Fall back to any zip code in the state.
            zip_code = zcdb.find_zip(state=state)[0]
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            # Territories missing from the zip database get fixed coordinates.
            if state == "MP":
                zip_code = edict({
                    "latitude": 15.200755,
                    "longitude": 145.756952
                })
            elif state == "GU":
                zip_code = edict({
                    "latitude": 13.463345,
                    "longitude": 144.733168
                })
            else:
                # Unknown fallback: drop into an interactive shell to debug.
                import code
                code.interact(local=locals())
    area_code = AreaCodeLookup(
        area_code.split("-")[0].strip(),
        city,
        state,
        zip_code.latitude,
        zip_code.longitude
    )
    db.session.add(area_code)
db.session.commit()
| 2,213 | 734 |
from django.contrib.gis.db import models
from django.utils.timezone import now # timezone depends on value of USE_TZ env variable
from django.utils import timezone
class Brand(models.Model):
    """A food-truck brand, optionally linked to its Twitter handle."""
    name = models.CharField(max_length=128, blank=False)
    twitter_handle = models.CharField(max_length=100, blank=True)

    def __str__(self):
        return self.name
class Location(models.Model):
    """A named geographic area described by a multi-polygon."""
    name = models.CharField(max_length=128, blank=False)
    area = models.MultiPolygonField()

    def __str__(self):
        return self.name
class TruckLocation(models.Model):
    """A brand's presence at a location on a given date (one row per day)."""

    class Meta:
        unique_together = (('brand', 'location', 'date'),)

    brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)
    date_time = models.DateTimeField(default=timezone.now)
    # Denormalized calendar day, derived from date_time in save(); its own
    # column so the per-day unique_together constraint can be enforced.
    date = models.DateField(editable=False)
    source_url = models.CharField(max_length=256, blank=False)

    def __str__(self):
        return '{} at {} on {}'.format(self.brand, self.location, self.date)

    def save(self, *args, **kwargs):
        # Keep the date column in sync with date_time before persisting.
        self.date = self.date_time.date()
        super().save(*args, **kwargs)
class LocationRegex(models.Model):
    """Maps a regular-expression pattern to the Location it identifies."""

    class Meta:
        verbose_name_plural = 'location regexes'

    regex = models.CharField(max_length=256, blank=False, unique=True)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)

    def __str__(self):
        return '{}: {}'.format(self.location, self.regex)
| 1,493 | 479 |
#!/usr/bin/env python
'''
Version: 0.0.1
Python library for the bargaining protocol
'''
from pybargain_protocol import bargaining_pb2
from pybargain_protocol.constants import MAINNET
from pybargain_protocol.exceptions import SerializationError, DeserializationError
from pybargain_protocol.protocol_rules import check_time, check_memo
class BargainingCancellationDetails(object):
    """
    Details of a BargainingCancellation message.

    Attributes:
        time: unix timestamp associated to the message
        buyer_data: arbitrary data that may be used by the buyer
        seller_data: arbitrary data that may be used by the seller
        memo: utf-8 encoded, plain-text (no formatting) note that should be
            displayed to the receiver (part of the negotiation)
    """

    def __init__(self,
                 time=0,
                 buyer_data='',
                 seller_data='',
                 memo=''):
        """
        Constructor

        Parameters:
            time = unix timestamp associated to the message
            buyer_data = arbitrary data that may be used by the buyer
            seller_data = arbitrary data that may be used by the seller
            memo = utf-8 encoded, plain-text note displayed to the receiver
        """
        self.time = time
        self.buyer_data = buyer_data
        self.seller_data = seller_data
        self.memo = memo

    # SERIALIZATION

    def serialize(self):
        """
        Serializes the message (protobuff)
        """
        try:
            pbcd = bargaining_pb2.BargainingCancellationDetails()
            pbcd.time = self.time
            # Optional fields are only written when non-empty.
            if self.buyer_data: pbcd.buyer_data = self.buyer_data
            if self.seller_data: pbcd.seller_data = self.seller_data
            if self.memo: pbcd.memo = self.memo
            return pbcd.SerializeToString()
        # Fix: was a bare `except:` (also traps SystemExit/KeyboardInterrupt);
        # chain the original error for easier debugging.
        except Exception as e:
            raise SerializationError('A problem occurred while serializing the BargainingCancellationDetails with Protocol Buffers') from e

    @staticmethod
    def deserialize(pbuff):
        """
        Deserializes a protobuff message as a BargainingCancellationDetails

        Parameters:
            pbuff = protobuff message
        """
        if not pbuff: raise DeserializationError('Protocol Buffer message is empty')
        try:
            pbcd = bargaining_pb2.BargainingCancellationDetails()
            pbcd.ParseFromString(pbuff)
        except Exception as e:
            raise DeserializationError('A problem occurred while deserializing the Protocol Buffers message associated to a BargainingCancellationDetails') from e
        return BargainingCancellationDetails(pbcd.time, pbcd.buyer_data,
                                             pbcd.seller_data, pbcd.memo)

    # VALIDATIONS

    def check_msg_fmt(self, network=MAINNET):
        """
        Checks if message format is valid
        Returns True if message is valid, False otherwise

        Parameters:
            network = network used for the negotiation
        """
        return check_time(self) and check_memo(self)
| 3,471 | 954 |
# Blender add-on registration metadata.
bl_info = {
    'name': 'Geodesic',
    'description': 'Geodesic like things; weighted shortest path and walking along a mesh',
    'blender': (2, 92, 0),
    'category': 'Object',
}
import itertools
import random
from functools import partial
from itertools import starmap
import math
import logging
import networkx as nx
import numpy as np
import mathutils
import bmesh
import bpy
from mathutils import Matrix, Vector
# import sys
# os.system(f'{sys.executable} -m ensurepip')
# os.system(f'{sys.executable} -m pip install networkx')
logger = logging.getLogger('geodesic')
# Numeric tolerances for geometric comparisons.
TOL = 1e-4
VERT_TOL = 1e-2
# Unit basis vectors keyed by axis name.
AXES = {
    'X': Vector((1, 0, 0)),
    'Y': Vector((0, 1, 0)),
    'Z': Vector((0, 0, 1)),
}
class RandomPairsWithReplacement:
    """Draw random pairs from *xs*; items may reappear across draws.

    Note: *xs* is shuffled in place. commit()/reject() are no-ops here and
    exist only to match RandomPairsWithoutReplacement's interface.
    """

    def __init__(self, xs):
        self.xs = xs
        self.i = 0
        random.shuffle(self.xs)

    def commit(self):
        pass

    def reject(self):
        pass

    def draw(self):
        # Reshuffle once fewer than two unread items remain.
        if len(self.xs) - self.i < 2:
            self.i = 0
            random.shuffle(self.xs)
        first, second = self.xs[self.i], self.xs[self.i + 1]
        self.i += 2
        return first, second
class RandomPairsWithoutReplacement:
    """Draw random pairs, using each item at most once per pass.

    Rejected pairs are recycled into the next shuffled pass; draw()
    returns None once fewer than two items remain available.
    """

    def __init__(self, xs):
        # idk is it better to use a single deque and a marker of when the first element comes back around?
        self.primary = xs
        self.secondary = []
        self.i = 0
        self.a = None
        self.b = None
        self.done = False
        self.reload()

    def reload(self):
        # Fold rejected items back in and start a fresh shuffled pass.
        self.primary.extend(self.secondary)
        random.shuffle(self.primary)
        self.secondary.clear()
        if len(self.primary) < 2:
            self.done = True

    def draw(self):
        if self.done:
            return None
        if len(self.primary) < 2:
            self.reload()
            if self.done:
                return None
        self.a = self.primary.pop()
        self.b = self.primary.pop()
        return self.a, self.b

    def commit(self):
        # Accept the outstanding pair: both items are consumed for this pass.
        assert self.a is not None and self.b is not None
        self.a = None
        self.b = None

    def reject(self):
        # Return the outstanding pair to the pool for a later pass.
        assert self.a is not None and self.b is not None
        self.secondary.append(self.a)
        self.secondary.append(self.b)
        self.a = None
        self.b = None
def const(n):
    """Identity: return *n* unchanged (used as a sampling callback)."""
    return n
def const_n(x, n):
    """Return a list containing *x* repeated *n* times."""
    return [x for _ in range(n)]
def rotated(v, rot):
    """Rotate vector *v* in place by *rot* and return it."""
    v.rotate(rot)
    return v
def uniform_n(low, hi, n):
    """Draw *n* independent uniform samples from [low, hi]."""
    sample = partial(random.uniform, low, hi)
    return [sample() for _ in range(n)]
def vector_rejection(a, b):
    """Component of *a* orthogonal to *b*: a minus its projection onto b."""
    projection = a.project(b)
    return a - projection
def one_mesh_one_curve(objects):
    """If *objects* is exactly one MESH and one CURVE, return (mesh, curve);
    otherwise return None."""
    if len(objects) != 2:
        return None
    first, second = objects
    kinds = (first.type, second.type)
    if kinds == ('MESH', 'CURVE'):
        return first, second
    if kinds == ('CURVE', 'MESH'):
        return second, first
    return None
def get_bmesh(obj, use_modifiers=False, context=None):
    """Build a BMesh from *obj*, optionally from its modifier-evaluated copy."""
    if use_modifiers:
        ctx = context if context is not None else bpy.context
        depsgraph = ctx.evaluated_depsgraph_get()
        obj = obj.evaluated_get(depsgraph)
    bm = bmesh.new()
    bm.from_mesh(obj.data)
    return bm
def rotate_about_axis(axis, theta):
    """
    rodrigues formula
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians.
    """
    axis = axis.normalized()
    # Unit quaternion components: a = cos(theta/2), (b, c, d) = -axis*sin(theta/2).
    a = math.cos(theta / 2.0)
    b, c, d = -axis * math.sin(theta / 2.0)
    # Precompute the products used by the quaternion-to-matrix expansion.
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return Matrix([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                   [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                   [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def build_graph_vert_pairs(G, it, vertex_group=None, min_weight=0.1):
    """Add one edge per vertex pair in *it*, weighted by Euclidean distance,
    optionally scaled by the pair's mean vertex-group weight."""
    for u, v in it:
        if G.has_edge(u.index, v.index):
            continue
        distance = (u.co - v.co).length
        if vertex_group is not None:
            # TODO the "right" thing to do is take the sum of a weighted average of weights encountered across the path for some sample amount
            try:
                mean_weight = (vertex_group.weight(u.index) + vertex_group.weight(v.index)) / 2
            except Exception:
                # Vertex not in the group: leave this pair unconnected.
                continue
            # stretch weight range from [0, 1] to [min_weight, 2]
            distance *= max(mean_weight, min_weight / 2) * 2
        G.add_edge(u.index, v.index, weight=distance)
def build_graph(mesh, vertex_group=None, min_weight=0.1, cross_faces=False):
    """Build a weighted vertex graph from *mesh* edges (or, with
    cross_faces, from every vertex pair within each face)."""
    G = nx.Graph()
    if cross_faces:
        pairs = itertools.chain.from_iterable(
            itertools.combinations(f.verts, 2) for f in mesh.faces)
    else:
        pairs = (e.verts for e in mesh.edges)
    build_graph_vert_pairs(G, pairs, vertex_group, min_weight)
    mesh.verts.ensure_lookup_table()
    # Attach the vertex coordinate to each node for later path extraction.
    for node in G:
        G.add_node(node, vert=mesh.verts[node].co)
    return G, mesh.verts
def remove_path(G, nodes):
    """Delete from *G* every edge along the consecutive vertex sequence *nodes*."""
    for a, b in zip(nodes, nodes[1:]):
        G.remove_edge(a, b)
def make_empty_curve(name='Curve'):
    """Create, link into the active collection, and return a new empty 3D curve object."""
    data = bpy.data.curves.new(name, 'CURVE')
    curve_obj = bpy.data.objects.new(name, data)
    bpy.context.collection.objects.link(curve_obj)
    curve_obj.data.dimensions = '3D'
    return curve_obj
def make_spline(curve, points, name='Spline', type='BEZIER', handle_type='AUTO'):
    """Append a spline of *type* through *points* to *curve* and return it.

    *points* may be Vectors or BMVerts (their .co is used). Bezier handles are
    set to *handle_type*. *name* is accepted for symmetry but unused here.
    """
    spline = curve.data.splines.new(type)
    is_bezier = type == 'BEZIER'
    spline_points = spline.bezier_points if is_bezier else spline.points
    # splines start with one point; grow to the required count
    spline_points.add(len(points) - 1)
    assert len(points) == len(spline_points)
    for target, source in zip(spline_points, points):
        if isinstance(source, bmesh.types.BMVert):
            source = source.co
        target.co = source
        if is_bezier:
            target.handle_left_type = handle_type
            target.handle_right_type = handle_type
    return spline
def make_curve(points, name='Curve', type='BEZIER', handle_type='AUTO'):
    """Convenience: new curve object containing a single spline through *points*."""
    curve_obj = make_empty_curve(name=name)
    make_spline(curve_obj, points, type=type, handle_type=handle_type)
    return curve_obj
def set_spline_handles(spline, handle_type):
    """Set both handle types on every point of a bezier *spline* (no-op otherwise)."""
    if spline.type != 'BEZIER':
        return
    for point in spline.bezier_points:
        point.handle_left_type = handle_type
        point.handle_right_type = handle_type
def set_curve_handles(curve, handle_type):
    """Apply *handle_type* to the handles of every spline in *curve*."""
    splines = curve.data.splines
    for spline in splines:
        set_spline_handles(spline, handle_type)
def _vert_or_index(v):
return getattr(v, 'index', v)
def try_shortest_path(G, a, b):
    """Weighted shortest path between verts/indices *a* and *b*, or None when missing/unreachable."""
    a, b = _vert_or_index(a), _vert_or_index(b)
    try:
        return nx.algorithms.shortest_path(G, a, b, weight='weight')
    except nx.exception.NodeNotFound:
        # endpoint never made it into the graph (e.g. excluded by vertex group)
        logger.debug(f'NodeNotFound, {a} -> {b} vertex must have not been in the vertex group')
        return None
    except nx.exception.NetworkXNoPath:
        logger.debug(f'No such path {a} -> {b}')
        return None
def get_path_points(G, path):
    """Look up the stored 'vert' coordinate for every node index in *path*."""
    return [G.nodes[index]['vert'] for index in path]
def path_weight(G, path):
    """Total edge weight along the consecutive vertex sequence *path* in *G*."""
    return sum(G[a][b]['weight'] for a, b in zip(path, path[1:]))
def closest_vertex_on_face(mesh, face_index, point):
    """Vert of face *face_index* nearest to *point* (point must be in the same space as the verts)."""
    mesh.faces.ensure_lookup_table()
    face = mesh.faces[face_index]

    def dist_sq(vert):
        return (vert.co - point).length_squared

    return min(face.verts, key=dist_sq)
def snap_curve_splines_shortest_path(G, obj, mesh, curve, vertex_group=None, cross_faces=False, closest_vert=True):
    """Replace each spline of *curve* with the weighted shortest path across *mesh*
    between the spline's endpoints (using the prebuilt graph *G*).

    With closest_vert=True each endpoint snaps to the nearest vert of its
    closest face; otherwise every vert pair of the two endpoint faces is tried
    and the path with the least total cost (including the off-mesh end
    segments) wins.

    NOTE(review): the vertex_group and cross_faces parameters are not used in
    this body — G is built by the caller; consider removing them.
    """
    remove = []
    splines = list(curve.data.splines)
    for spline in splines:
        points = spline.bezier_points if spline.type == 'BEZIER' else spline.points
        if len(points) < 2:
            continue
        start = points[0].co
        end = points[-1].co
        # project both endpoints onto the mesh surface
        succ1, loc1, normal1, face_index1 = obj.closest_point_on_mesh(start)
        succ2, loc2, normal2, face_index2 = obj.closest_point_on_mesh(end)
        if not succ1 or not succ2:
            continue
        if closest_vert:
            a = closest_vertex_on_face(mesh, face_index1, obj.matrix_world @ loc1).index
            b = closest_vertex_on_face(mesh, face_index2, obj.matrix_world @ loc2).index
            path = try_shortest_path(G, a, b)
            if path is None:
                continue
            points = get_path_points(G, path)
        else:
            # the start and end are on a face, try each path from each vert to each other vert and take the one with least total path length
            p1 = loc1
            p2 = loc2
            mesh.faces.ensure_lookup_table()
            paths = filter(None, starmap(partial(try_shortest_path, G), itertools.product(mesh.faces[face_index1].verts, mesh.faces[face_index2].verts)))
            def score(path):
                # graph distance plus the straight-line hops from the on-face
                # endpoints to the path's first/last verts
                return (
                    path_weight(G, path) +
                    (p1 - G.nodes[path[0]]['vert']).length +
                    (p2 - G.nodes[path[-1]]['vert']).length
                )
            path = min(paths, key=score, default=None)
            if path is None:
                continue
            points = [p1] + get_path_points(G, path) + [p2]
        make_spline(curve, points, type=spline.type)
        remove.append(spline)
    # drop the original splines only after iteration is done
    for x in remove:
        curve.data.splines.remove(x)
# options will be added that make it hard to tell without exhaustive checks whether
# paths that fit criteria will be found in reasonable time. maxtries_multiplier bounds our efforts
def generate_multiple_paths(G, n, maxtries_multiplier=10, with_replacement=True, min_length=2):
    """Collect up to *n* shortest paths between randomly drawn vertex pairs of *G*.

    Pairs are drawn via RandomPairsWithReplacement / RandomPairsWithoutReplacement;
    draws whose path is missing or shorter than *min_length* verts are rejected.
    Gives up after n * maxtries_multiplier draws, so fewer than *n* paths may
    be returned.
    """
    pair_cls = RandomPairsWithReplacement if with_replacement else RandomPairsWithoutReplacement
    pairs = pair_cls(list(G))
    collected = []
    for _ in range(n * maxtries_multiplier):
        pair = pairs.draw()
        if pair is None:
            break
        # TODO future things might reject this path
        path = try_shortest_path(G, *pair)
        # TODO if this is frequently caused by disconnected components, it would be smarter to partition
        # the pairs up front and only try pairs with a connected component
        if path is None or len(path) < min_length:
            pairs.reject()
            continue
        collected.append(get_path_points(G, path))
        pairs.commit()
        if len(collected) == n:
            break
    return collected
# expected to be called with only edges containing 1 or 2 faces
def next_face(face, edge):
    """Return the face on the other side of *edge* from *face*, or None at a boundary."""
    others = [f for f in edge.link_faces if f != face]
    return others[0] if others else None
def line_line_intersection(a, b, c, d):
    """3space segment intersection"""
    closest = mathutils.geometry.intersect_line_line(a, b, c, d)
    if closest is None:
        return None
    p, q = closest
    # intersect_line_line treats the inputs as infinite lines: p/q are the
    # closest points on each. Reject skew lines (points further apart than
    # TOL) and intersections lying outside either segment.
    if (p - q).length > TOL:
        return None
    if not point_on_line(p, a, b):
        return None
    if not point_on_line(q, c, d):
        return None
    return p
def point_on_line(pt, line1, line2):
    """True when *pt* lies on the segment from line1 to line2 (within TOL)."""
    closest, t = mathutils.geometry.intersect_point_line(pt, line1, line2)
    on_infinite_line = (pt - closest).length_squared < TOL
    between_endpoints = 0 <= t <= 1
    return on_infinite_line and between_endpoints
# I don't know of a nicer way to do this
def make_face_face_rotation_matrix(face1, face2, axis):
    """Return the rotation about *axis* that maps face1's normal onto face2's normal.

    The dihedral angle gives the magnitude but not the sign of the rotation,
    so try the positive angle first and fall back to the negative one.
    """
    dihedral = face1.normal.angle(face2.normal)
    m = rotate_about_axis(axis, dihedral)
    # accept when the rotated normal aligns with face2's normal (dot ~ 1)
    if math.isclose((m @ face1.normal).dot(face2.normal), 1, rel_tol=TOL):
        return m
    m = rotate_about_axis(axis, -dihedral)
    assert math.isclose((m @ face1.normal).dot(face2.normal), 1, rel_tol=TOL)
    return m
def closest_point_on_mesh(obj, point):
    """obj.closest_point_on_mesh, but raising ValueError on failure instead of returning a flag."""
    found, location, normal, face_index = obj.closest_point_on_mesh(point)
    if not found:
        raise ValueError('failed to get closest_point_on_mesh')
    return location, normal, face_index
def walk_along_mesh(obj, mesh, start, heading):
    """
    Expects heading to be along the face its starting on already, otherwise we project it onto the face
    Returns Tuple[
        list of N points including start of the walk along a mesh in direction of heading with length of heading,
        list of N-1 face indices where the line from points[i] to points[i + 1] lies on face[i]
    ]
    """
    loc, normal, face_index = closest_point_on_mesh(obj, start)
    mesh.faces.ensure_lookup_table()
    points = [loc]
    face = mesh.faces[face_index]
    faces = []
    # TODO if we are given start at exactly a vert, the face is ambiguous, but maybe we should be nice and try each face
    # and take the one with the smallest dot since the heading may imply which face was "intended"
    # of course if you have coplanar faces the dot won't tell you enough, you'd then want to check which one the heading
    # actually produces a path that doesn't end right away
    # getting -0 issues
    if not math.isclose(abs(heading.dot(face.normal)), 0, rel_tol=1e-3):
        # if abs(heading.dot(face.normal)) > 1e-3:
        # heading is not tangent to the starting face: project it into the
        # face plane while preserving its original length
        logger.debug('reprojection heading onto face because dot is {:.6f} {} {}'.format(heading.dot(face.normal), heading, face.normal))
        l = heading.length
        heading = vector_rejection(heading, face.normal)
        heading.normalize()
        heading *= l
    # consume the heading one face at a time; each iteration either crosses an
    # edge onto the neighbouring face or terminates the walk
    while heading.length_squared:
        a = points[-1]
        b = a + heading
        # find first edge that intersects our heading
        intersection = None
        for edge in face.edges:
            v1 = edge.verts[0].co
            v2 = edge.verts[1].co
            # skip the edge we are currently standing on
            if point_on_line(a, v1, v2):
                continue
            intersection = line_line_intersection(a, b, v1, v2)
            if intersection is not None:
                break
        # end of the road
        if intersection is None:
            # logger.debug('INTERSECTION IS NONE')
            # the remaining heading ends inside this face
            points.append(b)
            faces.append(face.index)
            assert len(points) - 1 == len(faces)
            return points, faces
        # back to start
        # TODO this won't always be useful if the start is off an edge, we would have to check that an existing segment.dot(new_segment) == 0
        if (intersection - points[0]).length < TOL:
            # logger.debug('BACK TO START')
            assert len(points) - 1 == len(faces)
            return points, faces
        # hit a vert
        if (intersection - v1).length < VERT_TOL or (intersection - v2).length < VERT_TOL:
            # logger.debug('HIT A VERT')
            # the next face is ambiguous at a vert, so stop the walk there
            points.append(intersection)
            faces.append(face.index)
            assert len(points) - 1 == len(faces)
            return points, faces
        points.append(intersection)
        new_face = next_face(face, edge)
        if new_face is None:
            # logger.debug('NEWFACE IS NONE')
            # boundary edge: nothing to continue onto
            faces.append(face.index)
            assert len(points) - 1 == len(faces)
            return points, faces
        # assert (heading.length) >= (intersection - a).length
        heading -= (intersection - a) # subtract off the amount we have
        # rotate the remaining heading into the plane of the next face,
        # hinging about the shared edge (v2 - v1)
        heading = make_face_face_rotation_matrix(face, new_face, v2 - v1) @ heading
        faces.append(face.index)
        face = new_face
    # heading consumed exactly at a point on the current face
    assert len(points) - 1 == len(faces)
    return points, faces
    assert False
def snap_curve_splines_walk(obj, mesh, curve):
    """Replace each spline of *curve* with a surface walk across *mesh*
    starting at the spline's first point and heading toward its last."""
    world_to_obj = obj.matrix_world.copy()
    world_to_obj.invert()
    replaced = []
    for spline in list(curve.data.splines):
        points = spline.bezier_points if spline.type == 'BEZIER' else spline.points
        if len(points) < 2:
            continue
        # TODO does this need to be done anywhere else?
        # bring the spline endpoints into the mesh object's local space
        start = world_to_obj @ curve.matrix_world @ points[0].co
        end = world_to_obj @ curve.matrix_world @ points[-1].co
        walk_points, _faces = walk_along_mesh(obj, mesh, start, end - start)
        make_spline(curve, walk_points, type=spline.type)
        replaced.append(spline)
    for spline in replaced:
        curve.data.splines.remove(spline)
def generate_walks(obj, mesh, curve, starts, gen_n_spokes, gen_angles, gen_lengths):
    """Grow spokes of surface walks from each start and add them to *curve*.

    Each start is a point, or a (point, heading) pair; gen_n_spokes() picks
    the spoke count, gen_angles(n) the directions around the surface normal,
    gen_lengths(n) the walk lengths.
    """
    mesh.faces.ensure_lookup_table()
    for start in starts:
        n_spokes = gen_n_spokes()
        angles = gen_angles(n_spokes)
        lengths = gen_lengths(n_spokes)
        if isinstance(start, tuple):
            start, heading = start
            heading.normalize()
            loc, normal, face_index = closest_point_on_mesh(obj, start)
        else:
            loc, normal, face_index = closest_point_on_mesh(obj, start)
            # no heading supplied: pick an arbitrary direction in the tangent plane
            heading = rotate_about_axis(normal, random.uniform(-math.pi, math.pi)) @ normal.orthogonal()
        for length, angle in zip(lengths, angles):
            spoke = (rotate_about_axis(normal, angle) @ heading) * length
            walk_points, _faces = walk_along_mesh(obj, mesh, start, spoke)
            make_spline(curve, walk_points)
# not ideal but the graph isn't guaranteed to have real edges
def edges_from_verts(mesh, verts):
    """Yield the mesh edge index joining each consecutive pair of vert indices
    in *verts*; pairs with no connecting edge are silently skipped."""
    mesh.verts.ensure_lookup_table()
    for i1, i2 in zip(verts, verts[1:]):
        a = mesh.verts[i1]
        b = mesh.verts[i2]
        for edge in a.link_edges:
            if edge.other_vert(a) == b:
                yield edge.index
                break
def dev():
    """Manual smoke tests for this module, run against a specific dev blendfile.

    Requires objects named Dodec, BezierCurve, Plane, Cube.000/001/002 and
    Cube.Particles (with a vertex group 'Group' and a particle system). Each
    section exercises one scenario of the shortest-path / walk machinery.
    """
    C = bpy.context
    D = bpy.data
    # clear curves produced by a previous run
    to_remove = [x for x in D.objects if x.name.startswith('Curve')]
    for x in to_remove:
        D.objects.remove(x, do_unlink=True)
    obj = D.objects['Dodec']
    m = bmesh.new()
    m.from_mesh(obj.data)
    # m.transform(obj.matrix_world)
    # shortest path test
    G, verts = build_graph(m, vertex_group=obj.vertex_groups['Group'], cross_faces=True)
    bc = D.objects['BezierCurve']
    snap_curve_splines_shortest_path(G, obj, m, bc, closest_vert=False)
    bc.matrix_world = obj.matrix_world
    obj = D.objects['Plane']
    m = bmesh.new()
    m.from_mesh(obj.data)
    # path runs into edge which has no other face
    points, faces = walk_along_mesh(obj, m, Vector((-.99, -.99, 1)), Vector((1, 1.56, 0)).normalized() * 3)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    obj = D.objects['Cube.000']
    m = bmesh.new()
    m.from_mesh(obj.data)
    # ends on first face
    points, faces = walk_along_mesh(obj, m, Vector((-.99, -.99, 1)), Vector((1, 1.56, 0)).normalized() * 1)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    # goes to second face
    points, faces = walk_along_mesh(obj, m, Vector((-0.99, -0.99, 1)), Vector((1, .76, 0)).normalized() * 3)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    # hits a vert
    points, faces = walk_along_mesh(obj, m, Vector((-0.99, -0.99, 1)), Vector((1, 1, 0)).normalized() * 150)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    # initial heading is not along face
    points, faces = walk_along_mesh(obj, m, Vector((-0.99, -0.99, 1)), Vector((1, .33, 0.1)).normalized() * 3)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    # TODO we could have early stopping when we wrap around, but as in the case here, the starting point is off the edge
    # so just checking the intersection isnt sufficient, need to check the dot with all existing segments
    points, faces = walk_along_mesh(obj, m, Vector((-1, 0, -.99)), Vector((0, 0, 1)).normalized() * 30)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    obj = D.objects['Cube.001']
    m = bmesh.new()
    m.from_mesh(obj.data)
    # long walk wrapping a cube many times
    points, faces = walk_along_mesh(obj, m, Vector((-0.99, -0.99, 1)), Vector((1, .56, 0)).normalized() * 100)
    curve = make_curve(points, handle_type='VECTOR')
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    obj = D.objects['Cube.002']
    m = bmesh.new()
    m.from_mesh(obj.data)
    curve = make_empty_curve()
    # random spokes out of each face center
    generate_walks(
        obj,
        m,
        curve,
        [f.calc_center_median() for f in m.faces],
        partial(random.randint, 3, 5),
        partial(np.linspace, 0, np.pi * 2, endpoint=False),
        lambda n: [random.uniform(3, 5) for _ in range(n)],
    )
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    set_curve_handles(curve, 'VECTOR')
    obj = D.objects['Cube.Particles']
    m = bmesh.new()
    m.from_mesh(obj.data)
    depsg = C.evaluated_depsgraph_get()
    particles = obj.evaluated_get(depsg).particle_systems[0].particles
    curve = make_empty_curve('Curve.Particles')
    mat = obj.matrix_world.copy()
    mat.invert()
    # for p in particles[:10]:
    # v = rotated(Vector((0, 1, 0)), p.rotation)
    # p1 = p.location
    # p2 = p1 + v * 2
    # make_spline(curve, [mat @ p1, mat @ p2])
    # the mat stuff is to bring the particle location into object local space
    generate_walks(
        obj,
        m,
        curve,
        [(mat @ p.location, rotated(Vector((0, 1, 0)), p.rotation)) for p in particles],
        partial(const, 3),
        partial(np.linspace, 0, np.pi * 2, endpoint=False),
        partial(uniform_n, 1, 2),
    )
    curve.data.bevel_depth = 0.01
    curve.matrix_world = obj.matrix_world
    set_curve_handles(curve, 'VECTOR')
class GeodesicWeightedShortestPath(bpy.types.Operator):
    """Select shortest path between two vertices on a mesh using vertex weights"""
    bl_idname = 'mesh.geodesic_select_shortest_weighted_path'
    bl_label = 'Geodesic Select Shortest Weighted Path'
    bl_options = {'REGISTER', 'UNDO'}
    cross_faces: bpy.props.BoolProperty(
        name='Cross Faces',
        default=False,
        description='Allow crossing faces in n-gons even if no edge connects the verts',
    )
    vertex_group: bpy.props.StringProperty(name='Vertex Group', default='')
    @classmethod
    def poll(cls, context):
        # only meaningful while editing a mesh
        return context.mode == 'EDIT_MESH'
    def draw(self, context):
        obj = context.object
        self.layout.prop_search(self, 'vertex_group', obj, 'vertex_groups', text='Vertex Group')
        self.layout.prop(self, 'cross_faces')
    def execute(self, context):
        obj = context.object
        if len(obj.vertex_groups) == 0:
            self.report({'WARNING'}, 'This mesh has no vertex groups, use the builtin select shortest path')
            return {'CANCELLED'}
        if obj.data.total_vert_sel != 2:
            self.report({'WARNING'}, f'Select only 2 vertices, got {obj.data.total_vert_sel}')
            return {'CANCELLED'}
        # default to the active vertex group when none was picked in the redo panel
        if self.vertex_group == '':
            self.vertex_group = obj.vertex_groups[obj.vertex_groups.active_index].name
        m = bmesh.from_edit_mesh(obj.data)
        selected_verts = [x for x in m.verts if x.select]
        assert len(selected_verts) == 2
        G, verts = build_graph(m, vertex_group=obj.vertex_groups[self.vertex_group], cross_faces=self.cross_faces)
        path = try_shortest_path(G, selected_verts[0].index, selected_verts[1].index)
        if path is None:
            self.report({'WARNING'}, f'No path exists between the selected vertices {selected_verts[0].index} {selected_verts[1].index}')
            # we use FINISHED here to allow selecting another vertex group that might have a path
            return {'FINISHED'}
        m.verts.ensure_lookup_table()
        # select every vertex on the path...
        for p in path:
            m.verts[p].select = True
        m.edges.ensure_lookup_table()
        # ...and the edges joining consecutive path verts (when they exist)
        for e in edges_from_verts(m, path):
            m.edges[e].select = True
        # TODO is the same thing useful for faces?
        bmesh.update_edit_mesh(obj.data, False, False)
        return {'FINISHED'}
class GeodesicSnapCurveToMeshShortestPath(bpy.types.Operator):
    """Snap each spline in a curve to a mesh's faces by optionally weighted shortest path"""
    bl_idname = 'object.geodesic_snap_curve_to_mesh_shortest_path'
    bl_label = 'Geodesic Snap Curve to Mesh Shortest Path'
    bl_options = {'REGISTER', 'UNDO'}
    vertex_group: bpy.props.StringProperty(name='Vertex Group', default='')
    cross_faces: bpy.props.BoolProperty(
        name='Cross Faces',
        default=False,
        description='Allow crossing faces in n-gons even if no edge connects the verts',
    )
    closest_vert: bpy.props.BoolProperty(
        name='Closest Vert',
        default=False,
        description='Snap the start and end to the nearest vert',
    )
    @classmethod
    def poll(cls, context):
        # requires exactly one mesh and one curve selected
        return one_mesh_one_curve(context.selected_objects) is not None
    def draw(self, context):
        self.layout.prop_search(self, 'vertex_group', context.object, 'vertex_groups', text='Vertex Group')
        self.layout.row().prop(self, 'cross_faces')
        self.layout.row().prop(self, 'closest_vert')
    def execute(self, context):
        mesh_curve = one_mesh_one_curve(context.selected_objects)
        if mesh_curve is None:
            self.report({'ERROR'}, 'You need to select one mesh and one curve object')
            return {'CANCELLED'}
        obj, curve = mesh_curve
        m = bmesh.new()
        m.from_mesh(obj.data)
        # empty name means "no weighting"
        vertex_group = None if self.vertex_group == '' else obj.vertex_groups[self.vertex_group]
        G, verts = build_graph(m, vertex_group=vertex_group, cross_faces=self.cross_faces)
        snap_curve_splines_shortest_path(G, obj, m, curve, closest_vert=self.closest_vert)
        # move the curve into the mesh's transform so local-space points line up
        curve.matrix_world = obj.matrix_world
        return {'FINISHED'}
class GeodesicSnapCurveToMeshWalk(bpy.types.Operator):
    """Snap each spline in a curve to the surface of a mesh, using the spline's start and endpoint as the heading"""
    bl_idname = 'object.geodesic_snap_curve_to_mesh_walk'
    bl_label = 'Geodesic Snap Curve to Mesh Walk'
    bl_options = {'REGISTER', 'UNDO'}
    # handle type applied to the regenerated splines
    handle_type: bpy.props.EnumProperty(
        name='Handle Type',
        items=[
            ('VECTOR', 'Vector', 'Vector'),
            ('AUTO', 'Auto', 'Auto'),
        ],
    )
    # TODO other things probably need to access the modified geometry too
    use_modifiers: bpy.props.BoolProperty(name='Use Modifiers', default=True)
    @classmethod
    def poll(cls, context):
        # requires exactly one mesh and one curve selected
        return one_mesh_one_curve(context.selected_objects) is not None
    def execute(self, context):
        mesh_curve = one_mesh_one_curve(context.selected_objects)
        if mesh_curve is None:
            self.report({'ERROR'}, 'You need to select one mesh and one curve object')
            return {'CANCELLED'}
        obj, curve = mesh_curve
        # optionally walk on the modifier-evaluated mesh
        m = get_bmesh(obj, use_modifiers=self.use_modifiers, context=context)
        snap_curve_splines_walk(obj, m, curve)
        set_curve_handles(curve, self.handle_type)
        curve.matrix_world = obj.matrix_world
        return {'FINISHED'}
class GeodesicGenerateShortestPaths(bpy.types.Operator):
    """Generate shortest paths between random vertex pairs"""
    bl_idname = 'object.geodesic_generate_shortest_paths'
    bl_label = 'Geodesic Generate Shortest Paths'
    bl_options = {'REGISTER', 'UNDO'}
    n_paths: bpy.props.IntProperty(name='Number of Paths', min=1, default=1)
    with_replacement: bpy.props.BoolProperty(name='With Replacement', description='Re-use vertices if true', default=True)
    vertex_group: bpy.props.StringProperty(name='Vertex Group', default='')
    cross_faces: bpy.props.BoolProperty(
        name='Cross Faces',
        default=False,
        description='Allow crossing faces in n-gons even if no edge connects the verts',
    )
    handle_type: bpy.props.EnumProperty(
        name='Handle Type',
        items=[
            ('VECTOR', 'Vector', 'Vector'),
            ('AUTO', 'Auto', 'Auto'),
        ],
    )
    bevel_depth: bpy.props.FloatProperty(name='Bevel Depth', default=0, min=0, precision=3, step=1)
    seed: bpy.props.IntProperty(name='Seed', default=0)
    min_length: bpy.props.IntProperty(name='Min Length', default=2, description='Don\'t accept paths with fewer than this many vertices')
    def draw(self, context):
        self.layout.prop(self, 'n_paths')
        self.layout.prop_search(self, 'vertex_group', context.object, 'vertex_groups', text='Vertex Group')
        self.layout.prop(self, 'with_replacement')
        self.layout.prop(self, 'cross_faces')
        self.layout.prop(self, 'min_length')
        self.layout.prop(self, 'handle_type')
        self.layout.prop(self, 'bevel_depth')
        self.layout.prop(self, 'seed')
    @classmethod
    def poll(cls, context):
        # needs an active mesh object
        return context.object is not None and context.object.type == 'MESH'
    def execute(self, context):
        # seed for reproducible random pair draws in the redo panel
        random.seed(self.seed)
        obj = context.object
        m = bmesh.new()
        m.from_mesh(obj.data)
        # empty name means "no weighting"
        vertex_group = None if self.vertex_group == '' else obj.vertex_groups[self.vertex_group]
        G, verts = build_graph(m, vertex_group=vertex_group, cross_faces=self.cross_faces)
        curve = make_empty_curve()
        pointss = generate_multiple_paths(G, self.n_paths, with_replacement=self.with_replacement, min_length=self.min_length)
        for points in pointss:
            make_spline(curve, points, type='BEZIER', handle_type=self.handle_type)
        # generate_multiple_paths may give up before reaching n_paths
        if len(pointss) < self.n_paths:
            self.report({'WARNING'}, f'Only generated {len(pointss)} curves')
        curve.matrix_world = obj.matrix_world
        curve.data.bevel_depth = self.bevel_depth
        return {'FINISHED'}
def constant_or_random_enum(name):
    """Shared bpy EnumProperty offering a CONSTANT / RANDOM_UNIFORM choice."""
    items = [
        ('CONSTANT', 'Constant', 'Constant'),
        ('RANDOM_UNIFORM', 'Uniform Random', 'Uniform Random'),
    ]
    return bpy.props.EnumProperty(name=name, items=items)
class GeodesicGenerateWalks(bpy.types.Operator):
    """Generate walks on the surface of a mesh"""
    bl_idname = 'object.geodesic_generate_walks'
    bl_label = 'Geodesic Generate Walks'
    bl_options = {'REGISTER', 'UNDO'}
    # spoke count: either a constant or a uniform random draw per source
    n_spokes_type: constant_or_random_enum('Number of Spokes type')
    n_spokes: bpy.props.IntProperty(name='Number of Spokes', min=1, default=1)
    n_spokes_random_uniform_min: bpy.props.IntProperty(name='Uniform Random Min', min=0, default=1)
    n_spokes_random_uniform_max: bpy.props.IntProperty(name='Uniform Random Max', min=0, default=1)
    subset: bpy.props.IntProperty(name='Number of Sources to use', min=0, default=0, description='Only use this many sources (0 for all)')
    # where walks start: every face center, or particle locations/orientations
    source: bpy.props.EnumProperty(
        name='Source',
        items=[
            ('FACE_CENTERS', 'Face Centers', 'Face Centers'),
            ('PARTICLES', 'Particles', 'Particles'),
        ],
    )
    particle_system: bpy.props.StringProperty(name='Particle System', default='')
    # which local particle axis supplies the initial heading
    particle_axis: bpy.props.EnumProperty(
        name='Particle Axis',
        items=[
            ('X', 'X', 'X'),
            ('Y', 'Y', 'Y'),
            ('Z', 'Z', 'Z'),
        ]
    )
    # walk length: constant or uniform random per spoke
    path_length_type: constant_or_random_enum('Length of Paths Type')
    path_length: bpy.props.FloatProperty(name='Length of Paths', min=0.001, default=1)
    path_length_random_uniform_min: bpy.props.FloatProperty(name='Uniform Random Min', min=0.001, default=1)
    path_length_random_uniform_max: bpy.props.FloatProperty(name='Uniform Random Max', min=0.001, default=1)
    spoke_angle_type: bpy.props.EnumProperty(
        name='Spoke Angle Type',
        items=[
            ('EQUAL', 'Equally Spaced', 'Equally Spaced'),
            ('RANDOM', 'Randomly Spaced', 'Randomly Spaced'),
        ]
    )
    handle_type: bpy.props.EnumProperty(
        name='Handle Type',
        items=[
            ('VECTOR', 'Vector', 'Vector'),
            ('AUTO', 'Auto', 'Auto'),
        ],
    )
    bevel_depth: bpy.props.FloatProperty(name='Bevel Depth', default=0, min=0, precision=3, step=1)
    seed: bpy.props.IntProperty(name='Seed', default=0)
    def draw(self, context):
        # show only the sub-options relevant to the chosen modes
        self.layout.row(heading='Source').prop(self, 'source', expand=True)
        if self.source == 'PARTICLES':
            self.layout.prop_search(self, 'particle_system', context.object, 'particle_systems', text='Particle System')
            self.layout.row(heading='Axis').prop(self, 'particle_axis', expand=True)
        self.layout.prop(self, 'subset')
        self.layout.row(heading='Num Spokes Type').prop(self, 'n_spokes_type', expand=True)
        if self.n_spokes_type == 'CONSTANT':
            self.layout.prop(self, 'n_spokes')
        else:
            row = self.layout.row()
            row.prop(self, 'n_spokes_random_uniform_min', text='Min')
            row.prop(self, 'n_spokes_random_uniform_max', text='Max')
        self.layout.row(heading='Path Length Type').prop(self, 'path_length_type', expand=True)
        if self.path_length_type == 'CONSTANT':
            self.layout.prop(self, 'path_length')
        else:
            row = self.layout.row()
            row.prop(self, 'path_length_random_uniform_min', text='Min')
            row.prop(self, 'path_length_random_uniform_max', text='Max')
        self.layout.row(heading='Spoke Angle Type').prop(self, 'spoke_angle_type', expand=True)
        self.layout.row(heading='Handle Type').prop(self, 'handle_type', expand=True)
        self.layout.prop(self, 'bevel_depth')
        self.layout.prop(self, 'seed')
    @classmethod
    def poll(cls, context):
        # needs an active mesh object
        return context.object is not None and context.object.type == 'MESH'
    def execute(self, context):
        # seed for reproducible walks in the redo panel
        random.seed(self.seed)
        obj = context.object
        if self.source == 'PARTICLES':
            if len(obj.particle_systems) == 0:
                # fall back to face centers rather than failing outright
                self.report({'ERROR'}, 'Object has no particle system')
                self.source = 'FACE_CENTERS'
            elif self.particle_system == '':
                self.particle_system = obj.particle_systems[obj.particle_systems.active_index].name
        m = bmesh.new()
        m.from_mesh(obj.data)
        curve = make_empty_curve()
        if self.source == 'FACE_CENTERS':
            source = [f.calc_center_median() for f in m.faces]
        else:
            depsg = context.evaluated_depsgraph_get()
            particles = obj.evaluated_get(depsg).particle_systems[self.particle_system].particles
            # particle locations are in world space; bring them into object local space
            mat = obj.matrix_world.copy()
            mat.invert()
            source = [(mat @ p.location, rotated(AXES[self.particle_axis].copy(), p.rotation)) for p in particles]
        if self.subset > 0:
            random.shuffle(source)
            source = source[:self.subset]
        # build the per-source generator callables that generate_walks expects
        if self.n_spokes_type == 'CONSTANT':
            spokes = partial(const, self.n_spokes)
        else:
            # keep min <= max so randint can't raise
            self.n_spokes_random_uniform_max = max(self.n_spokes_random_uniform_min, self.n_spokes_random_uniform_max)
            spokes = partial(random.randint, self.n_spokes_random_uniform_min, self.n_spokes_random_uniform_max)
        if self.path_length_type == 'CONSTANT':
            lengths = partial(const_n, self.path_length)
        else:
            self.path_length_random_uniform_max = max(self.path_length_random_uniform_min, self.path_length_random_uniform_max)
            lengths = partial(uniform_n, self.path_length_random_uniform_min, self.path_length_random_uniform_max)
        if self.spoke_angle_type == 'EQUAL':
            angles = partial(np.linspace, 0, np.pi * 2, endpoint=False)
        else:
            angles = partial(uniform_n, -np.pi, np.pi)
        generate_walks(
            obj=obj,
            mesh=m,
            curve=curve,
            starts=source,
            gen_n_spokes=spokes,
            gen_angles=angles,
            gen_lengths=lengths,
        )
        curve.matrix_world = obj.matrix_world
        curve.data.bevel_depth = self.bevel_depth
        set_curve_handles(curve, self.handle_type)
        return {'FINISHED'}
# every operator class; registered in register() and listed in GeodesicMenu
classes = [
    GeodesicWeightedShortestPath,
    GeodesicSnapCurveToMeshShortestPath,
    GeodesicSnapCurveToMeshWalk,
    GeodesicGenerateShortestPaths,
    GeodesicGenerateWalks,
]
# TODO figure out the right place for all the menu items
class GeodesicMenu(bpy.types.Menu):
    """Submenu listing every Geodesic operator."""
    bl_label = 'Geodesic'
    bl_idname = 'OBJECT_MT_geodesic'
    def draw(self, context):
        # one entry per registered operator
        for klass in classes:
            self.layout.operator(klass.bl_idname)
def menu_func(self, context):
    # draw hook appended to VIEW3D_MT_object; adds the Geodesic submenu entry
    self.layout.menu(GeodesicMenu.bl_idname)
def register():
    """Register the menu, every operator class, and the object-menu hook."""
    bpy.utils.register_class(GeodesicMenu)
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
    bpy.types.VIEW3D_MT_object.append(menu_func)
def unregister():
    """Undo register(): drop the menu class, operators, and the menu hook."""
    bpy.utils.unregister_class(GeodesicMenu)
    for operator_cls in classes:
        bpy.utils.unregister_class(operator_cls)
    bpy.types.VIEW3D_MT_object.remove(menu_func)
if __name__ == '__dev__':
    # I have a script in a testing blendfile with the following two lines in it to run this script
    # filename = "/path/to/origami.py"
    # exec(compile(open(filename).read(), filename, 'exec'), {'__name__': '__dev__'})
    try:
        # re-register cleanly when the script is re-run inside Blender
        unregister()
    except Exception:
        pass
    register()
    logging.basicConfig(level=logging.DEBUG)
    # dev()
    # visual separator between runs in the console
    logger.debug('-' * 80)
elif __name__ == '__main__':
    register()
# FUTURE detect intersections for either path discard or early stopping or some type of weaving (i.e. add intersection to each involved spline and shuffle their z height)
| 37,375 | 12,535 |
# -*-coding: utf-8 -*-
# Converted from Python 2 print statements (SyntaxError on Python 3) to
# print() calls. Divisions use // so the printed values match the original
# Python 2 integer-division results (e.g. 30 / 6 printed 5, not 5.0).
print("닭을 세어 봅시다.")
print("암탉", 25 + 30 // 6)
print("수탉", 100 - 25 * 3 % 4)
print("이제 달걀도 세어 봅시다.")
print(3 + 2 + 1 - 5 + 4 % 2 - 1 // 4 + 6)
print("3+2 < 5-7 는 참인가요?")
print(3 + 2 < 5 - 7)
print("3+2 는 얼마죠?", 3 + 2)
print("5-7은 얼마죠?", 5 - 7)
print("아하 이게 False인 이유네요")
print("더 해볼까요.")
print("더 큰가요??", 5 > -2)
print("더 크거나 같나요?", 5 >= -2)
print("더 작거나 같나요?", 5 <= -2)
import struct
from src.system.controller.python.messaging.messages import Message, hello, header, NODE_TYPE_CONTROLLER, MESSAGE_HELLO, ip_address
class HelloMessage(Message):
    """HELLO handshake message advertising this controller's node type,
    listening port and IPv4 address.

    Construct either from *raw_bytes* (parses an incoming payload) or from
    the keyword fields (builds a new outgoing message).
    """

    def __init__(self,
                 raw_bytes=None,
                 # OR
                 source_id=None, target_id=None, listening_port=None, ip_address_str=None):
        Message.__init__(self, raw_bytes, message_id=MESSAGE_HELLO, source_id=source_id, target_id=target_id)
        if raw_bytes is None:
            # payload tuple layout: (node type, listening port, 4 IPv4 octets)
            octets = ip_address_str.split('.')
            self._hello_tuple = (NODE_TYPE_CONTROLLER, listening_port,
                                 int(octets[0]), int(octets[1]), int(octets[2]), int(octets[3]))
        else:
            # Parse the payload fields out of raw_bytes using the shared format
            self._hello_tuple = self.unpack(hello)

    def get_bytes(self):
        """Serialize header bytes plus the packed HELLO payload."""
        return Message.get_bytes(self) + struct.pack(hello, *self._hello_tuple)
"""Path-related functions for birch."""
import os
def _legacy_cfg_dpath(namespace):
return os.path.join(
os.path.expanduser('~'),
'.{}'.format(namespace),
)
XDG_CONFIG_HOME_VARNAME = 'XDG_CONFIG_HOME'
def _xdg_cfg_dpath(namespace):
    """XDG config dir for *namespace*: $XDG_CONFIG_HOME/<namespace>, falling
    back to ~/.config/<namespace> when the variable is unset."""
    if XDG_CONFIG_HOME_VARNAME in os.environ:  # pragma: no cover
        base = os.environ[XDG_CONFIG_HOME_VARNAME]
    else:  # pragma: no cover
        base = os.path.join(os.path.expanduser('~'), '.config')
    return os.path.join(base, namespace)
XDG_CACHE_HOME_VARNAME = 'XDG_CACHE_HOME'
def _xdg_cache_dpath(namespace):
    """XDG cache dir for *namespace*: $XDG_CACHE_HOME/<namespace>, falling
    back to ~/.cache/<namespace> when the variable is unset."""
    if XDG_CACHE_HOME_VARNAME in os.environ:  # pragma: no cover
        base = os.environ[XDG_CACHE_HOME_VARNAME]
    else:  # pragma: no cover
        base = os.path.join(os.path.expanduser('~'), '.cache')
    return os.path.join(base, namespace)
| 937 | 341 |
from unittest import TestCase
from spacecat.common_utils import right_shift
class TestRight_shift(TestCase):
    """Unit tests for common_utils.right_shift."""
    def test_right_shift(self):
        # shifting "1111" right by two should pad with zeros on the left
        original = "1111"
        shifted = right_shift(original, 2)
        self.assertEqual("0011", shifted)
def read_state(lines):
    """Parse rows of '#'/'.' characters into a {(x, y): 1|0} grid.

    Returns (state, width, height); width is taken from the first row.
    """
    state = {(x, y): int(char == '#')
             for y, row in enumerate(lines)
             for x, char in enumerate(row)}
    return state, len(lines[0]), len(lines)
def simulate(state, w, h, corners, steps=100):
    """Run Conway's game of life on a bounded w x h grid; return the final live count.

    state maps (x, y) -> 1/0 for every cell of the grid. The input dict is
    NOT modified (the original implementation mutated it when corners=True).
    corners=True pins the four corner cells on before and after every step
    (part 2 of the puzzle). steps defaults to 100 to match the puzzle but is
    exposed for testing.
    """
    # precompute in-bounds neighbour coordinates for every cell
    neighbours = {(x, y): [(i, j) for i in range(x - 1, x + 2) for j in range(y - 1, y + 2)
                           if 0 <= i < w and 0 <= j < h and (x, y) != (i, j)]
                  for x, y in state}
    state = dict(state)  # work on a copy so the caller's grid is untouched
    corner_cells = ((0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1))
    if corners:
        for cell in corner_cells:
            state[cell] = 1
    for _ in range(steps):
        # a cell is live next generation with exactly 3 live neighbours,
        # or with 2 when it is already live
        newstate = {}
        for cell in state:
            live = sum(state[other] for other in neighbours[cell])
            newstate[cell] = 1 if (live == 3 or (live == 2 and state[cell])) else 0
        state = newstate
        if corners:
            for cell in corner_cells:
                state[cell] = 1
    return sum(state.values())
# Advent of Code 2015 day 18: animate the light grid for 100 steps, then
# again with the four corner lights stuck on.
with open("day18.txt") as fh:
    state, w, h = read_state([l.strip() for l in fh.readlines()])
    print("2015 day 18 part 1: %d" % simulate(state, w, h, False))
    print("2015 day 18 part 2: %d" % simulate(state, w, h, True))
| 1,085 | 447 |
import pickle
from os.path import isfile
from schedule.Week import Week
from schedule.default import list_of_week_days_names, list_of_lesson_names
class ScheduleData(object):
    """Static (module-level) holder for the current week's schedule and homework,
    persisted with pickle under save_schedule/schedule."""
    # list of homework entries, one slot per lesson (populated by init()/load())
    homework = None
    # Week instance describing the current timetable
    week = None
    @staticmethod
    def get_day_number(name):
        # index of the first weekday whose alias collection contains *name* (case-insensitive);
        # raises StopIteration when no weekday matches
        return next(i for i, x in enumerate(list_of_week_days_names) if name.lower() in x)
    @staticmethod
    def get_lesson_number(name):
        # index of the first lesson whose alias collection contains *name* (case-insensitive);
        # raises StopIteration when no lesson matches
        return next(i for i, x in enumerate(list_of_lesson_names) if name.lower() in x)
    @staticmethod
    def dump():
        # serialize (week, homework) for writing to disk
        return pickle.dumps((ScheduleData.week, ScheduleData.homework))
    @staticmethod
    def load(schedule_data_bytes):
        # NOTE(review): pickle.loads can execute arbitrary code from its input;
        # only ever load files written by this application itself
        tup = pickle.loads(schedule_data_bytes)
        ScheduleData.week = tup[0]
        ScheduleData.homework = tup[1]
    @staticmethod
    def init():
        # restore saved state when present, otherwise start a fresh week
        if isfile("save_schedule/schedule"):
            with open("save_schedule/schedule", "rb") as f:
                ScheduleData.load(f.read())
        else:
            ScheduleData.week = Week.generate_current_week()
            ScheduleData.homework = [None, None, None, None, None, None, None, None, None]
ScheduleData.init()
| 1,149 | 371 |
"""
Nest a DataObject in another DataObject.
"""
from do_py import DataObject, R
class Contact(DataObject):
    """Leaf DataObject holding contact information; nested under `Author`.

    :restriction phone_number: Free-form phone number string.
    """
    _restrictions = {
        'phone_number': R.STR
    }
class Author(DataObject):
    """
    This DataObject is nested under `VideoGame` and nests `Contact`.

    :restriction id: integer author id.
    :restriction name: author name string.
    :restriction contact: Nested DataObject that represents contact information for this author.
    """
    _restrictions = {
        'id': R.INT,
        'name': R.STR,
        'contact': Contact
    }
class VideoGame(DataObject):
    """
    Top-level DataObject; nests `Author` (which in turn nests `Contact`).

    :restriction id: integer game id.
    :restriction name: game title; NULL_STR, so None is also accepted.
    :restriction author: Nested DataObject that represents author information for this video game.
    """
    _restrictions = {
        'id': R.INT,
        'name': R.NULL_STR,
        'author': Author
    }
# Data objects must be instantiated at their **init** with a dictionary and strict True(default) or False.
# Nested sections of the dict are validated and wrapped recursively: the
# 'author' dict becomes an Author instance whose 'contact' becomes a Contact.
instance = VideoGame({
    'id': 1985,
    'name': 'The Game',
    'author': {
        'id': 3,
        'name': 'You Lose',
        'contact': {
            'phone_number': '555-555-5555'
        }
    }
}, strict=False)
# Printing renders the nested DataObjects as JSON-like output:
print(instance)
# output: VideoGame{"author": {"contact": {"phone_number": "555-555-5555"}, "id": 3, "name": "You Lose"}, "id": 1985, "name": "The Game"}
| 1,373 | 441 |
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from .base import BaseTestCase, MockResponse
def mockApiSetup():
    """Register canned HTTP responses for the k8shost worker API tests.

    Registers handlers on BaseTestCase's mock HTTP layer for:
      * GET  /worker/k8shost                  -> list of two hosts
      * GET  /worker/k8shost/5                -> single "bundle" host
      * GET  /worker/k8shost/5?setup_log=true -> same host, log variant
      * GET  /worker/k8shost/8                -> 404 (missing host)
      * POST /worker/k8shost/5                -> 204 (modify host)
      * POST /worker/k8shost/                 -> 201 + Location (create)
    """
    # Host list: host 4 is "unlicensed" with full sysinfo; host 5 is in
    # the "bundle" state with no hostname yet.
    BaseTestCase.registerHttpGetHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost",
        response=MockResponse(
            json_data={
                "_embedded": {
                    "k8shosts": [
                        {
                            "status": "unlicensed",
                            "propinfo": {
                                "bds_storage_apollo": "false",
                                "bds_network_publicinterface": "ens5",
                            },
                            "approved_worker_pubkey": [],
                            "tags": [],
                            "hostname": (
                                "ip-10-1-0-238.eu-west-2.compute.internal"
                            ),
                            "ipaddr": "10.1.0.238",
                            "setup_log": (
                                "/var/log/bluedata/install/"
                                "k8shost_setup_10.1.0.238-"
                                "2020-4-26-18-41-16"
                            ),
                            "_links": {
                                "self": {"href": "/api/v2/worker/k8shost/4"}
                            },
                            "sysinfo": {
                                "network": [],
                                "keys": {
                                    "reported_worker_public_key": (
                                        "ssh-rsa ...== server\n"
                                    )
                                },
                                "storage": [],
                                "swap": {"swap_total": 0},
                                "memory": {"mem_total": 65842503680},
                                "gpu": {"gpu_count": 0},
                                "cpu": {
                                    "cpu_logical_cores": 16,
                                    "cpu_count": 8,
                                    "cpu_physical_cores": 8,
                                    "cpu_sockets": 1,
                                },
                                "mountpoint": [],
                            },
                        },
                        {
                            "status": "bundle",
                            "approved_worker_pubkey": [],
                            "tags": [],
                            "hostname": "",
                            "ipaddr": "10.1.0.186",
                            "setup_log": (
                                "/var/log/bluedata/install/"
                                "k8shost_setup_10.1.0.186-"
                                "2020-4-26-18-49-10"
                            ),
                            "_links": {
                                "self": {"href": "/api/v2/worker/k8shost/5"}
                            },
                        },
                    ]
                }
            },
            status_code=200,
            headers={},
        ),
    )
    # Single host fetch for host 5 (same payload as in the list above).
    BaseTestCase.registerHttpGetHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost/5",
        response=MockResponse(
            json_data={
                "status": "bundle",
                "approved_worker_pubkey": [],
                "tags": [],
                "hostname": "",
                "ipaddr": "10.1.0.186",
                "setup_log": (
                    "/var/log/bluedata/install/"
                    "k8shost_setup_10.1.0.186-"
                    "2020-4-26-18-49-10"
                ),
                "_links": {"self": {"href": "/api/v2/worker/k8shost/5"}},
            },
            status_code=200,
            headers={},
        ),
    )
    # Same host requested with ?setup_log=true; payload is identical here.
    BaseTestCase.registerHttpGetHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost/5?setup_log=true",
        response=MockResponse(
            json_data={
                "status": "bundle",
                "approved_worker_pubkey": [],
                "tags": [],
                "hostname": "",
                "ipaddr": "10.1.0.186",
                "setup_log": (
                    "/var/log/bluedata/install/"
                    "k8shost_setup_10.1.0.186-"
                    "2020-4-26-18-49-10"
                ),
                "_links": {"self": {"href": "/api/v2/worker/k8shost/5"}},
            },
            status_code=200,
            headers={},
        ),
    )
    # Unknown host id: 404 with raise_for_status enabled.
    BaseTestCase.registerHttpGetHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost/8",
        response=MockResponse(
            json_data={},
            status_code=404,
            raise_for_status_flag=True,
            headers={},
        ),
    )
    # POST to an existing host: 204 No Content.
    BaseTestCase.registerHttpPostHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost/5",
        response=MockResponse(json_data={}, status_code=204, headers={}),
    )
    # POST to the collection (create): 201 with a Location header.
    BaseTestCase.registerHttpPostHandler(
        url="https://127.0.0.1:8080/api/v2/worker/k8shost/",
        response=MockResponse(
            json_data={},
            status_code=201,
            headers={"location": "/new/cluster/id"},
        ),
    )
| 6,297 | 1,841 |
import os
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_init_script_reload(host):
    """`/etc/init.d/consul reload` must exit successfully."""
    assert host.run("/etc/init.d/consul reload").succeeded
def test_init_script_stop(host):
    """`/etc/init.d/consul stop` must exit successfully."""
    assert host.run("/etc/init.d/consul stop").succeeded
def test_init_script_start(host):
    """`/etc/init.d/consul start` must exit successfully."""
    assert host.run("/etc/init.d/consul start").succeeded
| 498 | 189 |
#!/usr/bin/env python
# coding: utf-8
# In[97]:
#Imports & Dependencies
# NOTE: this file is a Jupyter-notebook export; the get_ipython() shell
# calls below only work when executed inside IPython/Jupyter.
get_ipython().system('pip install selenium')
get_ipython().system('pip install splinter')
from splinter import Browser
from bs4 import BeautifulSoup
# Launch a visible (non-headless) Chrome session driven by chromedriver.
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# In[98]:
# Load the NASA Mars news page in the automated browser.
url = "https://mars.nasa.gov/news/"
browser.visit(url)
# In[99]:
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# In[100]:
# Scrape the first news item: headline, teaser paragraph and date.
article = soup.find("div", class_="list_text")
news_p = article.find("div", class_="article_teaser_body").text
news_title = article.find("div", class_="content_title").text
news_date = article.find("div", class_="list_date").text
print(news_date)
print(news_title)
print(news_p)
# In[106]:
url2 = "https://jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url2)
# Scrape the browser into soup and use soup to find the image of mars
# Save the image url to a variable called `img_url`
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
image = soup.find("img", class_="thumb")["src"]
img_url = "https://jpl.nasa.gov"+image
featured_image_url = img_url
# Use the requests library to download and save the image from the `img_url` above
import requests
import shutil
# stream=True + copyfileobj saves the image without loading it fully in memory.
response = requests.get(img_url, stream=True)
with open('img.jpg', 'wb') as out_file:
    shutil.copyfileobj(response.raw, out_file)
# Display the image with IPython.display
from IPython.display import Image
Image(url='img.jpg')
# In[107]:
import tweepy
# Twitter API Keys
# Credentials are kept out of the notebook in a local key_vault module.
from key_vault import (consumer_key,
consumer_secret,
access_token,
access_token_secret)
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# Use the latest tweet from the Mars weather account as the weather report.
target_user = "marswxreport"
full_tweet = api.user_timeline(target_user , count = 1)
mars_weather=full_tweet[0]['text']
mars_weather
# In[108]:
url3 = "http://space-facts.com/mars/"
browser.visit(url3)
# In[109]:
import pandas as pd
# pandas.read_html pulls every <table> from the page; the first one holds
# the Mars facts table, converted back to HTML for embedding.
facts_url = "https://space-facts.com/mars/"
browser.visit(facts_url)
mars_data = pd.read_html(facts_url)
mars_data = pd.DataFrame(mars_data[0])
mars_facts = mars_data.to_html(header = False, index = False)
print(mars_facts)
# In[110]:
url4 = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url4)
# In[111]:
import time
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
mars_hemis=[]
# In[112]:
# Visit each of the four hemisphere result pages, collect the full-size
# image URL and title, then navigate back to the results list.
for i in range (4):
    # Crude fixed wait for the page to finish loading before clicking.
    time.sleep(5)
    images = browser.find_by_tag('h3')
    images[i].click()
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    partial = soup.find("img", class_="wide-image")["src"]
    img_title = soup.find("h2",class_="title").text
    img_url = 'https://astrogeology.usgs.gov'+ partial
    dictionary={"title":img_title,"img_url":img_url}
    mars_hemis.append(dictionary)
    browser.back()
# In[113]:
print(mars_hemis)
# In[ ]:
# In[ ]:
#!/usr/bin/python
# Copyright (c) 2017 xyzdev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
from grama.grama import Machine, Parser
def main():
    """Entry point: parse arguments, load a Gra'Ma' program and execute it.

    Program source comes from one of three mutually exclusive places:
      * a filename positional argument,
      * the -e "command" option,
      * stdin, when it is not a TTY (piped input, terminated by EOF or a
        lone ';' line).
    Interactive mode is entered explicitly with -i, or implicitly when no
    program is given and stdin is a terminal.
    """
    argparser = argparse.ArgumentParser(description="Gra'Ma' Lang - virtual graph machine interpreter")
    argparser.add_argument('-i', dest='interactive', action='store_true', default=False,
                           help='open interactive mode after executing a program file')
    argparser.add_argument('-d', dest='debug', action='store_true', default=False,
                           help='open debugger after loading program but before executing')
    argparser.add_argument('-v', dest='verbose', action='store_true', default=False,
                           help='enable extra debug output')
    argparser.add_argument('-e', dest='execute', default='', type=str,
                           help='execute command')
    # REMAINDER collects everything after the options; validated just below.
    argparser.add_argument('filename', type=str, default=None, nargs=argparse.REMAINDER, help='program file to execute')
    args = argparser.parse_args()
    if len(args.filename) > 1:
        argparser.error('extra arguments after filename')
    if args.execute and args.filename:
        argparser.error('filename and -e arguments are mutually exclusive')
    # With no explicit program, go interactive only when stdin is a terminal.
    interactive = args.interactive if (args.filename or args.execute) else sys.stdin.isatty()
    ma = Machine()
    parser = Parser()
    if args.filename:
        with open(args.filename[0], 'r') as f:
            source = f.read()
        ma.instructions.extend(parser.parse(source))
    elif args.execute:
        ma.instructions.extend(parser.parse(args.execute))
    elif not interactive:
        # Piped input: read lines until EOF or a lone ';' terminator.
        source = ''
        while True:
            inp = sys.stdin.readline()
            if not inp or inp.strip() == ';':
                break
            source += inp
        ma.instructions.extend(parser.parse(source))
    if args.verbose:
        # BUGFIX: this was a Python 2 print *statement* (`print [...]`),
        # a syntax error under Python 3; the parenthesised single-argument
        # form prints identically on both Python 2 and 3.
        print([str(i) for i in ma.instructions])
    if ma.instructions and not args.debug:
        ma.do(verbose=args.verbose)
    if interactive or args.debug:
        # Run (again) with the debugger attached for interactive sessions.
        ma.debugger.attach = True
        ma.debugger.debug = args.debug
        ma.debugger.verbose = args.verbose
        ma.do(verbose=args.verbose)
# Standard CLI entry point; main()'s return value becomes the exit status.
if __name__ == "__main__":
    sys.exit(main())
| 3,281 | 986 |
from loa.team import Team
def get_team():
    # Factory used by the game framework to obtain this module's team.
    return LittleLyrical("LittleLyrical")
class LittleLyrical(Team):
    """Team strategy: rotate the line-up left each turn, then bubble the
    weakest (lowest-HP) unit toward the back so it is hit last."""

    def arrange(self, enemy: Team):
        """Rearrange self.units in place; `enemy` is unused by this strategy.

        Two passes:
          1. rotate the whole line-up left by one position;
          2. find the lowest HP (capped below 50) among all but the last
             slot and bubble units with that HP toward the back.
        Each unit's .pos attribute is kept in sync with its list index.
        """
        # --- Pass 1: rotate the line-up left by one slot ----------------
        first_unit = self.units[0]
        for i in range(self.num_positions - 1):
            self.units[i] = self.units[i + 1]
            if self.units[i] is not None:
                self.units[i].pos = i
        self.units[-1] = first_unit
        if self.units[-1] is not None:
            self.units[-1].pos = self.num_positions - 1
        # --- Pass 2: bubble the weakest unit backwards ------------------
        # 50 acts as a cap: units at 50 HP or more are never treated as weak.
        min_hp = 50
        for i in range(self.num_positions - 1):
            if self.units[i] is not None and self.units[i].hp < min_hp:
                min_hp = self.units[i].hp
        for i in range(self.num_positions - 1):
            if self.units[i] is not None and self.units[i].hp == min_hp:
                # Swap the weak unit one slot back; repeated matches keep
                # pushing it toward the end of the line.
                self.units[i], self.units[i + 1] = self.units[i + 1], self.units[i]
        # Re-sync positions after the swaps.
        for i in range(self.num_positions - 1):
            if self.units[i] is not None:
                self.units[i].pos = i
        if self.units[-1] is not None:
            # BUGFIX: was hard-coded 9; use num_positions - 1 for consistency
            # with the identical assignment in the rotation pass above.
            self.units[-1].pos = self.num_positions - 1
| 1,270 | 436 |
#!/usr/bin/env python3
import sys, os, shutil, json, yaml
from time import localtime
# Selection flags: restrict downloads to the simulated (Baruzzo) data sets
# or to the GIAB data set plus the hg38 reference only.
ONLY_SIMULATED = False
ONLY_GIAB = True
# Make import from parent directory possible
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import modules.file_utils as file_utils
# safe_load avoids PyYAML's arbitrary-object construction: yaml.load()
# without an explicit Loader is deprecated and unsafe, and this file only
# needs plain mappings/scalars anyway.
with open("constants.yml", "r") as constants_file:
    constants = yaml.safe_load(constants_file)
# Directory Paths
# All downloads land under these two data directories (created at the
# bottom of this script).
reference_directory = "data/references/"
datasets_directory = "data/datasets/"
def log_task_start(item, path):
    """Record `path` as in progress (for cleanup on failure) and announce it."""
    message = "Downloading {}...".format(item)
    started_tasks.append(path)
    print(message, flush=True)
def log_task_end(item, path):
    """Record `path` as completed and announce the finished download."""
    message = "Downloaded {}".format(item)
    finished_tasks.append(path)
    print(message, flush=True)
def log_data_present(item):
    """Announce that `item` already exists on disk and will be skipped."""
    print("{} already present".format(item), flush=True)
####################
# REFERENCE GENOMES
####################
# Add new tools needed to download reference genomes here
tools = ["twoBitToFa"]
# Constants
fasta_file_ending = ".fa"
fastq_file_ending = ".fastq"
# UCSC rsync mirror offering statically linked Linux binaries of the tools.
rsync_uri = "rsync://hgdownload.soe.ucsc.edu/genome/admin/exe/linux.x86_64/"
# Paths appended to started_tasks but never to finished_tasks are deleted
# on failure by the try/finally at the bottom of this script.
started_tasks = []
finished_tasks = []
def get_human_genome(genome_id, file_path):
    """Download a UCSC human genome as .2bit and convert it to FASTA.

    genome_id: UCSC assembly name, e.g. "hg19" or "hg38".
    file_path: destination path of the resulting .fa file.
    """
    url = "http://hgdownload.soe.ucsc.edu/goldenPath/"
    url += "{0}/bigZips/{0}.2bit".format(genome_id)
    two_bit_path = file_path + ".2bit"
    # Track the intermediate .2bit file so it is cleaned up if the run fails.
    started_tasks.append(two_bit_path)
    file_utils.download(url, two_bit_path)
    finished_tasks.append(two_bit_path)
    # Convert .2bit file to .fa
    print("Extracting {} from 2bit file...".format(genome_id), flush=True)
    # Uses the twoBitToFa binary fetched by get_tools(); the chmod makes
    # sure the rsync'd binary is executable before invoking it.
    os.system("chmod +x {0}twoBitToFa && {0}twoBitToFa {1} {2}".format(
        reference_directory,
        two_bit_path,
        file_path
    ))
    file_utils.delete(two_bit_path)
def get_p_falciparum(genome_id, file_path):
    """Download and unpack the P. falciparum (malaria) reference genome.

    The genome ships inside a .tar.bz2 archive; the FASTA it contains is
    moved to `file_path`, then the archive and unpacked directory are
    removed.
    """
    url = "http://bp1.s3.amazonaws.com/malaria.tar.bz2"
    download_path = reference_directory + "malaria.tar.bz2"
    file_utils.download(url, download_path)
    print("Unzipping {}...".format(genome_id), flush=True)
    unzipped_directory = file_utils.unzip(download_path)
    os.rename(unzipped_directory + "/genome_sequence_pfal.fa", file_path)
    file_utils.delete(download_path)
    file_utils.delete(unzipped_directory)
# Add new reference genomes with options here
# Each entry maps a genome id to its download function ("getter") plus
# display metadata that ends up in references.json.
genomes = {
    "hg19": {
        "getter": get_human_genome,
        "name": "Human (hg19)",
        "source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
    },
    "hg38": {
        "getter": get_human_genome,
        "name": "Human (hg38)",
        "source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
    },
    "pfal": {
        "getter": get_p_falciparum,
        "name": "Malaria",
        "source": "http://bioinf.itmat.upenn.edu/BEERS/bp1/datasets.php"
    }
}
def get_tools():
    """Fetch any missing helper binaries from the UCSC rsync mirror."""
    for tool_name in tools:
        tool_path = reference_directory + tool_name
        if os.path.exists(tool_path):
            log_data_present(tool_name)
            continue
        log_task_start(tool_name, tool_path)
        os.system("rsync -aPq {} {}".format(rsync_uri + tool_name, tool_path))
        log_task_end(tool_name, tool_path)
def remove_tools():
    """Delete the downloaded helper binaries again."""
    for tool_name in tools:
        file_utils.delete(reference_directory + tool_name)
def genome_path(genome_id):
    """Path of the FASTA file for `genome_id` under the reference directory."""
    return "{}{}{}".format(reference_directory, genome_id, fasta_file_ending)
def get_genomes():
    """Download all selected reference genomes and keep references.json
    (the manifest of available references) up to date."""
    genome_infos_path = os.path.join(reference_directory, "references.json")
    genome_infos = []
    # Start from the existing manifest so reruns append instead of overwrite.
    if os.path.exists(genome_infos_path):
        with open(genome_infos_path, "r") as genome_infos_file:
            genome_infos = json.load(genome_infos_file)
    for genome_id, genome_specification in genomes.items():
        if ONLY_SIMULATED and genome_id not in ["hg19", "pfal"]:
            print("Skipping {} (only simulated)".format(genome_specification["name"]))
            continue
        if ONLY_GIAB and genome_id not in ["hg38"]:
            print("Skipping {} (only giab)".format(genome_specification["name"]))
            continue
        file_path = genome_path(genome_id)
        # NOTE(review): info_path is computed but never used below.
        info_path = file_path.split(fasta_file_ending)[0] + ".yml"
        genome_getter = genome_specification["getter"]
        if not os.path.exists(file_path):
            log_task_start(genome_id, file_path)
            genome_getter(genome_id, file_path)
            genome_infos.append({
                "id": genome_id,
                "name": genome_specification["name"],
                "source": genome_specification["source"]
            })
            # Rewrite the manifest after each successful download so a later
            # failure does not lose earlier entries.
            with open(genome_infos_path, "w") as genome_infos_file:
                genome_infos_file.write(json.dumps(genome_infos))
            log_task_end(genome_id, file_path)
        else:
            log_data_present(genome_id)
###################
# RNASEQ DATA SETS
###################
def write_dataset_json(info):
    """Write the dataset descriptor JSON next to the dataset directory.

    Adds the common bookkeeping fields (method, layout, creation time and
    error flag) to `info` before serialising it.
    """
    dataset_info_path = datasets_directory + info["id"] + ".json"
    info["method"] = constants["dataset"]["FILE"]
    info["layout"] = constants["dataset"]["PAIRED"]
    # localtime() returns a struct_time; json.dump serialises it as a list.
    info["created"] = localtime()
    info["error"] = False
    with open(dataset_info_path, "w") as dataset_info_file:
        json.dump(info, dataset_info_file)
def get_baruzzo(dataset, directory):
    """Download and lay out one simulated (Baruzzo/BEERS) RNA-seq dataset.

    Downloads the archive named by dataset["file_name"], unpacks it into a
    temporary `beers/` subdirectory, renames the paired FASTA reads to
    forward/reverse files at the dataset root, moves the truth .cig file
    alongside them, then writes the dataset descriptor JSON.
    """
    zip_name = "{}.tar.bz2".format(dataset["file_name"])
    url = "http://bp1.s3.amazonaws.com/{}".format(zip_name)
    download_path = directory + "/" + zip_name
    file_utils.download(url, download_path)
    print("Unzipping {}...".format(dataset["name"]), flush=True)
    file_utils.unzip(download_path)
    # Move files to /beers directory
    beers_directory = directory + "/beers/"
    file_utils.create_directory(beers_directory)
    for file_name in os.listdir(directory):
        file_path = directory + "/" + file_name
        if not os.path.isdir(file_path) and not file_path == download_path:
            shutil.move(file_path, beers_directory + file_name)
    # Move FASTQ files to root and rename
    def setup_file(direction):
        # Renames e.g. <id>.forward.fa -> <dir>/forward.fastq and returns
        # the original name plus the new location.
        file_name = "{}.{}.fa".format(dataset["id"], direction)
        file_origin = beers_directory + file_name
        file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
        os.rename(file_origin, file_destination)
        return file_name, file_destination
    forward_file_name, forward_file_path = setup_file(constants["dataset"]["FORWARD"])
    reverse_file_name, reverse_file_path = setup_file(constants["dataset"]["REVERSE"])
    # Move CIG file to root and rename
    truth_file_name = "{}.cig".format(dataset["id"])
    truth_file_path = directory + "/truth.cig"
    os.rename(beers_directory + truth_file_name, truth_file_path)
    file_utils.delete(download_path)
    file_utils.delete(beers_directory)
    write_dataset_json({
        "id": dataset["id"],
        "name": dataset["name"],
        "readLength": "100",
        "data": {
            constants["dataset"]["FORWARD"]: {
                "name": forward_file_name,
                "path": forward_file_path,
            },
            constants["dataset"]["REVERSE"]: {
                "name": reverse_file_name,
                "path": reverse_file_path,
            }
        },
        "evaluation": {
            "type": "beers",
            "truth_file": {
                "name": truth_file_name,
                "path": truth_file_path
            }
        }
    })
def get_from_encode(dataset, directory):
    """Download one paired-end dataset from the ENCODE project.

    For each direction (forward/reverse) the gzipped FASTQ named by
    dataset["files"] is downloaded, unzipped and renamed to
    <direction>.fastq at the dataset root; a descriptor JSON is written
    at the end.
    """
    dataset_info = {
        "id": dataset["id"],
        "name": dataset["name"],
        "readLength": "76",
        "data": {
            constants["dataset"]["FORWARD"]: {},
            constants["dataset"]["REVERSE"]: {}
        },
        "evaluation": dataset["evaluation"]
    }
    def get_file(file_id, direction, directory):
        # Fetch a single FASTQ by ENCODE file id and normalise its name.
        print("Downloading {} file...".format(direction), flush=True)
        zip_name = "{}.fastq.gz".format(file_id)
        url = "https://www.encodeproject.org/files/{}/@@download/{}".format(
            file_id,
            zip_name
        )
        download_path = directory + "/" + zip_name
        file_utils.download(url, download_path)
        print("Unzipping {} file...".format(direction), flush=True)
        file_utils.unzip(download_path)
        file_utils.delete(download_path)
        original_name = "{}.fastq".format(file_id)
        file_origin = "{}/{}".format(directory, original_name)
        file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
        os.rename(file_origin, file_destination)
        return original_name, file_destination
    for direction, file_id in dataset["files"].items():
        original_name, file_destination = get_file(file_id, direction, directory)
        dataset_info["data"][direction]["name"] = original_name
        dataset_info["data"][direction]["path"] = file_destination
    write_dataset_json(dataset_info)
# Baruzzo Data Sets
# * id is prefix of unzipped FASTA files
# * file_name is zip name given in download url
# The commented-out entries are additional replicates (r2/r3) of the same
# conditions; uncomment to download them as well.
rna_seq_data = [
    {
        "id": "GM12878",
        "name": "GIAB Pilot Genome",
        "getter": get_from_encode,
        "files": {
            constants["dataset"]["FORWARD"]: "ENCFF000EWJ",
            constants["dataset"]["REVERSE"]: "ENCFF000EWX"
        },
        "evaluation": { "type": "giab" }
    },
    {
        "id": "simulated_reads_HG19t1r1",
        "getter": get_baruzzo,
        "file_name": "human_t1r1",
        "name": "Simulated Human T1R1"
    },
    # {
    #     "id": "simulated_reads_HG19t1r2",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t1r2",
    #     "name": "Simulated Human T1R2"
    # },
    # {
    #     "id": "simulated_reads_HG19t1r3",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t1r3",
    #     "name": "Simulated Human T1R3"
    # },
    {
        "id": "simulated_reads_HG19t2r1",
        "getter": get_baruzzo,
        "file_name": "human_t2r1",
        "name": "Simulated Human T2R1"
    },
    # {
    #     "id": "simulated_reads_HG19t2r2",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t2r2",
    #     "name": "Simulated Human T2R2"
    # },
    # {
    #     "id": "simulated_reads_HG19t2r3",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t2r3",
    #     "name": "Simulated Human T2R3"
    # },
    {
        "id": "simulated_reads_HG19t3r1",
        "getter": get_baruzzo,
        "file_name": "human_t3r1",
        "name": "Simulated Human T3R1"
    },
    # {
    #     "id": "simulated_reads_HG19t3r2",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t3r2",
    #     "name": "Simulated Human T3R2"
    # },
    # {
    #     "id": "simulated_reads_HG19t3r3",
    #     "getter": get_baruzzo,
    #     "file_name": "human_t3r3",
    #     "name": "Simulated Human T3R3"
    # },
    {
        "id": "simulated_reads_PFALt1r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t1r1",
        "name": "Simulated Malaria T1R1"
    },
    # {
    #     "id": "simulated_reads_PFALt1r2",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t1r2",
    #     "name": "Simulated Malaria T1R2"
    # },
    # {
    #     "id": "simulated_reads_PFALt1r3",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t1r3",
    #     "name": "Simulated Malaria T1R3"
    # },
    {
        "id": "simulated_reads_PFALt2r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t2r1",
        "name": "Simulated Malaria T2R1"
    },
    # {
    #     "id": "simulated_reads_PFALt2r2",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t2r2",
    #     "name": "Simulated Malaria T2R2"
    # },
    # {
    #     "id": "simulated_reads_PFALt2r3",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t2r3",
    #     "name": "Simulated Malaria T2R3"
    # },
    {
        "id": "simulated_reads_PFALt3r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t3r1",
        "name": "Simulated Malaria T3R1"
    },
    # {
    #     "id": "simulated_reads_PFALt3r2",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t3r2",
    #     "name": "Simulated Malaria T3R2"
    # },
    # {
    #     "id": "simulated_reads_PFALt3r3",
    #     "getter": get_baruzzo,
    #     "file_name": "malaria_t3r3",
    #     "name": "Simulated Malaria T3R3"
    # }
]
def get_datasets():
    """Download every selected RNA-seq dataset that is not already on disk."""
    for dataset in rna_seq_data:
        if ONLY_SIMULATED and not dataset["id"].startswith("simulated"):
            print("Skipping {} (only simulated)".format(dataset["name"]))
            continue
        if ONLY_GIAB and dataset["id"] != "GM12878":
            print("Skipping {} (only giab)".format(dataset["name"]))
            continue
        dataset_directory = datasets_directory + dataset["id"]
        dataset_getter = dataset["getter"]
        # An existing directory is taken to mean the dataset is complete.
        if not os.path.isdir(dataset_directory):
            file_utils.create_directory(dataset_directory)
            log_task_start(dataset["name"], dataset_directory)
            dataset_getter(dataset, dataset_directory)
            log_task_end(dataset["name"], dataset_directory)
        else:
            log_data_present(dataset["name"])
###################
# SCRIPT EXECUTION
###################
print("", flush=True)
print("Downloading data", flush=True)
print("", flush=True)
file_utils.create_directory(reference_directory)
file_utils.create_directory(datasets_directory)
# Download everything; on any failure remove paths that were started but
# never finished so a rerun starts from a clean state.
try:
    get_tools()
    get_genomes()
    get_datasets()
    remove_tools()
finally:
    for path in started_tasks:
        if not path in finished_tasks:
            print("An error occured, deleting {}".format(path))
            file_utils.delete(path)
import unittest
from werkzeug.security import generate_password_hash, check_password_hash
class TestPasswordHash(unittest.TestCase):
    """Exercises werkzeug's password hashing helpers."""

    def test_generate_password_hash(self):
        """Hashing produces a value (printed for manual inspection)."""
        hashed = generate_password_hash('mima')
        print('mima的hash是:', hashed)

    def test_check_password_hash(self):
        """The hash verifies the original password and rejects others."""
        password = 'mima'
        hashed = generate_password_hash(password)
        self.assertTrue(check_password_hash(hashed, password))
        self.assertFalse(check_password_hash(hashed, password + '1'))
| 500 | 145 |
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import random
from bs4 import BeautifulSoup
from locust import HttpLocust, TaskSet, task, between
def is_static_file(f):
    """Return True if URL/path `f` refers to a static asset (under /images)."""
    # Direct boolean return instead of the redundant if/else returning
    # True/False literals.
    return "/images" in f
def fetch_static_assets(session, response):
    """GET every static asset referenced via src= in `response`'s HTML."""
    soup = BeautifulSoup(response.text, "html.parser")
    # Collect into a set first so each asset is requested only once.
    resource_urls = {tag['src'] for tag in soup.find_all(src=True)
                     if is_static_file(tag['src'])}
    for url in resource_urls:
        session.client.get(url)
class UserBehavior(TaskSet):
    """Locust task set simulating a shopper: browse, cart, checkout.

    Task weights: index=2, browseProduct=1, the rest default weight 1.
    """
    def on_start(self):
        # Fixed catalogue of product ids used across tasks; the last entry
        # is an intentionally invalid id.
        self.products = [
            "0983976883313",
            "1051094507639",
            "3103748076140",
            "3377807835348",
            "3480077496703",
            "4618701513994",
            "5147991444866",
            "6392888360364",
            "6464865908071",
            "0000000000000", # dummy id
        ]
        self.versions = ["v2a", "v2b", "v2b"] # Add an additional v2b to favor this version over v2a
        self.client.headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 (LocustIO)"}
    @task(2)
    def index(self):
        """Load the home page and its static assets."""
        response = self.client.get("/")
        fetch_static_assets(self, response)
    @task(1)
    def browseProduct(self):
        """View a random product detail page and its static assets."""
        response = self.client.get("/product/" + random.choice(self.products))
        fetch_static_assets(self, response)
    @task
    def viewCart(self):
        """View the shopping cart page."""
        self.client.get("/cart")
    @task
    def addToCart(self):
        """View a random product, then add it to the cart; one-click adds
        carry an A/B-test app_version cookie."""
        product = random.choice(self.products)
        oneclick = bool(random.getrandbits(1))
        self.client.get("/product/" + product)
        self.client.post("/cart", {
            'product_id': product,
            'type': 'oneclick' if oneclick else ''
        }, cookies={"app_version": random.choice(self.versions) if oneclick else 'v1'})
    @task
    def checkout(self):
        """Add an item, then walk the full checkout/order flow."""
        self.addToCart();
        self.client.post("/cart/checkout", {
            'name': 'Demo User',
            'email': 'demo-user@example.com',
            'address': '123 Road',
            'zip': '80807',
            'city': 'Munich',
            'state': 'BY',
            'country': 'Germany',
            'paymentMethod': 'AmazonPay'
        })
        self.client.get('/cart/checkout')
        self.client.post("/cart/order")
        self.client.get("/cart/order")
class WebsiteUser(HttpLocust):
    # Simulated user: runs UserBehavior with 2-10s think time between tasks.
    task_set = UserBehavior
    wait_time = between(2, 10)
| 3,512 | 1,246 |
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, override_settings
class ComponentTest(TestCase):
    """Tests for the component template tags ({% comp %} inline and
    {% #comp %}...{% /comp %} block syntax).

    NOTE(review): relies on components named avatar/button/card/icon_button
    being registered in the test project's template setup — confirm where
    those templates live.
    """
    def test_render_inline_component(self):
        """An inline component renders its template with kwargs."""
        template = """
            {% avatar user="mixxorz" %}
        """

        expected = """
            <div>I am avatar for mixxorz</div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_render_block_component(self):
        """A block component renders with its children inside."""
        template = """
            {% #button %}I am button{% /button %}
        """

        expected = """
            <button>I am button</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_render_without_children(self):
        """The same component works both inline and as a block."""
        template = """
            {% icon_button icon="envelope" %}
            {% #icon_button icon="envelope" %}Submit{% /icon_button %}
        """

        expected = """
            <button class="icon-button envelope"></button>
            <button class="icon-button envelope">Submit</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_render_nested(self):
        """Block components can nest other block components."""
        template = """
            {% #card heading="I am heading" %}
            {% #button %}I am button{% /button %}
            {% /card %}
        """

        expected = """
            <div class="card">
              <div class="card__header">I am heading</div>
              <div class="card__body">
                <button>I am button</button>
              </div>
            </div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_kwargs_with_filters(self):
        """Template filters apply to component keyword arguments."""
        template = """
            {% #card heading="I am heading"|upper %}
            {% #button %}I am button{% /button %}
            {% /card %}
        """

        expected = """
            <div class="card">
              <div class="card__header">I AM HEADING</div>
              <div class="card__body">
                <button>I am button</button>
              </div>
            </div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_render_as_variable(self):
        """`as var` captures component output for later interpolation."""
        template = """
            {% avatar user="mixxorz" as my_avatar %}
            {% #button as my_button %}I am button{% /button %}
            <div>
            {{ my_avatar }}
            {{ my_button }}
            </div>
        """

        expected = """
            <div>
            <div>I am avatar for mixxorz</div>
            <button>I am button</button>
            </div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_pass_boolean_flags(self):
        """A bare argument name passes a boolean flag to the component."""
        template = """
            {% #button disabled %}I am button{% /button %}
        """

        expected = """
            <button disabled>I am button</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))
class AttrsTagTest(TestCase):
    """Tests for the {% attrs %} tag, which renders context variables as
    HTML attributes."""
    def test_basic(self):
        """Each listed variable becomes name="value"."""
        context = Context(
            {
                "type": "text",
                "id": "the_id",
                "name": "the_name",
            }
        )

        template = """
            <input {% attrs type id name %}>
        """

        expected = """
            <input type="text" id="the_id" name="the_name">
        """

        self.assertHTMLEqual(expected, Template(template).render(context))

    def test_boolean_values(self):
        """True renders a bare attribute; False omits it entirely."""
        context = Context(
            {
                "autofocus": False,
                "disabled": True,
            }
        )

        template = """
            <button {% attrs autofocus disabled %}>Click me</button>
        """

        expected = """
            <button disabled>Click me</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(context))

    def test_source_name(self):
        """attr=var syntax renders the attribute under a different name."""
        context = Context(
            {
                "input_type": "text",
                "id": "the_id",
                "name": "the_name",
            }
        )

        template = """
            <input {% attrs type=input_type id name %}>
        """

        expected = """
            <input type="text" id="the_id" name="the_name">
        """

        self.assertHTMLEqual(expected, Template(template).render(context))
class VarTagTest(TestCase):
    """Tests for the {% var %} tag, which assigns values in the template."""
    def test_basic(self):
        """{% var name=value %} makes the value available as {{ name }}."""
        template = """
            {% var foo="Hello, World!" %}
            <div>{{ foo }}</div>
        """

        expected = """
            <div>Hello, World!</div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))

    def test_value_filter(self):
        """Filters may be applied to the assigned value."""
        template = """
            {% var foo=foo|default:"Default value" %}
            {% var bar="Hello, World!"|upper %}
            <div>{{ foo }}</div>
            <div>{{ bar }}</div>
        """

        expected = """
            <div>Default value</div>
            <div>HELLO, WORLD!</div>
        """

        self.assertHTMLEqual(expected, Template(template).render(Context()))
class MatchFilterTest(TestCase):
    """Tests for the |match filter, which maps a value through a
    "key:result,key:result" lookup string (empty string on no match)."""
    def test_basic(self):
        """Matching keys map to their result; unknown keys yield ''."""
        context = Context({"first": "outline", "second": "ghost", "third": "square"})

        template = """
            <button class="{{ first|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
            <button class="{{ second|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
            <button class="{{ third|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
        """

        expected = """
            <button class="btn-outline">Click me</button>
            <button class="btn-ghost">Click me</button>
            <button class="">Click me</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(context))

    @override_settings(DEBUG=True)
    def test_syntax_error(self):
        """Malformed mapping strings raise TemplateSyntaxError (DEBUG on)."""
        template = """
            <button class="{{ "foo"|match:"outline:btn-outline,foo:bar:baz,,:apple,:orange" }}">Click me</button>
        """

        with self.assertRaises(TemplateSyntaxError):
            Template(template).render(Context())

    def test_ignore_spaces(self):
        """Whitespace around mapping entries is ignored."""
        context = Context({"variant": "ghost"})

        template = """
            <button class="{{ variant|match:"outline:btn-outline, ghost:btn-ghost" }}">Click me</button>
        """

        expected = """
            <button class="btn-ghost">Click me</button>
        """

        self.assertHTMLEqual(expected, Template(template).render(context))
class FragmentTagTest(TestCase):
    """Tests for {% fragment as var %}...{% endfragment %}, which captures
    rendered template output into a context variable."""
    def test_basic(self):
        """Captured output renders nothing in place, only where used."""
        context = Context({})

        template = """
            {% fragment as my_fragment %}
            <p>Hello, World</p>
            {% endfragment %}
            Text coming after:
            {{ my_fragment }}
        """

        expected = """
            Text coming after:
            <p>Hello, World</p>
        """

        self.assertHTMLEqual(expected, Template(template).render(context))

    @override_settings(DEBUG=True)
    def test_syntax_error(self):
        """Omitting the `as var` clause raises TemplateSyntaxError."""
        template = """
            {% fragment %}
            <p>Hello, World</p>
            {% endfragment %}
        """

        with self.assertRaises(TemplateSyntaxError):
            Template(template).render(Context())

    def test_with_variables(self):
        """Fragments render variables and filters when captured."""
        context = Context({"name": "jonathan wells"})

        template = """
            {% fragment as my_fragment %}
            <p>Hello, {{ name|title }}</p>
            {% endfragment %}
            Text coming after:
            {{ my_fragment }}
        """

        expected = """
            Text coming after:
            <p>Hello, Jonathan Wells</p>
        """

        self.assertHTMLEqual(expected, Template(template).render(context))
| 7,918 | 2,190 |
import os
import logging
from flask import Flask, request, render_template
app = Flask(__name__)
def doRender(tname, values=None):
    """Render template `tname` with keyword context `values`.

    Falls back to rendering 'index.htm' when the requested template file
    does not exist under ./templates.

    The original signature used a mutable default (`values={}`), which is
    shared across calls; `None` + per-call creation is the safe equivalent.
    """
    if values is None:
        values = {}
    if not os.path.isfile(os.path.join(os.getcwd(), 'templates/' + tname)):
        return render_template('index.htm')
    return render_template(tname, **values)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def mainPage(path):
    """Catch-all route: serve the template named by the URL path.

    doRender falls back to index.htm for unknown paths (including '/').
    """
    return doRender(path)
@app.route('/terminate')
def terminate():
    """Stop and terminate every running EC2 instance in us-east-1, then
    render the index page.

    Credentials are read from the bundled ./cred file via the
    AWS_SHARED_CREDENTIALS_FILE environment variable.
    """
    os.environ['AWS_SHARED_CREDENTIALS_FILE'] = './cred'
    import boto3  # local import kept from original; unused `import sys` removed

    ec2 = boto3.resource('ec2', region_name='us-east-1')
    running = ec2.instances.filter(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
    ids = [instance.id for instance in running]
    if ids:
        # stop() is redundant before terminate(), but the original issued
        # both; kept to preserve the exact sequence of AWS calls.
        ec2.instances.filter(InstanceIds=ids).stop()
        ec2.instances.filter(InstanceIds=ids).terminate()
    return doRender('index.htm', {})
@app.route('/calculate', methods=['POST'])
def calculate():
    """Estimate pi by Monte Carlo, fanned out over AWS.

    Form fields: service ('lambda' or EC2), shots (total samples), rate,
    digits (requested precision), resources (parallel workers).

    service == 'lambda': spawn one thread per resource, each POSTing a batch
    of shots to an API Gateway/Lambda endpoint; repeat up to 9 rounds until
    the truncated mean matches pi to `digits` places, then render results
    with an estimated Lambda cost.  Otherwise: launch t2.micro EC2 instances
    when none are running and render a stub page.
    """
    # Modified from: http://www.ibm.com/developerworks/aix/library/au-threadingpython/
    # and fixed with try-except around urllib call
    import queue
    import threading
    import math
    import json
    import http.client

    service = request.form.get('service')
    shots = int(request.form.get('shots'))
    rate = request.form.get('rate')
    digits = int(request.form.get('digits')) - 1  # internal digit count is 0-based
    runs = int(request.form.get('resources'))
    eachInstanceShots = shots / runs  # samples per worker (float; truncated at send time)
    count = 0
    # Renamed from `queue = queue.Queue()`, which shadowed the queue module.
    work_queue = queue.Queue()

    if service == 'lambda':
        class ThreadUrl(threading.Thread):
            """Worker thread: POST one batch to the Lambda endpoint, keep results on self."""

            def __init__(self, queue, task_id):
                threading.Thread.__init__(self)
                self.queue = queue
                self.task_id = task_id
                self.incircles = []
                self.results = []
                self.resourceId = []
                self.runningTime = []

            def run(self):
                count = self.queue.get()  # value unused; the get() paces the workers
                host = "jy6u38g96k.execute-api.us-east-1.amazonaws.com"
                try:
                    c = http.client.HTTPSConnection(host)
                    jsons = '{ "key1": "'+str(int(eachInstanceShots))+'", "key2": "'+rate+'", "key3": "'+str(digits)+'"}'
                    c.request("POST", "/default/test", jsons)
                    response = c.getresponse()
                    # Endpoint returns [incircle flags, per-shot estimates, elapsed time].
                    data = json.loads(response.read().decode('utf-8'))
                    self.incircles.extend(data[0])
                    self.results.extend(data[1])
                    self.runningTime.append(data[2])
                    self.resourceId.append(self.task_id)
                except IOError:
                    print('Failed to open ', host)
                self.queue.task_done()

        def parallel_run():
            """Launch `runs` workers, wait for all to finish, collect per-thread results."""
            threads = []
            for i in range(runs):
                t = ThreadUrl(work_queue, i)
                threads.append(t)
                t.daemon = True  # was t.setDaemon(True), deprecated since Python 3.10
                t.start()
            for _ in range(runs):
                work_queue.put(count)
            work_queue.join()
            incircles = [t.incircles for t in threads]
            results = [t.results for t in threads]
            resourceId = [t.resourceId for t in threads]
            runningTime = [t.runningTime for t in threads]
            return incircles, results, resourceId, runningTime

        mergedIncircles = []
        mergedResults = []
        stringedResults = ''
        mergedResourceId = []
        pi = int(math.pi * (10 ** digits)) / 10 ** digits  # pi truncated to `digits` places
        piValues = ''
        matched = 0
        roundNum = 9
        sumTime = 0
        for a in range(0, 9):  # at most 9 rounds of `runs` parallel batches
            incircles, results, resourceId, runningTime = parallel_run()
            sumResults = 0
            # Flatten this round's per-thread estimates into the running list.
            for per_thread in results:
                mergedResults.extend(per_thread)
            # NOTE: incircles/resourceId are appended as whole per-thread lists
            # (not flattened) — behavior preserved from the original.
            for per_thread in incircles:
                mergedIncircles.append(per_thread)
            for per_thread in resourceId:
                mergedResourceId.append(per_thread)
            for r in mergedResults:
                sumResults = sumResults + r
            for per_thread in runningTime:
                for elapsed in per_thread:
                    sumTime = sumTime + elapsed
            # Truncate the mean estimate to `digits` places and compare with pi.
            finalResult = int(sumResults / len(mergedResults) * (10 ** digits)) / 10 ** digits
            if pi == finalResult:
                matched = 1
                roundNum = a + 1
                break
        # Serialize the estimates (and an equally long pi series) as comma strings.
        for r in mergedResults:
            stringedResults = stringedResults + str(r) + ','
        stringedResults = stringedResults[:-1]
        for _ in mergedResults:
            piValues = piValues + str(pi) + ','
        # Lambda pricing: compute cost (512 MB memory) plus per-request cost.
        comCost = sumTime * 512 / 1024 * 0.0000000083
        reqCost = roundNum * runs * 0.2 / 10 ** 6
        finalCost = comCost + reqCost
        finalCost = f'{finalCost:.12f}'
        comCost = f'{comCost:.12f}'
        reqCost = f'{reqCost:.12f}'
        # The original dict listed 'rate' twice ('rate': int(rate), then
        # 'rate': rate); the later entry wins in a dict literal, so only the
        # winning entry is kept — effective behavior is unchanged.
        return doRender('result.htm', {
            'stringedResults': piValues + '|' + stringedResults,
            'incircles': mergedIncircles,
            'resourceId': mergedResourceId,
            'roundNum': roundNum,
            'matched': matched,
            'finalResult': finalResult,
            'pi': pi,
            'finalCost': finalCost,
            'shots': shots,
            'rate': rate,
            'resources': runs,
            'digits': digits + 1,
            'reqCost': reqCost,
            'comCost': comCost,
        })
    else:
        # EC2 path: launch instances only when none are already running.
        os.environ['AWS_SHARED_CREDENTIALS_FILE'] = './cred'
        import boto3  # unused `import sys` from the original removed
        ec2 = boto3.resource('ec2', region_name='us-east-1')
        running = ec2.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
        dnss = [instance.public_dns_name for instance in running]
        if not dnss:
            instances = ec2.create_instances(
                ImageId='ami-0147982d8de757491',
                MinCount=1,
                MaxCount=runs,
                InstanceType='t2.micro',
            )
        return doRender('result.htm', {})
@app.errorhandler(500)
def server_error(e):
    """Log the exception and return a minimal 500 error page."""
    logging.exception('ERROR!')
    page = """
    An error occurred: <pre>{}</pre>
    """.format(e)
    return page, 500
if __name__ == '__main__':
    # Local development entry point only; debug=True enables the reloader
    # and interactive debugger and must not be used in production.
    app.run(host='127.0.0.1', port=8080, debug=True)
| 5,599 | 2,345 |
import pytest


@pytest.mark.parametrize(
    "val1, val2, result",
    (
        (5, 5, 10),
        # Was (3, 5, 9): 3 + 5 == 9 is always false, so that case could
        # never pass; the expected value is corrected to 8.
        (3, 5, 8),
    ),
)
def test_addition(val1, val2, result):
    """Each parametrized case checks that val1 + val2 equals result."""
    assert val1 + val2 == result, f"{val1} + {val2} != {result}"
| 170 | 75 |