text
stringlengths 3
1.05M
|
|---|
# -*- coding: utf-8 -*-
#
# Copyright 2019-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - PCA subcommand processing dispatching
"""
import sys
import os
import bigml.api
import bigmler.utils as u
import bigmler.resourcesapi.pcas as r
import bigmler.pre_model_steps as pms
import bigmler.processing.args as a
import bigmler.processing.pca as pc
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
from bigmler.resourcesapi.common import shared_changed
from bigmler.resourcesapi.datasets import set_basic_dataset_args
from bigmler.resourcesapi.batch_projections import set_batch_projection_args
from bigmler.defaults import DEFAULTS_FILE
from bigmler.projection import projection, remote_projection
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_context
from bigmler.dispatcher import (SESSIONS_LOG,
clear_log_files, get_test_dataset)
# Session log file recording the bigmler pca commands issued.
COMMAND_LOG = ".bigmler_pca"
# Stack of output directories created by this subcommand.
DIRS_LOG = ".bigmler_pca_dir_stack"
# All log files that --clear-logs will remove.
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
# Query string asking the API for a minimal (non-full) model payload.
MINIMUM_MODEL = "full=false"
# Default file name for the generated projections.
DEFAULT_OUTPUT = "projections.csv"
# Subcommand-specific settings handed to bigmler.command.get_context.
SETTINGS = {
    "command_log": COMMAND_LOG,
    "sessions_log": SESSIONS_LOG,
    "dirs_log": DIRS_LOG,
    "default_output": DEFAULT_OUTPUT,
    "defaults_file": DEFAULTS_FILE}
def pca_dispatcher(args=None):
    """Parse the command line and dispatch to the processing functions.

    :param args: list of command-line arguments. Defaults to
        ``sys.argv[1:]`` read at call time. A ``None`` sentinel is used
        instead of ``args=sys.argv[1:]`` because a default expression is
        evaluated only once, at import time, so later changes to
        ``sys.argv`` would be silently ignored.
    """
    if args is None:
        args = sys.argv[1:]

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)

    command_args, _, api, session_file, _ = get_context(args, settings)

    # Selects the action to perform: training, testing or exporting fields
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    :param api: authenticated BigML API connection object
    :param args: parsed command-line arguments namespace
    """
    pca = None
    # variables from command-line options
    resume = args.resume_
    pca_ids = args.pca_ids_
    output = args.projections
    # there's only one pca to be generated at present
    args.max_parallel_pcas = 1
    # pca cannot be published yet.
    args.public_pca = False

    # It is compulsory to have a description to publish either datasets or
    # pcas
    if (not args.description_ and (args.public_pca or
            args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    # All generated files are placed under the output file's directory.
    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties

    if args.pca_file:
        # pca regression is retrieved from the contents of the given local
        # JSON file
        pca, csv_properties, fields = u.read_local_resource(
            args.pca_file,
            csv_properties=csv_properties)
        pca_ids = [pca]
    else:
        # pca is retrieved from the remote object or created
        pca, resume = \
            pc.pca_processing( \
            datasets, pca, pca_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)

    # We update the pca public state if needed
    if pca:
        if isinstance(pca, str):
            # A bare resource id: fetch the right amount of detail.
            # Minimal payload when no test is run, full fields when
            # exporting, otherwise the default payload.
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            pca = u.check_resource(pca,
                                   api.get_pca,
                                   query_string=query_string)
        if (args.public_pca or
                (args.shared_flag and shared_changed(args.shared, pca))):
            pca_args = {}
            if args.shared_flag and shared_changed(args.shared, pca):
                pca_args.update(shared=args.shared)
            if args.public_pca:
                pca_args.update( \
                    r.set_publish_pca_args(args))
            if pca_args:
                pca = r.update_pca( \
                    pca, pca_args, args,
                    api=api, path=path, \
                    session_file=session_file)

    # We get the fields of the pca if we haven't got
    # them yet and need them
    if pca and (args.test_set or args.export_fields):
        fields = pc.get_pca_fields( \
            pca, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If predicting
    if pca and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote projections: projections are computed as batch projections
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                # Reuse an existing remote source.
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            # PCA projections have no objective field.
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_projection_args = set_batch_projection_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_projection(pca, test_dataset, \
                batch_projection_args, args, \
                api, resume, projection_file=output, \
                session_file=session_file, path=path, log=log)
        else:
            # Local projection computed from the downloaded model.
            projection(pca, fields, args,
                       session_file=session_file)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
|
/**
* Author: Samuel Rohde (rohde.samuel@cubez.io)
*
* Copyright 2020 Samuel Rohde
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUBEZ_RENDER_DEFS__H
#define CUBEZ_RENDER_DEFS__H
#include <vector>
#include <cubez/render.h>
#include <cubez/render_pipeline.h>
#include <cubez/mesh.h>
#endif // CUBEZ_RENDER_DEFS__H
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/sagemaker/model/AutoMLMetricEnum.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{
/**
* <p>Applies a metric to minimize or maximize for the job's
* objective.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLJobObjective">AWS
* API Reference</a></p>
*/
  // Generated value object for the objective metric of a SageMaker AutoML
  // job: a single AutoMLMetricEnum plus a "has been set" flag used by the
  // JSON (de)serialization code.
  class AWS_SAGEMAKER_API AutoMLJobObjective
  {
  public:
    AutoMLJobObjective();
    // Construct / assign from a JSON view; definitions live in the .cpp.
    AutoMLJobObjective(Aws::Utils::Json::JsonView jsonValue);
    AutoMLJobObjective& operator=(Aws::Utils::Json::JsonView jsonValue);
    // Serialize this object back to JSON.
    Aws::Utils::Json::JsonValue Jsonize() const;

    // The name of the metric to minimize or maximize.
    inline const AutoMLMetricEnum& GetMetricName() const{ return m_metricName; }

    // True once MetricName has been explicitly assigned.
    inline bool MetricNameHasBeenSet() const { return m_metricNameHasBeenSet; }

    // Setters mark the field as set before storing the value.
    inline void SetMetricName(const AutoMLMetricEnum& value) { m_metricNameHasBeenSet = true; m_metricName = value; }

    inline void SetMetricName(AutoMLMetricEnum&& value) { m_metricNameHasBeenSet = true; m_metricName = std::move(value); }

    // Fluent setters returning *this for call chaining.
    inline AutoMLJobObjective& WithMetricName(const AutoMLMetricEnum& value) { SetMetricName(value); return *this;}

    inline AutoMLJobObjective& WithMetricName(AutoMLMetricEnum&& value) { SetMetricName(std::move(value)); return *this;}

  private:
    AutoMLMetricEnum m_metricName;
    // NOTE(review): not initialized in-class; presumably set to false in the
    // out-of-line default constructor — confirm in the generated .cpp.
    bool m_metricNameHasBeenSet;
  };
} // namespace Model
} // namespace SageMaker
} // namespace Aws
|
'use strict';
const mongoose = require('mongoose');
const Customer = mongoose.model('Customer');
exports.create = async data => {
var customer = new Customer(data);
await customer.save();
};
// Look up the customer whose email AND password both match; resolves with
// the matching document, or null when there is none.
// NOTE(review): the password appears to be compared as-is against the
// stored value — if the schema stores plaintext passwords this is a
// serious security issue; hash on write and compare with bcrypt.compare.
exports.authenticate = async data => {
    const res = await Customer.findOne({
        email: data.email,
        password: data.password
    });
    return res;
};
// Fetch a single customer by its MongoDB id; resolves with the document
// or null when no customer has that id.
exports.getByID = async id => Customer.findById(id);
|
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer, Serializer
from services.viewcounts.models import PageViewsModel
class PageViewSerializer(Serializer):
    """Validates a raw page-view event submitted to the view-count API."""

    # URL of the viewed page; required and must be non-empty.
    page_url = CharField(required=True, allow_blank=False, max_length=200)
    # Identifier of the viewer; required but may be blank (anonymous view).
    user_id = CharField(required=True, allow_blank=True, max_length=40)
class PageViewsModelSerializer(ModelSerializer):
    """Serializes every field of ``PageViewsModel``; ``id`` is read-only."""

    class Meta:
        # Expose all model fields; the primary key cannot be written.
        model = PageViewsModel
        fields = "__all__"
        read_only_fields = ["id"]
|
#pragma once
#include <cassert>
#include <functional>
#include <span>
#include <utility>
#include <vector>
#include <range/v3/algorithm/sort.hpp>
#include <range/v3/view/enumerate.hpp>
#include <range/v3/view/filter.hpp>
#include <range/v3/view/transform.hpp>
#include <range/v3/view/zip.hpp>
// Sortable, filterable view over a sequence owned elsewhere. Only
// reference_wrappers are stored, so the referenced items must outlive
// this object.
template <typename ItemType>
class SortFilter {
public:
    explicit SortFilter(std::span<const ItemType> items)
        : items{ items.begin(), items.end() }, passesFilter(items.size(), true) {}

    // Sorts the items with `predicate` as comparator over the underlying
    // ItemType values; each item's filter flag travels with it because the
    // two vectors are sorted as one zipped range.
    template <typename Predicate>
    void sort(Predicate&& predicate)
    {
        ranges::sort(ranges::views::zip(items, passesFilter), std::forward<Predicate>(predicate), [](const auto& pair) -> decltype(auto) { return std::get<0>(pair).get(); });
    }

    // Recomputes every filter flag by applying `predicate` to each item.
    template <typename Predicate>
    void filter(Predicate predicate)
    {
        assert(items.size() == passesFilter.size());
        for (std::size_t i = 0; i < items.size(); ++i)
            passesFilter[i] = predicate(items[i].get());
    }

    // Lazy view of (index, item) pairs restricted to items whose filter
    // flag is currently set.
    [[nodiscard]] auto getItems() const noexcept
    {
        return ranges::views::transform(items, [](const auto& item) -> decltype(auto) { return item.get(); }) |
               ranges::views::enumerate |
               ranges::views::filter([this](const auto& pair) { return passesFilter[std::get<0>(pair)]; });
    }

    // Total number of items, ignoring the filter.
    [[nodiscard]] std::size_t totalItemCount() const noexcept
    {
        return items.size();
    }

private:
    // wrapper around bool to not use std::vector<bool>
    // which isn't compatible with ranges::sort()
    struct Boolean {
        explicit(false) Boolean(bool b) : b{ b } {}
        explicit(false) operator bool() const noexcept
        {
            return b;
        }

    private:
        bool b;
    };

    std::vector<std::reference_wrapper<const ItemType>> items;
    std::vector<Boolean> passesFilter;
};
|
# proxy module
from traitsui.qt4.ui_live import *
|
export * from './slider';
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoicHVibGljX2FwaS5qcyIsInNvdXJjZVJvb3QiOiJuZzovL3ByaW1lbmcvc2xpZGVyLyIsInNvdXJjZXMiOlsicHVibGljX2FwaS50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQSxjQUFjLFVBQVUsQ0FBQyIsInNvdXJjZXNDb250ZW50IjpbImV4cG9ydCAqIGZyb20gJy4vc2xpZGVyJzsiXX0=
|
import redis
import click
import json
import os
import zipfile
import io
# Entry names used inside an exported requirement zip bundle.
# NOTE(review): "FINE" looks like a typo for "FILE"; left unchanged because
# the constant is referenced elsewhere in this module.
DATA_BIN_FINE_NAME = 'Data.bin'
META_DATA_FILE_NAME = 'MetaData.json'
class Colors(object):
    """Helpers that wrap text in ANSI terminal color escape sequences."""

    # SGR reset sequence appended after every colored string.
    _RESET = '\033[0m'

    @staticmethod
    def _paint(code, data):
        # Prefix the text with the given SGR code and restore defaults after.
        return '\033[%sm%s%s' % (code, data, Colors._RESET)

    @staticmethod
    def Cyan(data):
        return Colors._paint('36', data)

    @staticmethod
    def Yellow(data):
        return Colors._paint('33', data)

    @staticmethod
    def Bold(data):
        return Colors._paint('1', data)

    @staticmethod
    def Bred(data):
        return Colors._paint('31;1', data)

    @staticmethod
    def Gray(data):
        return Colors._paint('30;1', data)

    @staticmethod
    def Lgray(data):
        return Colors._paint('30;47', data)

    @staticmethod
    def Blue(data):
        return Colors._paint('34', data)

    @staticmethod
    def Green(data):
        return Colors._paint('32', data)
# Top-level click command group; subcommands attach themselves below via
# the @gears_cli.command decorators.
@click.group()
def gears_cli():
    pass
def create_connection(host, port, password, decode_responses=True):
    """Open a Redis connection and verify it with a PING.

    Exits the process with status 1 when the server cannot be reached.

    :param host: Redis host name or address
    :param port: Redis port number
    :param password: Redis password, or None
    :param decode_responses: when True, replies are decoded to str
    :return: a connected ``redis.Redis`` client

    Fix: removed the stray ``global args`` statement — no module-level
    ``args`` exists and the function never used it.
    """
    try:
        r = redis.Redis(host, port, password=password,
                        decode_responses=decode_responses)
        r.ping()
    except Exception as e:
        # Any failure (DNS, auth, network) is fatal for this CLI.
        print(Colors.Bred('Cannot connect to Redis. Aborting (%s)' % str(e)))
        exit(1)
    return r
def print_res(res, res_id):
    """Print one gears result prefixed with its 1-based index.

    Results containing valid JSON are pretty-printed with indentation and
    sorted keys; anything else is printed verbatim.

    Fix: the original called ``json.dumps`` on the raw *string* before
    parsing it, so ``indent=4`` never applied (dumping a str just quotes
    it). Parse first, then re-serialize the parsed structure.
    """
    res = str(res)
    res_id = Colors.Bold('%d)' % res_id)
    try:
        parsed = json.loads(res)
        jsonStr = json.dumps(parsed, indent=4, sort_keys=True)
        print('%s\t%s' % (res_id, Colors.Bold(jsonStr)))
        return
    except Exception:
        # Not JSON: fall through to the plain-text branch.
        pass
    print('%s\t%s' % (res_id, Colors.Bold(res)))
# Installs python requirements into RedisGears. Requirements come from
# positional arguments and, optionally, a requirements.txt file; both are
# concatenated and shipped with a no-op RG.PYEXECUTE call.
# NOTE(review): "Install give requirements" in the help text looks like a
# typo for "given"; it is a runtime string, so it is left unchanged here.
@gears_cli.command(help='Install give requirements')
@click.option('--host', default='localhost', help='Redis host to connect to')
@click.option('--port', default=6379, type=int, help='Redis port to connect to')
@click.option('--password', default=None, help='Redis password')
@click.option('--requirements-file', default=None, help='Path to requirements.txt file')
@click.argument('requirements', nargs=-1, type=click.UNPROCESSED)
def install_requirements(host, port, password, requirements_file, requirements):
    r = create_connection(host, port, password);
    requirements = list(requirements)
    if requirements_file is not None:
        # Merge file-listed requirements with the inline ones.
        with open(requirements_file, 'r') as f:
            reqs = [(el.strip()) for el in f.readlines()]
            requirements += reqs
    try:
        _ = r.execute_command('RG.PYEXECUTE', 'log("installing requirements")', 'REQUIREMENTS', *requirements)
    except Exception as e:
        print(Colors.Bred("failed running gear function (%s)" % str(e)))
        exit(1)
# Runs a local python script through RG.PYEXECUTE, optionally shipping a
# requirements list, and pretty-prints the (results, errors) reply.
@gears_cli.command(help='Run gears function')
@click.option('--host', default='localhost', help='Redis host to connect to')
@click.option('--port', default=6379, type=int, help='Redis port to connect to')
@click.option('--password', default=None, help='Redis password')
@click.option('--requirements', default=None, help='Path to requirements.txt file')
@click.argument('filepath')
@click.argument('extra_args', nargs=-1, type=click.UNPROCESSED)
def run(host, port, password, requirements, filepath, extra_args):
    r = create_connection(host, port, password);
    extra_args = list(extra_args)
    if requirements is not None:
        # Append the REQUIREMENTS keyword followed by the file's entries.
        extra_args.append('REQUIREMENTS')
        with open(requirements, 'r') as f:
            reqs = [(el.strip()) for el in f.readlines()]
            extra_args += reqs
    with open(filepath, 'rt') as f:
        script = f.read()
    q = ['rg.pyexecute', script] + extra_args
    try:
        reply = r.execute_command(*q)
    except Exception as e:
        print(Colors.Bred("failed running gear function (%s)" % str(e)))
        exit(1)
    if reply == 'OK':
        print('OK')
    else:
        # Non-OK replies are a (results, errors) pair.
        results, errors = reply
        print(Colors.Bold('Results'))
        print(Colors.Bold('-------'))
        for i in range(len(results)):
            print_res(results[i], i + 1)
        print('')
        if len(errors) > 0:
            print(Colors.Bred('Errors'))
            print(Colors.Bred('------'))
            for i in range(len(errors)):
                print(Colors.Bred('%d)\t%s' % (i + 1, str(errors[i]))))
            # Any error in the reply makes the CLI exit non-zero.
            exit(1)
def decode_utf(d):
    """Recursively decode UTF-8 bytes to str inside nested containers.

    Dicts and lists are rebuilt with every contained bytes object decoded;
    any other value is returned unchanged.
    """
    if isinstance(d, bytes):
        return d.decode('utf-8')
    if isinstance(d, dict):
        decoded = {}
        for key, value in d.items():
            decoded[decode_utf(key)] = decode_utf(value)
        return decoded
    if isinstance(d, list):
        return list(map(decode_utf, d))
    return d
def extract_metadata(meta_data_reply):
    """Convert a flat ``[key, value, key, value, ...]`` reply into a dict.

    Keys arrive as UTF-8 bytes; values may be nested containers and are
    decoded recursively.
    """
    meta_data = {}
    for idx in range(0, len(meta_data_reply), 2):
        raw_key = meta_data_reply[idx]
        raw_value = meta_data_reply[idx + 1]
        meta_data[raw_key.decode('utf-8')] = decode_utf(raw_value)
    return meta_data
def export_single_req(r, req_name, save_directory, output_prefix):
    """Export one requirement from RedisGears into a local zip file.

    The zip bundles the requirement metadata (as JSON) together with its
    serialized binary payload. Aborts the process when the export command
    fails or the target file already exists.
    """
    try:
        meta_values, serialized_chunks = r.execute_command('RG.PYEXPORTREQ',
                                                           req_name)
    except Exception as e:
        print(Colors.Bred("failed exporting requirement (%s)" % str(e)))
        exit(1)
    meta = extract_metadata(meta_values)
    meta_json = json.dumps(meta, indent=4, sort_keys=True)
    if output_prefix is None:
        # Default name encodes version, requirement name and target OS.
        file_name = "redisgears-requirement-v%s-%s-%s.zip" % (
            meta['GearReqVersion'], os.path.basename(meta['Name']),
            meta['CompiledOs'])
    else:
        file_name = "%s-%s.zip" % (output_prefix, meta['CompiledOs'])
    target = os.path.abspath(os.path.join(save_directory, file_name))
    if os.path.exists(target):
        # Never overwrite a previous export.
        print(Colors.Bred("File %s already exists" % target))
        exit(1)
    print(Colors.Cyan("Saving exported requirement into %s" % target))
    with zipfile.ZipFile(target, "a", zipfile.ZIP_DEFLATED, False) as zf:
        zf.writestr(META_DATA_FILE_NAME, meta_json)
        zf.writestr(DATA_BIN_FINE_NAME, b''.join(serialized_chunks))
# Exports requirements to local zip files, selected either explicitly
# (--requirement), via the registrations that depend on them
# (--registration-id), or all at once (--all).
@gears_cli.command(help='Export requirements from RedisGears')
@click.option('--host', default='localhost', help='Redis host to connect to')
@click.option('--port', default=6379, type=int, help='Redis port to connect to')
@click.option('--password', default=None, help='Redis password')
@click.option('--save-directory', default='./', help='Directory for exported files')
@click.option('--output-prefix', default=None, help='Prefix for the requirement zip file')
@click.option('--registration-id', multiple=True, default=[], help='Regisrations ids to extract their requirements')
@click.option('--requirement', multiple=True, default=[], help='Requirement to export')
@click.option('--all', is_flag=True, default=False, help='Export all requirements')
def export_requirements(host, port, password, save_directory, output_prefix, registration_id, all, requirement):
    # `all` shadows the builtin: it is the click-generated name for --all.
    r = create_connection(host, port, password, decode_responses=False);
    if all:
        all_reqs = r.execute_command('RG.PYDUMPREQS')
        if len(all_reqs) == 0:
            print(Colors.Bred("No requirements to export"))
            exit(1)
        for req in all_reqs:
            md = extract_metadata(req)
            export_single_req(r, md['Name'], save_directory, output_prefix)
        return
    requirements_to_export = set()
    if len(registration_id) > 0:
        registrations = r.execute_command('RG.DUMPREGISTRATIONS')
        # NOTE(review): the loop variable shadows the `registration_id`
        # tuple it iterates over — works, but confusing.
        for registration_id in registration_id:
            registration = [r for r in registrations if r[1].decode('utf-8') == registration_id]
            if len(registration) != 1:
                print(Colors.Bred("No such registration %s" % registration_id))
                exit(1)
            registration = registration[0]
            # Index 9 of the registration reply holds the session data.
            # TODO confirm against RG.DUMPREGISTRATIONS reply layout.
            session = registration[9]
            # NOTE(review): eval() of data coming from the server is unsafe
            # if the server is untrusted; ast.literal_eval may be a safer
            # fit — confirm the session repr only contains literals.
            session = eval(session.decode('utf-8'))
            [requirements_to_export.add(n['name']) for n in session['depsList']]
    for req in requirement:
        requirements_to_export.add(req)
    for req in requirements_to_export:
        export_single_req(r, req, save_directory, output_prefix)
def import_single_req(r, req_io, bulk_size_in_bytes):
    """Send one requirement zip (path or file-like object) to RedisGears.

    The Data.bin payload is split into chunks of at most
    ``bulk_size_in_bytes`` and streamed through RG.PYIMPORTREQ. Aborts the
    process on a malformed zip or a failed import command.
    """
    with zipfile.ZipFile(req_io, "r", zipfile.ZIP_DEFLATED, False) as zf:
        try:
            payload = zf.read(DATA_BIN_FINE_NAME)
        except Exception as e:
            print(Colors.Bred("Bad zip format (%s)" % str(e)))
            exit(1)
        chunks = [payload[offset:offset + bulk_size_in_bytes]
                  for offset in range(0, len(payload), bulk_size_in_bytes)]
        try:
            _ = r.execute_command('RG.PYIMPORTREQ', *chunks)
        except Exception as e:
            print(Colors.Bred("failed import requirement (%s)" % str(e)))
            exit(1)
# Imports requirement zips into RedisGears from a directory of zip files,
# or from a single "zip of zips" bundle.
@gears_cli.command(help='Import requirements to RedisGears')
@click.option('--host', default='localhost', help='Redis host to connect to')
@click.option('--port', default=6379, type=int, help='Redis port to connect to')
@click.option('--password', default=None, help='Redis password')
@click.option('--requirements-path', default='./', help='Path of requirements directory containing requirements zip files, could also be a zip file contains more requirements zip files')
@click.option('--all', is_flag=True, default=False, help='Import all requirements in zip file')
@click.option('--bulk-size', default=10, type=int, help='Max bulk size to send to redis in MB')
@click.argument('requirements', nargs=-1, type=click.UNPROCESSED)
def import_requirements(host, port, password, requirements_path, all, bulk_size, requirements):
    # Closure over `zf` (bound later, in the zip-of-zips branch), `r` and
    # `bulk_size_in_bytes`: extracts one inner zip and imports it.
    def install_req(req):
        try:
            req_data = zf.read(req)
        except Exception as e:
            print(Colors.Bred("Requirement %s could not be found in zip, error='%s'" % (req, str(e))))
            exit(1)
        io_buffer = io.BytesIO(req_data)
        import_single_req(r, io_buffer, bulk_size_in_bytes)
        print(Colors.Green('Requirement %s imported successfully' % req))
    r = create_connection(host, port, password, decode_responses=False);
    bulk_size_in_bytes = bulk_size * 1024 * 1024
    if len(requirements) == 0 and not all:
        # NOTE(review): "Warngin" is a typo in this runtime message; left
        # unchanged here.
        print(Colors.Bold('Warngin: no requirements specified'))
    requirements_path = os.path.abspath(requirements_path)
    if not os.path.exists(requirements_path):
        print(Colors.Bred("File %s does not exists" % requirements_path))
        exit(1)
    if requirements_path.endswith('.zip'):
        # A single zip file bundling several requirement zips.
        if len(requirements) == 0:
            all = True
        with zipfile.ZipFile(requirements_path, "r", zipfile.ZIP_DEFLATED, False) as zf:
            if all:
                for req in zf.namelist():
                    install_req(req)
            else:
                for req in requirements:
                    install_req(req)
        return
    if not os.path.isdir(requirements_path):
        print(Colors.Bred("%s is not a directory" % requirements_path))
        exit(1)
    # Directory mode: each named requirement is a zip file inside it.
    for req in requirements:
        req_path = os.path.join(requirements_path, req)
        if not os.path.exists(req_path):
            print(Colors.Bred("File %s does not exists" % req_path))
            exit(1)
        import_single_req(r, req_path, bulk_size_in_bytes)
        print(Colors.Green('Requirement %s imported successfully' % req_path))
def main():
    """Console entry point: delegates to the click command group."""
    gears_cli()


if __name__ == '__main__':
    # Fix: the guard previously called gears_cli() directly, leaving main()
    # as dead code; routing through main() keeps direct execution identical
    # to a setuptools console-script entry point.
    main()
|
/*
Copyright (c) 2003-2009, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang.vi={dir:'ltr',editorTitle:'Trình biên tập trực quan, %1',source:'Mã HTML',newPage:'Trang mới',save:'Lưu',preview:'Xem trước',cut:'Cắt',copy:'Sao chép',paste:'Dán',print:'In',underline:'Gạch chân',bold:'Đậm',italic:'Nghiêng',selectAll:'Chọn Tất cả',removeFormat:'Xoá Định dạng',strike:'Gạch xuyên ngang',subscript:'Chỉ số dưới',superscript:'Chỉ số trên',horizontalrule:'Chèn Đường phân cách ngang',pagebreak:'Chèn Ngắt trang',unlink:'Xoá Liên kết',undo:'Khôi phục thao tác',redo:'Làm lại thao tác',common:{browseServer:'Duyệt trên máy chủ',url:'URL',protocol:'Giao thức',upload:'Tải lên',uploadSubmit:'Tải lên Máy chủ',image:'Hình ảnh',flash:'Flash',form:'Biểu mẫu',checkbox:'Nút kiểm',radio:'Nút chọn',textField:'Trường văn bản',textarea:'Vùng văn bản',hiddenField:'Trường ẩn',button:'Nút',select:'Ô chọn',imageButton:'Nút hình ảnh',notSet:'<không thiết lập>',id:'Định danh',name:'Tên',langDir:'Đường dẫn Ngôn ngữ',langDirLtr:'Trái sang Phải (LTR)',langDirRtl:'Phải sang Trái (RTL)',langCode:'Mã Ngôn ngữ',longDescr:'Mô tả URL',cssClass:'Lớp Stylesheet',advisoryTitle:'Advisory Title',cssStyle:'Mẫu',ok:'Đồng ý',cancel:'Bỏ qua',generalTab:'Chung',advancedTab:'Mở rộng',validateNumberFailed:'Giá trị này không phải là số.',confirmNewPage:'Mọi thay đổi không được không được lưu lại của nội dung này sẽ bị mất. Bạn có chắc chắn muốn tải một trang mới?',confirmCancel:'Một vài tùy chọn đã bị thay đổi. 
Bạn có chắc chắn muốn đóng hộp thoại?',unavailable:'%1<span class="cke_accessibility">, không có</span>'},specialChar:{toolbar:'Chèn Ký tự đặc biệt',title:'Hãy chọn Ký tự đặc biệt'},link:{toolbar:'Chèn/Sửa Liên kết',menu:'Sửa Liên kết',title:'Liên kết',info:'Thông tin Liên kết',target:'Đích',upload:'Tải lên',advanced:'Mở rộng',type:'Kiểu Liên kết',toAnchor:'Neo trong trang này',toEmail:'Thư điện tử',target:'Đích',targetNotSet:'<không thiết lập>',targetFrame:'<khung>',targetPopup:'<cửa sổ popup>',targetNew:'Cửa sổ mới (_blank)',targetTop:'Cửa sổ trên cùng(_top)',targetSelf:'Cùng cửa sổ (_self)',targetParent:'Cửa sổ cha (_parent)',targetFrameName:'Tên Khung đích',targetPopupName:'Tên Cửa sổ Popup',popupFeatures:'Đặc điểm của Cửa sổ Popup',popupResizable:'Có thể thay đổi kích cỡ',popupStatusBar:'Thanh trạng thái',popupLocationBar:'Thanh vị trí',popupToolbar:'Thanh công cụ',popupMenuBar:'Thanh Menu',popupFullScreen:'Toàn màn hình (IE)',popupScrollBars:'Thanh cuộn',popupDependent:'Phụ thuộc (Netscape)',popupWidth:'Rộng',popupLeft:'Vị trí Trái',popupHeight:'Cao',popupTop:'Vị trí Trên',id:'Định danh',langDir:'Đường dẫn Ngôn ngữ',langDirNotSet:'<không thiết lập>',langDirLTR:'Trái sang Phải (LTR)',langDirRTL:'Phải sang Trái (RTL)',acccessKey:'Phím Hỗ trợ truy cập',name:'Tên',langCode:'Đường dẫn Ngôn ngữ',tabIndex:'Chỉ số của Tab',advisoryTitle:'Advisory Title',advisoryContentType:'Advisory Content Type',cssClasses:'Lớp Stylesheet',charset:'Bảng mã của tài nguyên được liên kết đến',styles:'Mẫu',selectAnchor:'Chọn một Neo',anchorName:'Theo Tên Neo',anchorId:'Theo Định danh Thành phần',emailAddress:'Thư điện tử',emailSubject:'Tiêu đề Thông điệp',emailBody:'Nội dung Thông điệp',noAnchors:'(Không có Neo nào trong tài liệu)',noUrl:'Hãy đưa vào Liên kết URL',noEmail:'Hãy đưa vào địa chỉ thư điện tử'},anchor:{toolbar:'Chèn/Sửa Neo',menu:'Thuộc tính Neo',title:'Thuộc tính Neo',name:'Tên của Neo',errorName:'Hãy nhập vào tên của Neo'},findAndReplace:{title:'Tìm kiếm và Thay 
Thế',find:'Tìm kiếm',replace:'Thay thế',findWhat:'Tìm chuỗi:',replaceWith:'Thay bằng:',notFoundMsg:'Không tìm thấy chuỗi cần tìm.',matchCase:'Phân biệt chữ hoa/thường',matchWord:'Giống toàn bộ từ',matchCyclic:'Giống một phần',replaceAll:'Thay thế Tất cả',replaceSuccessMsg:'%1 vị trí đã được thay thế.'},table:{toolbar:'Bảng',title:'Thuộc tính bảng',menu:'Thuộc tính bảng',deleteTable:'Xóa Bảng',rows:'Hàng',columns:'Cột',border:'Cỡ Đường viền',align:'Canh lề',alignNotSet:'<Chưa thiết lập>',alignLeft:'Trái',alignCenter:'Giữa',alignRight:'Phải',width:'Rộng',widthPx:'điểm (px)',widthPc:'%',height:'Cao',cellSpace:'Khoảng cách Ô',cellPad:'Đệm Ô',caption:'Đầu đề',summary:'Tóm lược',headers:'Đầu đề',headersNone:'Không có',headersColumn:'Cột Đầu tiên',headersRow:'Hàng Đầu tiên',headersBoth:'Cả hai',invalidRows:'Số lượng hàng phải là một số lớn hơn 0.',invalidCols:'Số lượng cột phải là một số lớn hơn 0.',invalidBorder:'Kích cỡ của đường biên phải là một số nguyên.',invalidWidth:'Chiều rộng của Bảng phải là một số nguyên.',invalidHeight:'Chiều cao của Bảng phải là một số nguyên.',invalidCellSpacing:'Khoảng cách giữa các ô phải là một số nguyên.',invalidCellPadding:'Đệm giữa các ô phải là một số nguyên.',cell:{menu:'Ô',insertBefore:'Chèn Ô Phía trước',insertAfter:'Chèn Ô Phía sau',deleteCell:'Xoá Ô',merge:'Kết hợp Ô',mergeRight:'Kết hợp Sang phải',mergeDown:'Kết hợp Xuống dưới',splitHorizontal:'Tách ngang Ô',splitVertical:'Tách dọc Ô',title:'Thuộc tính của Ô',cellType:'Kiểu của Ô',rowSpan:'Kết hợp hàng',colSpan:'Kết hợp cột',wordWrap:'Word Wrap',hAlign:'Canh lề ngang',vAlign:'Canh lề dọc',alignTop:'Trên cùng',alignMiddle:'Chính giữa',alignBottom:'Dưới cùng',alignBaseline:'Đường cơ sở',bgColor:'Màu nền',borderColor:'Màu viền',data:'Dữ liệu',header:'Đầu đề',yes:'Có',no:'Không',invalidWidth:'Chiều rộng của Ô phải là một số nguyên.',invalidHeight:'Chiều cao của Ô phải là một số nguyên.',invalidRowSpan:'Số hàng kết hợp phải là một số nguyên.',invalidColSpan:'Số cột kết hợp phải là 
một số nguyên.'},row:{menu:'Hàng',insertBefore:'Chèn Hàng Phía trước',insertAfter:'Chèn Hàng Phía sau',deleteRow:'Xoá Hàng'},column:{menu:'Cột',insertBefore:'Chèn Cột Phía trước',insertAfter:'Chèn Cột Phía sau',deleteColumn:'Xoá Cột'}},button:{title:'Thuộc tính Nút',text:'Chuỗi hiển thị (Giá trị)',type:'Kiểu',typeBtn:'Nút Bấm',typeSbm:'Nút Gửi',typeRst:'Nút Nhập lại'},checkboxAndRadio:{checkboxTitle:'Thuộc tính Nút kiểm',radioTitle:'Thuộc tính Nút chọn',value:'Giá trị',selected:'Được chọn'},form:{title:'Thuộc tính Biểu mẫu',menu:'Thuộc tính Biểu mẫu',action:'Hành động',method:'Phương thức',encoding:'Bảng mã',target:'Đích',targetNotSet:'<không thiết lập>',targetNew:'Cửa sổ mới (_blank)',targetTop:'Cửa sổ trên cùng(_top)',targetSelf:'Cùng cửa sổ (_self)',targetParent:'Cửa sổ cha (_parent)'},select:{title:'Thuộc tính Ô chọn',selectInfo:'Thông tin',opAvail:'Các tùy chọn có thể sử dụng',value:'Giá trị',size:'Kích cỡ',lines:'dòng',chkMulti:'Cho phép chọn nhiều',opText:'Văn bản',opValue:'Giá trị',btnAdd:'Thêm',btnModify:'Thay đổi',btnUp:'Lên',btnDown:'Xuống',btnSetValue:'Giá trị được chọn',btnDelete:'Xoá'},textarea:{title:'Thuộc tính Vùng văn bản',cols:'Cột',rows:'Hàng'},textfield:{title:'Thuộc tính Trường văn bản',name:'Tên',value:'Giá trị',charWidth:'Rộng',maxChars:'Số Ký tự tối đa',type:'Kiểu',typeText:'Ký tự',typePass:'Mật khẩu'},hidden:{title:'Thuộc tính Trường ẩn',name:'Tên',value:'Giá trị'},image:{title:'Thuộc tính Hình ảnh',titleButton:'Thuộc tính Nút hình ảnh',menu:'Thuộc tính Hình ảnh',infoTab:'Thông tin Hình ảnh',btnUpload:'Tải lên Máy chủ',url:'URL',upload:'Tải lên',alt:'Chú thích Hình ảnh',width:'Rộng',height:'Cao',lockRatio:'Giữ nguyên tỷ lệ',resetSize:'Kích thước gốc',border:'Đường viền',hSpace:'HSpace',vSpace:'VSpace',align:'Vị trí',alignLeft:'Trái',alignAbsBottom:'Dưới tuyệt đối',alignAbsMiddle:'Giữa tuyệt đối',alignBaseline:'Đường cơ sở',alignBottom:'Dưới',alignMiddle:'Giữa',alignRight:'Phải',alignTextTop:'Phía trên chữ',alignTop:'Trên',preview:'Xem 
trước',alertUrl:'Hãy đưa vào URL của hình ảnh',linkTab:'Liên kết',button2Img:'Bạn có muốn chuyển nút bấm bằng hình ảnh được chọn thành hình ảnh?',img2Button:'Bạn có muốn chuyển đổi hình ảnh được chọn thành nút bấm bằng hình ảnh?'},flash:{properties:'Thuộc tính Flash',propertiesTab:'Thuộc tính',title:'Thuộc tính Flash',chkPlay:'Tự động chạy',chkLoop:'Lặp',chkMenu:'Cho phép bật Menu của Flash',chkFull:'Cho phép Toàn màn hình',scale:'Tỷ lệ',scaleAll:'Hiển thị tất cả',scaleNoBorder:'Không đường viền',scaleFit:'Vừa vặn',access:'Truy cập Mã',accessAlways:'Luôn luôn',accessSameDomain:'Cùng tên miền',accessNever:'Không bao giờ',align:'Vị trí',alignLeft:'Trái',alignAbsBottom:'Dưới tuyệt đối',alignAbsMiddle:'Giữa tuyệt đối',alignBaseline:'Đường cơ sở',alignBottom:'Dưới',alignMiddle:'Giữa',alignRight:'Phải',alignTextTop:'Phía trên chữ',alignTop:'Trên',quality:'Chất lượng',qualityBest:'TỐt nhất',qualityHigh:'Cao',qualityAutoHigh:'Cao Tự động',qualityMedium:'Trung bình',qualityAutoLow:'Thấp Tự động',qualityLow:'Thấp',windowModeWindow:'Cửa sổ',windowModeOpaque:'Mờ đục',windowModeTransparent:'Trong suốt',windowMode:'Chế độ Cửa sổ',flashvars:'Các biến số dành cho Flash',bgcolor:'Màu nền',width:'Rộng',height:'Cao',hSpace:'HSpace',vSpace:'VSpace',validateSrc:'Hãy đưa vào Liên kết URL',validateWidth:'Chiều rộng phải là số nguyên.',validateHeight:'Chiều cao phải là số nguyên.',validateHSpace:'HSpace phải là số nguyên.',validateVSpace:'VSpace phải là số nguyên.'},spellCheck:{toolbar:'Kiểm tra Chính tả',title:'Kiểm tra Chính tả',notAvailable:'Xin lỗi, dịch vụ này hiện tại không có.',errorLoading:'Lỗi khi đang nạp dịch vụ ứng dụng: %s.',notInDic:'Không có trong từ điển',changeTo:'Chuyển thành',btnIgnore:'Bỏ qua',btnIgnoreAll:'Bỏ qua Tất cả',btnReplace:'Thay thế',btnReplaceAll:'Thay thế Tất cả',btnUndo:'Phục hồi lại',noSuggestions:'- Không đưa ra gợi ý về từ -',progress:'Đang tiến hành kiểm tra chính tả...',noMispell:'Hoàn tất kiểm tra chính tả: Không có lỗi chính tả',noChanges:'Hoàn tất 
kiểm tra chính tả: Không có từ nào được thay đổi',oneChange:'Hoàn tất kiểm tra chính tả: Một từ đã được thay đổi',manyChanges:'Hoàn tất kiểm tra chính tả: %1 từ đã được thay đổi',ieSpellDownload:'Chức năng kiểm tra chính tả chưa được cài đặt. Bạn có muốn tải về ngay bây giờ?'},smiley:{toolbar:'Hình biểu lộ cảm xúc (mặt cười)',title:'Chèn Hình biểu lộ cảm xúc (mặt cười)'},elementsPath:{eleTitle:'%1 thành phần'},numberedlist:'Danh sách có thứ tự',bulletedlist:'Danh sách không thứ tự',indent:'Dịch vào trong',outdent:'Dịch ra ngoài',justify:{left:'Canh trái',center:'Canh giữa',right:'Canh phải',block:'Canh đều'},blockquote:'Khối Trích dẫn',clipboard:{title:'Dán',cutError:'Các thiết lập bảo mật của trình duyệt không cho phép trình biên tập tự động thực thi lệnh cắt. Hãy sử dụng bàn phím cho lệnh này (Ctrl+X).',copyError:'Các thiết lập bảo mật của trình duyệt không cho phép trình biên tập tự động thực thi lệnh sao chép. Hãy sử dụng bàn phím cho lệnh này (Ctrl+C).',pasteMsg:'Hãy dán nội dung vào trong khung bên dưới, sử dụng tổ hợp phím (<STRONG>Ctrl+V</STRONG>) và nhấn vào nút <STRONG>Đồng ý</STRONG>.',securityMsg:'Do thiết lập bảo mật của trình duyệt nên trình biên tập không thể truy cập trực tiếp vào nội dung đã sao chép. 
Bạn cần phải dán lại nội dung vào cửa sổ này.'},pastefromword:{toolbar:'Dán với định dạng Word',title:'Dán với định dạng Word',advice:'Hãy dán nội dung vào trong khung bên dưới, sử dụng tổ hợp phím (<STRONG>Ctrl+V</STRONG>) và nhấn vào nút <STRONG>Đồng ý</STRONG>.',ignoreFontFace:'Chấp nhận các định dạng phông',removeStyle:'Gỡ bỏ các định dạng Styles'},pasteText:{button:'Dán theo định dạng văn bản thuần',title:'Dán theo định dạng văn bản thuần'},templates:{button:'Mẫu dựng sẵn',title:'Nội dung Mẫu dựng sẵn',insertOption:'Thay thế nội dung hiện tại',selectPromptMsg:'Hãy chọn Mẫu dựng sẵn để mở trong trình biên tập<br>(nội dung hiện tại sẽ bị mất):',emptyListMsg:'(Không có Mẫu dựng sẵn nào được định nghĩa)'},showBlocks:'Hiển thị các Khối',stylesCombo:{label:'Kiểu',voiceLabel:'Kiểu',panelVoiceLabel:'Chọn một kiểu',panelTitle1:'Kiểu Khối',panelTitle2:'Kiểu Trực tiếp',panelTitle3:'Kiểu Đối tượng'},format:{label:'Định dạng',voiceLabel:'Định dạng',panelTitle:'Định dạng',panelVoiceLabel:'Chọn định dạng đoạn văn bản',tag_p:'Normal',tag_pre:'Formatted',tag_address:'Address',tag_h1:'Heading 1',tag_h2:'Heading 2',tag_h3:'Heading 3',tag_h4:'Heading 4',tag_h5:'Heading 5',tag_h6:'Heading 6',tag_div:'Normal (DIV)'},font:{label:'Phông',voiceLabel:'Phông',panelTitle:'Phông',panelVoiceLabel:'Chọn phông'},fontSize:{label:'Cỡ chữ',voiceLabel:'Kích cỡ phông',panelTitle:'Cỡ chữ',panelVoiceLabel:'Chọn kích cỡ phông'},colorButton:{textColorTitle:'Màu chữ',bgColorTitle:'Màu nền',auto:'Tự động',more:'Màu khác...'},colors:{'000':'Black',800000:'Maroon','8B4513':'Saddle Brown','2F4F4F':'Dark Slate Gray','008080':'Teal','000080':'Navy','4B0082':'Indigo',696969:'Dim Gray',B22222:'Fire Brick',A52A2A:'Brown',DAA520:'Golden Rod','006400':'Dark Green','40E0D0':'Turquoise','0000CD':'Medium Blue',800080:'Purple',808080:'Gray',F00:'Red',FF8C00:'Dark Orange',FFD700:'Gold','008000':'Green','0FF':'Cyan','00F':'Blue',EE82EE:'Violet',A9A9A9:'Dark Gray',FFA07A:'Light 
Salmon',FFA500:'Orange',FFFF00:'Yellow','00FF00':'Lime',AFEEEE:'Pale Turquoise',ADD8E6:'Light Blue',DDA0DD:'Plum',D3D3D3:'Light Grey',FFF0F5:'Lavender Blush',FAEBD7:'Antique White',FFFFE0:'Light Yellow',F0FFF0:'Honeydew',F0FFFF:'Azure',F0F8FF:'Alice Blue',E6E6FA:'Lavender',FFF:'White'},scayt:{title:'Kiểm tra chính tả ngay khi gõ chữ (SCAYT)',enable:'Bật SCAYT',disable:'Tắt SCAYT',about:'Thông tin về SCAYT',toggle:'Bật tắt SCAYT',options:'Tùy chọn',langs:'Ngôn ngữ',moreSuggestions:'Đề xuất thêm',ignore:'Bỏ qua',ignoreAll:'Bỏ qua Tất cả',addWord:'Thêm Từ',emptyDic:'Tên của từ điển không được để trống.',optionsTab:'Tùy chọn',languagesTab:'Ngôn ngữ',dictionariesTab:'Từ điển',aboutTab:'Thông tin'},about:{title:'Thông tin về CKEditor',dlgTitle:'Thông tin về CKEditor',moreInfo:'Vui lòng ghé thăm trang web của chúng tôi để có thông tin về giấy phép:',copy:'Bản quyền © $1. Giữ toàn quyền.'},maximize:'Phóng to tối đa',fakeobjects:{anchor:'Neo',flash:'Hoạt họa Flash',div:'Ngắt Trang',unknown:'Đối tượng không rõ ràng'},resize:'Kéo rê để thay đổi kích cỡ'};
|
import BackToTop from "@/components/common/BackToTop/BackToTop";
// Mixin: shows a "back to top" button once the user has scrolled far enough,
// and scrolls the wrapped scroller back to the origin when triggered.
export const backToTop = {
    components: { BackToTop },
    data() {
        return { isShowBackTop: false };
    },
    methods: {
        // Smooth-scroll the scroller ref back to (0, 0) over 500 ms.
        backToTop() {
            this.$refs.scroll.scrollTo(0, 0, 500);
        },
        // Toggle button visibility: visible once scrolled 300px down
        // (scrollY is negative while scrolling down in this scroller).
        listenShowBackTop() {
            this.isShowBackTop = this.scrollY <= -300;
        },
    },
};
|
/*
* Copyright (C) 2021 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { describe, it } from 'deccjsunit/index';
import Data from '../../../../../../../utils/data.json';
import { HuksSignVerifyRSA } from '../../../../../../../utils/param/signverify/publicSignverifyParam';
import { stringToUint8Array } from '../../../../../../../utils/param/publicFunc';
import { publicSignVerifyFunc } from '../../../../../../../utils/param/signverify/publicSignverifyPromise.js';
// 63-byte test payload, converted to the Uint8Array form the HUKS API consumes.
let srcData63 = Data.Data63b;
let srcData63Kb = stringToUint8Array(srcData63);
describe('SecurityHuksSignVerifyRSAPromiseJsunit', function () {
    // RSA-512 / PKCS1-v1.5 / MD5 sign key lifecycle, finished with 'abort'.
    it('testSignVerifyRSA102', 0, async function (done) {
        const keyAlias = 'testSignVerifyRSASize512SIGNPKCS1MD5KeyAlias102';
        const options = {
            // HUKS property set: algorithm, purpose, digest, padding, key size.
            properties: [
                HuksSignVerifyRSA.HuksKeyAlgRSA,
                HuksSignVerifyRSA.HuksKeyRSAPurposeSIGN,
                HuksSignVerifyRSA.HuksTagPKCS1DigestMD5,
                HuksSignVerifyRSA.HuksKeyRSAPADDINGPKCS1V15,
                HuksSignVerifyRSA.HuksKeyRSASize512,
            ],
            inData: srcData63Kb,
        };
        await publicSignVerifyFunc(keyAlias, keyAlias + 'New', options, 'abort', true);
        done();
    });
});
|
3tuple(string,string,uint256,uint256,uint32,uint32,uint32,uint8,uint8,uint8,bool,bool,bool)[]: NPSCZUSr,no swag,0,9252183311529579,1629107003,1629092916,72003,4,50,3,false,false,true,Ashley's 👻,safelips swag,1,1598124591541123,1629107617,1629021347,288006,9,93,3,false,false,true,agCHJbuD,,2,1315411858406600,1629065440,1629030863,36000,2,67,4,false,false,true,Ashley's 🍌,,3,1315411858406600,1629107329,1629100287,72000,4,19,4,false,false,true,GqbWg0hQ,,4,6489365713190396,1629047515,1629029475,72001,2,54,1,false,false,true,Ashley's 🔥,safelips swag,5,1456768224973861,1629107267,1629029837,147600,9,80,1,false,false,true,Ashley's 🌊,,6,1527446408257492,1629107301,1629100319,90001,4,18,2,false,false,true,9yV2D9RZ,,7,5403907985216001,1629050891,1629032859,18000,1,47,4,false,false,true,Unknown,,8,3430338105094931,1629051207,1629032929,3600,1,65,2,false,false,true,Unknown,,9,6341260708312255,1629051243,1629033223,3600,1,36,1,false,false,true,Unknown,,10,3828336283359427,1629051305,1629033269,18000,1,1,1,false,false,true,OY8fAwxm,,11,9174027147231267,1629051435,1629033379,3600,1,6,3,false,false,true,Unknown,,12,5350736777744379,1629116557,1629098419,72002,3,98,1,false,false,true,MwARw8YY,,13,9766186097671807,1629109223,1629091267,25202,3,42,3,false,false,true,QV11BhLs,,14,3214299699040058,1629063466,1629045448,3600,1,14,3,false,false,true,Unknown,,15,3322318902067494,1629063598,1629045480,3600,1,48,1,false,false,true,Unknown,,16,4905842307628945,1629063654,1629045616,36000,2,87,4,false,false,true,78vDmreE,,17,9117391593261885,1629065582,1629047486,3600,1,48,1,false,false,true,Unknown,,18,9145709370246576,1629065710,1629047624,3600,1,72,1,false,false,true,Unknown,,19,6288023737670753,1629065790,1629047748,3600,1,32,1,false,false,true,Unknown,,20,3801717798038676,1629065900,1629047856,18000,1,31,4,false,false,true,Unknown,,21,2629243011506268,1629121369,1629084465,54001,3,78,4,false,false,true,DKGO06LN,icecream 
swag,22,1447016370160939,1629116243,1629098519,90002,3,78,3,false,false,true,Unknown,,23,8127775905431101,1629109611,1629091633,25201,2,54,3,false,false,true,Unknown,,24,5378509458468684,1629084487,1629066429,3600,1,18,3,false,false,true,Unknown,,25,3412762914314811,1629121217,1629103285,36000,2,75,4,false,false,true,Unknown,,26,3421550509704871,1629121621,1629103589,21601,2,54,3,false,false,true,Unknown,,27,6283629939975723,1629121403,1629085271,54000,3,95,4,false,false,true,Unknown,,28,6739256341587740,1629109501,1629091541,39600,3,73,2,false,false,true,Unknown,,29,3989989894625323,1629121329,1629084763,90000,5,84,1,false,false,true,Unknown,,30,3799520899191161,1629121451,1629098603,36000,2,11,4,false,false,true,Unknown,,31,3506910299115609,1629103455,1629085411,3600,1,70,3,false,false,true,0R2vLSaS,,32,938408255393727,1629122485,1629085473,54000,3,89,2,false,false,true,Unknown,,33,4895270119545666,1629121853,1629098479,36000,2,63,4,false,false,true,Unknown,,34,4154016516930238,1629121705,1629085578,90000,5,72,1,false,false,true,Unknown,,35,6593868303688339,1629109929,1629091757,25200,3,1,1,false,false,true,Unknown,,36,4027334099997170,1629109733,1629091713,36000,2,71,4,false,false,true,Unknown,,37,4533092080412414,1629109695,1629091663,36000,2,53,2,false,false,true,Unknown,,38,6541617008439367,1629110147,1629091973,39600,3,32,1,false,false,true,Unknown,,39,7855629948475112,1629110003,1629091959,36001,2,98,3,false,false,true,Unknown,,40,7896900159984473,1629110227,1629092187,36000,2,95,4,false,false,true,I8QxfIZ4,,41,3334436577470117,1629117571,1629099383,36000,2,45,2,false,false,true,pVCdTV9y,,42,73293379797096,1629117567,1629099433,36000,2,7,4,false,false,true,94Q7ZVXZ,,43,7701286313960044,1629117663,1629099603,72001,2,2,3,false,false,true,VVQHSnMA,,44,5013426811166663,1629118007,1629099849,54001,2,66,3,false,false,true,Trxivut6,,45,6587457647305051,1629118333,1629100251,54000,3,28,1,false,false,true,ys8jZMep,,46,2511294338456091,1629118583,1629100391,54000,3,96
,1,false,false,true,yd9C3lnY,,47,1053936099338045,1629118609,1629100453,72001,2,18,3,false,false,true,LpTIJfPH,,48,2202820867549863,1629118627,1629100525,36000,2,69,2,false,false,true,LC6IFjSb,,49,1063659986412318,1629118841,1629100705,36000,2,27,4,false,false,true,tMUASG1M,,50,2838937724278794,1629118853,1629100759,36000,2,59,4,false,false,true,nB0VLB1j,,51,6687202451293699,1629122677,1629101037,198000,11,99,1,false,false,true
|
// import { defaultAction } from '../actions';
// import { DEFAULT_ACTION } from '../constants';
// describe('FinancialWizardContainer actions', () => {
// describe('Default Action', () => {
// it('has a type of DEFAULT_ACTION', () => {
// const expected = {
// type: DEFAULT_ACTION,
// };
// expect(defaultAction()).toEqual(expected);
// });
// });
// });
|
// Live GitHub user search: query the API on every keystroke and render
// either the profile + repos or a "not found" alert.
const github = new Github();
const ui = new UI();

const searchUser = document.getElementById('searchUser');

searchUser.addEventListener('keyup', (e) => {
    const userText = e.target.value;

    // Empty box: clear any previously shown profile and stop.
    if (userText === '') {
        ui.clearProfile();
        return;
    }

    github.getUser(userText).then((data) => {
        if (data.profile.message === 'Not Found') {
            ui.showAlert('User not found', 'alert alert-danger');
        } else {
            ui.showProfile(data.profile);
            ui.showRepos(data.repos);
        }
    });
});
|
import React, { Component, PropTypes } from "react";
import { connect } from 'react-redux';
import { passwordReset } from '../../actions/auth';
import MessageError from '../common/MessageError';
import MessageSuccess from '../common/MessageSuccess';
import Button from '../common/Button';
/**
* Password reset
*/
class ResetPassword extends Component {
constructor(props) {
super(props);
}
handleSubmit(event) {
event.preventDefault();
const { reset } = this.props;
let payload = {
'token': this.refs.token.value,
'email': this.refs.email.value,
'password': this.refs.password.value,
'password_confirmation': this.refs.password_confirmation.value
};
reset(payload);
}
render ()
{
// Get the query string from the URL such as ?foo=bar
// Query will now be set to { foo: 'bar' }
// const query = querystring.parse(this.props.location.search);
const { auth } = this.props;
let response;
if (auth.isFetching) {
response = (<img src="/images/loading.gif" alt="" />);
} else if ('undefined' !== typeof auth.serverError) {
response = (
<MessageError title="System error"
message={auth.serverError}
isVisible={true} />
);
} else if (auth.isSuccess) {
response = (
<MessageSuccess title="Password change"
message="We have been able to change your password, please try to login again."
isVisible={true} />
);
}
return (
<div className="container">
<form onSubmit={(event) => this.handleSubmit(event)} method="post" className="sign-up-on-home-page">
<input type="hidden" ref="token" id="token" value={this.props.params.hash} />
<h1 id="title" className="form-signin-heading text-center">Reset password</h1>
<div className="register-form">
{response}
<p><label htmlFor="inputEmail" className="sr-only">Email address</label></p>
<p><input type="text" id="inputEmail" ref="email" className="form-control" value={this.props.location.query.email} /></p>
<p><label htmlFor="inputPassword" className="sr-only">Password</label></p>
<p><input type="password" id="inputPassword" ref="password" className="form-control" placeholder="Password" /></p>
<p><label htmlFor="confirmationPassword" className="sr-only">Password</label></p>
<p><input type="password" id="confirmationPassword" ref="password_confirmation"
className="form-control" placeholder="Password confirmation" /></p>
<br/>
<p className="text-center">
<Button title="Reset password" />
</p>
<br/>
</div>
</form>
</div>
);
}
}
// Runtime prop validation (PropTypes from the legacy 'react' export).
ResetPassword.propTypes = {
    reset: PropTypes.func,
    auth: PropTypes.object,
    params: PropTypes.object,
    location: PropTypes.object,
    token: PropTypes.string,
    email: PropTypes.string
};
// Expose only the auth slice of the store to the component.
function mapStateToProps(state) {
    const { auth } = state;
    return { auth };
}
// Map the passwordReset action creator onto the `reset` prop.
// The callback intentionally returns undefined, as the original did.
function mapDispatchToProps(dispatch) {
    const reset = (creds) => {
        dispatch(passwordReset(creds));
    };
    return { reset };
}
// Bind the component to the Redux store (auth state + reset action).
export default connect(
    mapStateToProps,
    mapDispatchToProps
)(ResetPassword);
|
# Generated by Django 3.1.4 on 2020-12-16 20:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace DetalleVentaModel.productoAlmacenId with a direct product FK."""

    dependencies = [
        ('administracion', '0005_auto_20201215_1928'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='detalleventamodel',
            name='productoAlmacenId',
        ),
        migrations.AddField(
            model_name='detalleventamodel',
            name='productoId',
            # db_column keeps the legacy 'prod_id' column name; PROTECT blocks
            # deleting a product that still has sale detail rows.
            field=models.ForeignKey(db_column='prod_id', default=None, on_delete=django.db.models.deletion.PROTECT, related_name='productoVentas', to='administracion.productomodel'),
            # default=None was only needed to populate existing rows once.
            preserve_default=False,
        ),
    ]
|
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Directory containing this module.
here = os.path.dirname(__file__)
# Repository root, one level above this directory.
top = os.path.abspath(os.path.join(here, '..'))


class jstests_setup:
    """Static-file and script configuration for the in-browser JS test suite."""
    # URL prefix -> filesystem directory served statically to the tests.
    staticDirs = {
        '/clients': here,
        '/static': os.path.join(top, 'static'),
        '/testsupport': os.path.join(here, 'test', 'support'),
        '/node_modules': os.path.join(top, 'node_modules'),
    }
    # Extra JS repositories to expose (none).
    jsRepos = []
    # Scripts injected into every test page, in load order.
    jsScripts = [
        '/node_modules/requirejs/require.js',
        '/testsupport/config.js'
    ]
    # presumably tells the loader to export MochiKit symbols — TODO confirm
    # against the test-runner framework.
    MochiKit__export__ = True
|
#YOLOv4-DeepSORT用
import os
import glob
import pandas as pd
import re
import statistics
# Aggregate YOLOv4-DeepSORT detections per track id and report how often each
# modal (most frequent) class occurs across tracks.
current_dir = os.path.dirname(os.path.abspath(__file__))
path = 'test_info.txt'
df = pd.read_csv(os.path.join(current_dir, path), header=None)
# Name the raw detection columns.
df.columns = ["trackid", "class", "xmin", "ymin", "xmax", "ymax"]
# Per track id, collect the sorted list of class labels observed.
df = df[["trackid", "class"]]
df = (df.groupby('trackid')['class']
      .apply(list)
      .apply(lambda x: sorted(x))
      ).reset_index()
# BUG FIX: the original wrote ``df.colums = [...]`` (typo), which silently
# created an unused attribute instead of renaming the columns.
df.columns = ["trackid", "class"]
df.to_csv(os.path.join(current_dir, 'test_info.csv'), encoding='utf_8', index=False)
# Reduce each track's label list to its most frequent class, then count.
df['class'] = df['class'].apply(statistics.mode)
vc = df['class'].value_counts()
print(vc)
|
//@ sourceURL=/controller/main.js
/*
 * Derive from the base class.
 */
function Main() {
    MudderyMain.call(this);
}

// Set up prototypal inheritance from MudderyMain and restore constructor.
Main.prototype = prototype(MudderyMain.prototype);
Main.prototype.constructor = Main;
/*
 * Event when the connection opens.
 */
Main.prototype.onConnectionOpen = function() {
    // Not controlling a character yet; show the pre-login UI.
    this.puppet = false;
    controller.showUnlogin();
}
/*
 * Event when the player logins.
 */
Main.prototype.onLogin = function(data) {
    // Intentionally empty: this subclass adds no extra login handling.
}
/*
 * Show the layout when players has not connected.
 */
Main.prototype.showConnect = function() {
    this.hideTabs();
    $("#tab_connect").show();
    controller.showContent("connect");
    // Leave any pending combat queue and drop chat channels.
    this.leftCombatQueue();
    this.clearChannels();
}
/*
 * Show the layout when players puppet.
 */
Main.prototype.showPuppet = function() {
    // show login UI
    this.clearMsgWindow();
    this.clearPromptBar();
    $("#prompt_content").show();

    // show login tabs
    this.hideTabs();
    $("#tab_scene").show();
    $("#tab_status").show();
    $("#tab_inventory").show();
    $("#tab_honours").show();
    $("#tab_system").show();
    // Social tab is hidden in solo mode.
    if (!this._solo_mode) {
        $("#tab_social").show();
    }

    this.showContent("scene");
}
/*
 * Show the layout when players unlogin.
 */
// FIX: this was assigned to MudderyMain.prototype, mutating the base class
// shared by every subclass; every other method in this file is defined on
// Main.prototype, so define it there for consistency.
Main.prototype.showUnlogin = function() {
    // show unlogin UI
    this.clearMsgWindow();
    $("#prompt_content").hide();
    this.leftCombatQueue();

    // show unlogin tabs
    this.hideTabs();
    $("#tab_quick_login").show();

    this.showContent("quick_login");
    this.clearChannels();
}
/*
 * Reset the view's language.
 */
// FIX: this was assigned to MudderyMain.prototype, mutating the base class
// shared by every subclass; defined on Main.prototype for consistency with
// the rest of this file.
Main.prototype.resetLanguage = function() {
    $("#view_level").text($$("LEVEL: "));
    $("#view_exp").text($$("EXP: "));
    $("#view_hp").text($$("HP: "));
    $("#view_connect").text($$("Connect"));
    $("#view_quick_login").text($$("Login"));
    $("#view_scene").text($$("Scene"));
    $("#view_char").text($$("Char"));
    $("#view_status").text($$("Status"));
    $("#view_inventory").text($$("Inventory"));
    $("#view_skills").text($$("Skills"));
    $("#view_quests").text($$("Quests"));
    $("#view_honours").text($$("Honours"));
    $("#view_social").text($$("Social"));
    $("#view_map").text($$("Map"));
    $("#view_system").text($$("Sys"));
    $("#view_system_char").text($$("System"));
    $("#view_logout").text($$("Logout"));
    $("#view_logout_puppet").text($$("Logout"));
    $("#msg_send").text($$("Send"));
}
|
#import <UIKit/UIKit.h>
FOUNDATION_EXPORT double Pods_test101_TestsVersionNumber;
FOUNDATION_EXPORT const unsigned char Pods_test101_TestsVersionString[];
|
# -*- coding: utf-8 -*-
from json import loads
from django.test import TestCase
from eth_tester import EthereumTester
from web3.providers.eth_tester import EthereumTesterProvider
from ..event_listener import EventListener
from ..factories import DaemonFactory
from ..models import Block, Daemon
from ..utils import remove_0x_head
from ..web3_service import Web3Service
from .utils import (CentralizedOracle, centralized_oracle_abi,
centralized_oracle_bytecode)
class TestDaemonExec(TestCase):
    """Integration tests for EventListener over an in-memory eth-tester chain.

    Each test deploys a centralized-oracle factory contract, points the
    listener at it, and checks that emitted events are decoded/stored and
    that chain reorganisations roll saved state back.
    """

    def setUp(self):
        """Spin up a fresh chain, deploy the factory, and build the listener."""
        self.web3 = Web3Service(provider=EthereumTesterProvider(EthereumTester())).web3
        self.provider = self.web3.providers[0]
        self.web3.eth.defaultAccount = self.web3.eth.coinbase
        # Mock web3
        self.daemon = DaemonFactory()
        self.tx_data = {'from': self.web3.eth.accounts[0],
                        'gas': 1000000}
        # create oracles
        centralized_contract_factory = self.web3.eth.contract(abi=centralized_oracle_abi,
                                                              bytecode=centralized_oracle_bytecode)
        tx_hash = centralized_contract_factory.deploy()
        self.centralized_oracle_factory_address = self.web3.eth.getTransactionReceipt(tx_hash).get('contractAddress')
        self.centralized_oracle_factory = self.web3.eth.contract(self.centralized_oracle_factory_address,
                                                                 abi=centralized_oracle_abi)
        # Contract map consumed by EventListener; the address is stored
        # without its '0x' prefix (hence the [2::] slice).
        self.contracts = [
            {
                'NAME': 'Centralized Oracle Factory',
                'EVENT_ABI': centralized_oracle_abi,
                'EVENT_DATA_RECEIVER': 'django_eth_events.tests.utils.CentralizedOraclesReceiver',
                'ADDRESSES': [self.centralized_oracle_factory_address[2::]]
            }
        ]
        self.listener_under_test = EventListener(contract_map=self.contracts,
                                                 provider=self.provider)
        CentralizedOracle().reset()
        self.assertEqual(CentralizedOracle().length(), 0)
        # The factory deployment above mined block 1.
        self.assertEqual(1, self.web3.eth.blockNumber)

    def tearDown(self):
        """Reset the chain to genesis so tests do not share chain state."""
        self.provider.ethereum_tester.reset_to_genesis()
        self.assertEqual(0, self.web3.eth.blockNumber)

    def test_create_centralized_oracle(self):
        """A CentralizedOracle creation event is picked up and backed up."""
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(0, Daemon.get_solo().block_number)
        self.assertEqual(0, Block.objects.all().count())
        # Create centralized oracle
        tx_hash = self.centralized_oracle_factory.transact(self.tx_data).createCentralizedOracle(
            b'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG')
        self.assertIsNotNone(tx_hash)
        self.listener_under_test.execute()
        self.assertEqual(CentralizedOracle().length(), 1)
        self.assertEqual(2, Daemon.get_solo().block_number)
        # Check backup
        self.assertEqual(2, Block.objects.all().count())
        block = Block.objects.get(block_number=2)
        self.assertEqual(1, len(loads(block.decoded_logs)))

    def test_reorg_centralized_oracle(self):
        """A simulated reorg rolls back the stored oracle and block backups."""
        # initial transaction, to set reorg init
        accounts = self.web3.eth.accounts
        self.web3.eth.sendTransaction({'from': accounts[0], 'to': accounts[1], 'value': 5000000})
        self.assertEqual(0, Block.objects.all().count())
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(2, self.web3.eth.blockNumber)
        # Create centralized oracle
        tx_hash = self.centralized_oracle_factory.transact(self.tx_data).createCentralizedOracle(
            b'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG')
        self.assertIsNotNone(tx_hash)
        self.listener_under_test.execute()
        self.assertEqual(CentralizedOracle().length(), 1)
        self.assertEqual(3, Daemon.get_solo().block_number)
        self.assertEqual(3, Block.objects.all().count())
        self.assertEqual(3, self.web3.eth.blockNumber)
        # Reset blockchain (simulates reorg)
        self.tearDown()
        # Re-mine three blocks with different content so heights match but
        # hashes differ from what was stored.
        self.web3.eth.sendTransaction({'from': accounts[0], 'to': accounts[1], 'value': 1000000})
        self.web3.eth.sendTransaction({'from': accounts[0], 'to': accounts[1], 'value': 1000000})
        self.web3.eth.sendTransaction({'from': accounts[0], 'to': accounts[1], 'value': 1000000})
        self.assertEqual(3, self.web3.eth.blockNumber)
        # Force block_hash change (cannot recreate a real reorg with python testrpc)
        # TODO Check if it can be done with eth-tester
        block_hash = remove_0x_head(self.web3.eth.getBlock(1)['hash'].hex())
        Block.objects.filter(block_number=1).update(block_hash=block_hash)
        self.listener_under_test.execute()
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(3, Daemon.get_solo().block_number)
        self.assertEqual(3, Block.objects.all().count())
|
// @flow
import {LocalStore} from "./localStore";
/*
* A simple abstraction over 'localStorage' to provide transparent JSON
* serialization and deserialization.
*
* The implementation is borrowed heavily from Khan Academy's LocalStore
* module, and also KaVideoPlayer's SafeLocalStore module.
*/
export default class BrowserLocalStore implements LocalStore {
version: string;
keyPrefix: string;
constructor({version, keyPrefix}: {|+version: string, +keyPrefix: string|}) {
this.version = version;
this.keyPrefix = keyPrefix;
}
cacheKey(key: string): string {
if (!key) {
throw new Error("Falsy key provided to cacheKey: " + key);
}
return [this.keyPrefix, this.version, key].join(":");
}
get<T>(key: string, whenUnavailable: T): T {
if (!this.isEnabled()) {
return whenUnavailable;
}
try {
const data = window.localStorage[this.cacheKey(key)];
if (data) {
return JSON.parse(data);
} else {
return whenUnavailable;
}
} catch (e) {
// If we had trouble retrieving, like FF's NS_FILE_CORRUPTED:
// http://stackoverflow.com/q/18877643/
return whenUnavailable;
}
}
set(key: string, data: mixed): void {
if (!this.isEnabled()) {
return;
}
const stringified = JSON.stringify(data);
try {
window.localStorage[this.cacheKey(key)] = stringified;
} catch (e) {
// Probably went over the storage limit... that's not good.
throw e;
}
}
/*
* Delete whatever data was associated with the given key.
*/
del(key: string): void {
if (!this.isEnabled()) {
return;
}
const cacheKey = this.cacheKey(key);
if (cacheKey in window.localStorage) {
// (IE throws when deleting a non-existent entry.)
delete window.localStorage[cacheKey];
}
}
/*
* Local storage might be disabled in old browsers or in Safari's
* private browsing mode. Don't die.
*/
isEnabled(): boolean {
const uid = String(+new Date());
try {
window.sessionStorage[uid] = uid;
const enabled = window.sessionStorage[uid] === uid;
window.sessionStorage.removeItem(uid);
return enabled;
} catch (e) {
return false;
}
}
}
|
import argparse
import typing
from os import path
from pathlib import Path, PurePath
import shtab
import yaml
from opera.commands.info import info
from opera.error import DataError, ParseError
from opera.parser import tosca
from opera.storage import Storage
from opera.utils import prompt_yes_no_question
def add_parser(subparsers):
    """Register the ``notify`` subcommand and its CLI options.

    The order of ``add_argument`` calls is meaningful: it defines the order
    in which options appear in ``--help`` output.
    """
    parser = subparsers.add_parser(
        "notify",
        help="Notify the orchestrator about changes after deployment and run triggers defined in TOSCA policies"
    )
    parser.add_argument(
        "--instance-path", "-p",
        help="Storage folder location (instead of default .opera)"
    )
    parser.add_argument(
        "--trigger", "-t", "--event", "-e", metavar="TRIGGER_OR_EVENT", required=True,
        help="TOSCA policy trigger name or event that will invoke all the actions (interface operations) on policy",
    )
    # shtab shell-completion hint: complete this argument as a file path.
    parser.add_argument(
        "--notification", "-n", type=argparse.FileType("r"),
        help="Notification file (usually JSON) with changes that will be exposed to TOSCA interfaces",
    ).complete = shtab.FILE
    parser.add_argument(
        "--force", "-f", action="store_true",
        help="Skip any prompts and force execution",
    )
    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Enable verbose mode",
    )
    parser.set_defaults(func=_parser_callback)
def _parser_callback(args):
    """Validate arguments, confirm risky project states, and run ``notify``.

    Returns a process exit code: 0 on success or user abort, 1 on
    parse/data errors. Raises ArgumentTypeError for a bad instance path.
    """
    if args.instance_path and not path.isdir(args.instance_path):
        raise argparse.ArgumentTypeError(f"Directory {args.instance_path} is not a valid path!")

    storage = Storage.create(args.instance_path)
    status = info(None, storage)["status"]

    if not args.force and storage.exists("instances"):
        # A concurrent operation is a hard stop; no confirmation can help.
        if status in ("deploying", "undeploying"):
            print("The project is in the middle of some other operation. Please try again after some time.")
            return 0
        # States in which notify is allowed but likely unintended:
        # warn and ask for explicit confirmation before proceeding.
        warnings = {
            "initialized": "Running notify without previously running deploy might have unexpected consequences.",
            "undeployed": "Running notify in an undeployed project might have unexpected consequences.",
            "error": "Running notify after a deployment with an error might have unexpected consequences.",
        }
        if status in warnings:
            print(warnings[status])
            if not prompt_yes_no_question():
                return 0

    # NOTE: the original also warned when no --trigger was supplied, but that
    # branch was unreachable because --trigger is declared required=True in
    # add_parser; it has been removed.

    # read the notification file and then pass its contents to the library function
    notification_file_contents = Path(args.notification.name).read_text(encoding="utf-8") if args.notification else None
    try:
        notify(storage, args.verbose, args.trigger, notification_file_contents)
    except ParseError as e:
        print(f"{e.loc}: {e}")
        return 1
    except DataError as e:
        print(str(e))
        return 1
    return 0
def notify(storage: Storage, verbose_mode: bool, trigger_name_or_event: str,
           notification_file_contents: typing.Optional[str]):
    """Load the stored TOSCA template and fire the matching policy trigger.

    Raises DataError if ``trigger_name_or_event`` does not match any trigger
    name or event in the template's policies. Prints a message (without
    raising) when the storage has no ``root_file``.
    """
    # Stored deployment inputs, if any, feed template instantiation.
    if storage.exists("inputs"):
        inputs = yaml.safe_load(storage.read("inputs"))
    else:
        inputs = {}
    if storage.exists("root_file"):
        service_template_path = PurePath(storage.read("root_file"))
        workdir = Path(service_template_path.parent)
        # CSAR deployments are unpacked under <storage>/csars/csar; resolve
        # the template path relative to that directory instead.
        if storage.exists("csars"):
            csar_dir = Path(storage.path) / "csars" / "csar"
            workdir = csar_dir
            ast = tosca.load(workdir, service_template_path.relative_to(csar_dir))
        else:
            ast = tosca.load(workdir, PurePath(service_template_path.name))
        template = ast.get_template(inputs)
        # check if specified trigger or event name exists in template
        if trigger_name_or_event:
            trigger_name_or_event_exists = False
            for policy in template.policies:
                for trigger in policy.triggers.values():
                    if trigger_name_or_event in (trigger.name, trigger.event.data):
                        trigger_name_or_event_exists = True
                        break
            if not trigger_name_or_event_exists:
                raise DataError(f"The provided trigger or event name does not exist: {trigger_name_or_event}.")
            topology = template.instantiate(storage)
            topology.notify(verbose_mode, workdir, trigger_name_or_event, notification_file_contents)
    else:
        print("There is no root_file in storage.")
|
import sys
import sdl2
import sdl2.ext
def run():
    """Open an 800x600 window and pump SDL events until the user quits.

    Returns 0 so the process exit code reflects a clean shutdown.
    """
    sdl2.ext.init()
    window = sdl2.ext.Window("The Pong Game", size=(800, 600))
    window.show()
    # NOTE: the original also built a SpriteFactory and a sprite render
    # system that were never used; both unused locals were removed.
    running = True
    while running:
        for event in sdl2.ext.get_events():
            if event.type == sdl2.SDL_QUIT:
                running = False
                break
        window.refresh()
    return 0


if __name__ == "__main__":
    sys.exit(run())
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models.base import Model
from django.utils import timezone
from userAuthentication.models import User, User_profile
import random, json
# Create your models here.
class Store(models.Model):
    """A user's shop; each user owns at most one store."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name = 'store')

    def serialize(self):
        """Return a JSON-friendly dict with owner info and products (newest first)."""
        data_to_return = {'id': self.id, 'owner': self.user.first_name, 'user_id': self.user.id, 'products': [product.serialize() for product in self.products.order_by('-dateCreated')]}
        return data_to_return

    def __str__(self):
        return f"{self.user} {len(self.products.all())}"
class Product(models.Model):
    """An item listed in a user's store."""
    name = models.CharField(max_length=100)
    description = models.CharField(max_length=200)
    price = models.IntegerField()
    # Image stored as text — presumably a URL or base64 payload; TODO confirm.
    image = models.TextField()
    # Users watching this product.
    watchers = models.ManyToManyField(User, related_name='watched_products', blank = True)
    store = models.ForeignKey(Store, on_delete = models.CASCADE, related_name = 'products')
    isAvailable = models.BooleanField(default = True)
    availableStock = models.IntegerField(default = 1)
    initialStock = models.IntegerField(default = 1)
    isDelivered = models.BooleanField(default = False)
    dateCreated = models.DateTimeField(auto_now_add=True)

    def serialize(self):
        """Return a JSON-friendly dict of this product, including its owner."""
        data_to_return = {'id': self.id, 'name': self.name, 'description': self.description, 'price': self.price, 'initialStock': self.initialStock, 'currentStock': self.availableStock, 'image': self.image, 'isAvailable': self.isAvailable, 'dateCreated': self.dateCreated.timestamp(), 'owner': {'id': self.store.user.id, 'username': self.store.user.username}}
        return data_to_return

    def __str__(self):
        return f"{self.name} {self.price}"
class Cart(models.Model):
    """A user's shopping cart; contents live in the related Order rows."""
    user = models.OneToOneField(User, on_delete = models.CASCADE, related_name = 'cart')
    # products = models.ManyToManyField(Product, related_name='cart')

    def serialize(self):
        """Return a JSON-friendly dict with the user's orders (newest first)."""
        data_to_return = {'id': self.id, 'user': {'id': self.user.id, 'username': self.user.username}, 'products': [order.serialize() for order in self.orders.order_by('-dateCreated')]}
        return data_to_return

    def __str__(self):
        return f"{self.user} {len(self.orders.all())}"
class Post(models.Model):
    """A social feed post with optional image."""
    content = models.TextField()
    poster = models.ForeignKey(User, on_delete= models.DO_NOTHING, related_name='posts')
    image = models.TextField()
    dateCreated = models.DateTimeField(auto_now_add = True)

    def serialize(self, user):
        """Return a JSON-friendly dict; ``isLiked`` reflects whether *user* liked it."""
        if self.image:
            image = self.image
        else:
            image = None
        data_to_return = {'id': self.id, "content": self.content,'posterId': self.poster.id, "poster": self.poster.username, 'image': image , 'dateCreated': self.dateCreated.timestamp(), 'number_of_likes': len(self.likes.all()), 'posterPicture': self.poster.profile_picture}
        data_to_return['isLiked'] = user in [like.liker for like in self.likes.all()]
        return data_to_return

    def test(self, start, end):
        # NOTE(review): returns self when this post's index in
        # Post.objects.all() lies in [end, start], else implicitly None.
        # Loads every post to find the index — confirm this is intended.
        index = list(Post.objects.all()).index(self)
        if index <= start and index >= end:
            return self

    def __str__(self):
        return f"{self.content[:25]}..."
def _generate_account_number():
    """Return a fresh random 8-digit account number.

    Passed as a callable to ``default=`` so Django evaluates it per object.
    The original used ``default=random.randint(...)``, which was evaluated
    once at import time — every new Account shared the same default, so the
    ``unique=True`` constraint failed on the second insert.
    (Schema is unchanged; a no-op migration will record the new default.)
    """
    return random.randint(9999999, 99999999)


class Account(models.Model):
    """A simple per-user payment account."""
    owner = models.OneToOneField(User, on_delete = models.CASCADE, related_name = 'account')
    # Unique 8-digit account number, generated per instance.
    number = models.IntegerField(default = _generate_account_number, unique = True)
    amount = models.IntegerField(default = 0)

    def serialize(self):
        """Return a JSON-friendly dict describing this account."""
        data_to_return = {'id': self.id, "owner": self.owner.username, 'number': self.number, 'accountName': f"{self.owner.first_name} {self.owner.last_name}", 'balance': f"${self.amount}"}
        return data_to_return

    def __str__(self):
        return f"NAME: {self.owner.first_name} {self.owner.last_name} NUMBER: {str(self.number)}"
class Like(models.Model):
    """A single user's like on a single post."""
    post = models.ForeignKey(Post, on_delete = models.CASCADE, related_name='likes')
    liker = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'likes')

    def __str__(self):
        return f"{self.liker} liked {self.post}"

    def serialize(self):
        """Return a JSON-friendly dict of this like."""
        data_to_return = {'id': self.id, 'post_id': self.post.id, 'liker_id': self.liker.id, 'post_content_shortened': self.post.__str__()}
        return data_to_return
class Comment(models.Model):
    """A short text comment by *commenter* on *post*."""
    text = models.CharField(max_length = 300)
    post = models.ForeignKey(Post, on_delete = models.CASCADE, related_name = 'comments')
    commenter = models.ForeignKey(User, on_delete = models.DO_NOTHING, related_name = 'comments')

    def __str__(self):
        return f"{self.text} {self.post}"

    def serialize(self):
        """Return a JSON-serializable summary of the comment."""
        summary = {
            'id': self.id,
            'post_id': self.post.id,
            'commenter': str(self.commenter),
            'post_content_shortened': str(self.post),
        }
        return summary
class Order(models.Model):
    """A line item linking a product to a cart with an ordered quantity."""
    product = models.ForeignKey(Product, on_delete = models.CASCADE, related_name = 'orders')
    cart = models.ForeignKey(Cart, on_delete = models.CASCADE, related_name = 'orders')
    number = models.IntegerField(default = 1)
    dateCreated = models.DateTimeField(auto_now = True)

    def __str__(self):
        return f"{self.product.name} {self.number}"

    def serialize(self):
        """Return the product's serialized form minus its stock fields, with
        'currentStock' repurposed to carry the ordered quantity."""
        useless_keys = {'initialStock', 'currentStock', 'isAvailable'}
        # Serialize once; the original re-serialized the product on every
        # key of the loop (and once more to obtain the keys).
        product_data = self.product.serialize()
        data_to_return = {key: value for key, value in product_data.items()
                          if key not in useless_keys}
        data_to_return['currentStock'] = self.number
        return data_to_return
class Notification(models.Model):
    """An activity notification shown to *owner*, caused by *related_user*."""

    # (stored value, display label) pairs for the notification_type field.
    # Renamed from 'notification_type': the original tuple was shadowed by
    # the model field of the same name defined below.
    NOTIFICATION_TYPE_CHOICES = (
        ('new_post', 'NEW_POST'),
        ('update_profile', 'UPDATE_PROFILE'),
        ('user_update_profile', 'USER_UPDATE_PROFILE'),
        ('view_store', 'VIEW_STORE'),
        ('from_store_to_cart', 'PLUS_STC'),
        ('to_store_from_cart', "MINUS_STC"),
        ('like_post', 'LIKE_POST'),
        ('followed', 'FOLLOW'),
        ('new_product', 'NEW_PRODUCT')
    )
    text = models.CharField(max_length = 200)
    owner = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'notifications')
    related_user = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'notification_related')
    # NOTE(review): default 'admin' is not among the declared choices; model
    # forms will reject it even though direct saves won't -- confirm intent.
    notification_type = models.CharField(choices = NOTIFICATION_TYPE_CHOICES, default = 'admin', max_length = 20)
    dateCreated = models.DateTimeField(auto_now = True)

    def __str__(self):
        return self.text

    def serialize(self):
        """Return a JSON-serializable summary of the notification."""
        return {
            'text': self.text,
            'owner': self.owner.username,
            'related_picture': self.related_user.profile_picture,
            'related_user': self.related_user.id,
            'notification_type': self.notification_type,
            'dateCreated': self.dateCreated.timestamp(),
        }
class Conversation(models.Model):
    """A chat thread shared by a set of users."""
    users = models.ManyToManyField(User, related_name = 'conversation')
    # auto_now: bumped on every save so threads can be ordered by activity.
    last_modified = models.DateTimeField(auto_now= True)
class Message(models.Model):
    """A single direct message inside a Conversation."""
    conversation = models.ForeignKey(Conversation, related_name = 'messages', on_delete = models.CASCADE)
    sender = models.ForeignKey(User, related_name = 'messages_sent', on_delete = models.CASCADE)
    receiver = models.ForeignKey(User, related_name = 'messages_received', on_delete = models.CASCADE)
    received = models.BooleanField(default = False)
    content = models.TextField()
    date_sent = models.DateTimeField(auto_now_add=True)

    def serialize(self, user_id: int):
        """Return a JSON-serializable view of this message for the viewer
        identified by *user_id*, including the thread's unread count."""
        recipient_info = {
            'id': self.receiver.id,
            'first_name': self.receiver.first_name,
            'last_name': self.receiver.last_name,
            'picture': self.receiver.profile_picture,
        }
        sender_info = {
            'first_name': self.sender.first_name,
            'last_name': self.sender.last_name,
            'id': self.sender.id,
            'picture': self.sender.profile_picture,
        }
        # Unread = messages in this conversation not yet received and not
        # sent by the viewer themselves.
        unread_count = (self.conversation.messages
                        .filter(received = False)
                        .exclude(sender = User.objects.get(id = user_id))
                        .count())
        return {
            'recipient': recipient_info,
            'sender': sender_info,
            'content': self.content,
            'date_sent': self.date_sent.timestamp(),
            'message_id': self.id,
            'conversation_id': self.conversation.id,
            'read': self.received,
            'unread_message_count': unread_count,
        }

    def __str__(self):
        return f"{self.content[0:70]}"
|
/**
* @author Kai Salmen / https://kaisalmen.de
* Development repository: https://github.com/kaisalmen/WWOBJLoader
*/
import {
BufferAttribute,
BufferGeometry,
LineSegments,
Mesh,
Points,
} from "three";
/**
*
* @param {MaterialHandler} materialHandler
* @constructor
*/
const MeshReceiver = function (materialHandler) {
  // General/debug logging switches (toggled via setLogging).
  this.logging = { enabled: false, debug: false };
  // Optional hooks, installed later through _setCallbacks.
  this.callbacks = { onProgress: null, onMeshAlter: null };
  this.materialHandler = materialHandler;
};
MeshReceiver.prototype = {
  constructor: MeshReceiver,

  /**
   * Enable or disable logging in general (except warn and error), plus enable or disable debug logging.
   *
   * @param {boolean} enabled True or false.
   * @param {boolean} debug True or false.
   */
  setLogging: function (enabled, debug) {
    // Strict comparison: any non-boolean input disables the flag.
    this.logging.enabled = enabled === true;
    this.logging.debug = debug === true;
  },

  /**
   * Register optional progress / mesh-alteration hooks; non-functions are ignored.
   *
   * @param {Function} onProgress
   * @param {Function} onMeshAlter
   * @private
   */
  _setCallbacks: function (onProgress, onMeshAlter) {
    if (
      onProgress !== null &&
      onProgress !== undefined &&
      onProgress instanceof Function
    ) {
      this.callbacks.onProgress = onProgress;
    }
    if (
      onMeshAlter !== null &&
      onMeshAlter !== undefined &&
      onMeshAlter instanceof Function
    ) {
      this.callbacks.onMeshAlter = onMeshAlter;
    }
  },

  /**
   * Builds one or multiple meshes from the data described in the payload (buffers, params, material info).
   *
   * @param {Object} meshPayload Raw mesh description (buffers, params, materials) used to build one to many meshes.
   * @returns {Mesh[]} mesh Array of {@link Mesh}
   */
  buildMeshes: function (meshPayload) {
    let meshName = meshPayload.params.meshName;
    let buffers = meshPayload.buffers;

    // Rebuild the BufferGeometry from the transferred typed-array buffers.
    let bufferGeometry = new BufferGeometry();
    if (buffers.vertices !== undefined && buffers.vertices !== null) {
      bufferGeometry.setAttribute(
        "position",
        new BufferAttribute(new Float32Array(buffers.vertices), 3)
      );
    }
    if (buffers.indices !== undefined && buffers.indices !== null) {
      bufferGeometry.setIndex(
        new BufferAttribute(new Uint32Array(buffers.indices), 1)
      );
    }
    if (buffers.colors !== undefined && buffers.colors !== null) {
      bufferGeometry.setAttribute(
        "color",
        new BufferAttribute(new Float32Array(buffers.colors), 3)
      );
    }
    if (buffers.normals !== undefined && buffers.normals !== null) {
      bufferGeometry.setAttribute(
        "normal",
        new BufferAttribute(new Float32Array(buffers.normals), 3)
      );
    } else {
      // No normals supplied: derive them from the geometry instead.
      bufferGeometry.computeVertexNormals();
    }
    if (buffers.uvs !== undefined && buffers.uvs !== null) {
      bufferGeometry.setAttribute(
        "uv",
        new BufferAttribute(new Float32Array(buffers.uvs), 2)
      );
    }
    if (buffers.skinIndex !== undefined && buffers.skinIndex !== null) {
      bufferGeometry.setAttribute(
        "skinIndex",
        new BufferAttribute(new Uint16Array(buffers.skinIndex), 4)
      );
    }
    if (buffers.skinWeight !== undefined && buffers.skinWeight !== null) {
      bufferGeometry.setAttribute(
        "skinWeight",
        new BufferAttribute(new Float32Array(buffers.skinWeight), 4)
      );
    }

    // Resolve material(s) by name; multi-material meshes also need groups.
    let material, materialName, key;
    let materialNames = meshPayload.materials.materialNames;
    let createMultiMaterial = meshPayload.materials.multiMaterial;
    let multiMaterials = [];
    for (key in materialNames) {
      materialName = materialNames[key];
      material = this.materialHandler.getMaterial(materialName);
      if (createMultiMaterial) multiMaterials.push(material);
    }
    if (createMultiMaterial) {
      material = multiMaterials;
      let materialGroups = meshPayload.materials.materialGroups;
      let materialGroup;
      for (key in materialGroups) {
        materialGroup = materialGroups[key];
        bufferGeometry.addGroup(
          materialGroup.start,
          materialGroup.count,
          materialGroup.index
        );
      }
    }

    let meshes = [];
    let mesh;
    let callbackOnMeshAlterResult;
    let useOrgMesh = true;
    let geometryType =
      meshPayload.geometryType === null ? 0 : meshPayload.geometryType;
    // Give the user callback a chance to veto or replace the mesh.
    if (this.callbacks.onMeshAlter) {
      callbackOnMeshAlterResult = this.callbacks.onMeshAlter({
        detail: {
          meshName: meshName,
          bufferGeometry: bufferGeometry,
          material: material,
          geometryType: geometryType,
        },
      });
    }
    // here LoadedMeshUserOverride is required to be provided by the callback used to alter the results
    if (callbackOnMeshAlterResult) {
      if (callbackOnMeshAlterResult.isDisregardMesh()) {
        useOrgMesh = false;
      } else if (callbackOnMeshAlterResult.providesAlteredMeshes()) {
        for (let i in callbackOnMeshAlterResult.meshes) {
          meshes.push(callbackOnMeshAlterResult.meshes[i]);
        }
        useOrgMesh = false;
      }
    }
    if (useOrgMesh) {
      if (meshPayload.computeBoundingSphere)
        bufferGeometry.computeBoundingSphere();
      // geometryType: 0 = Mesh, 1 = LineSegments, anything else = Points.
      if (geometryType === 0) {
        mesh = new Mesh(bufferGeometry, material);
      } else if (geometryType === 1) {
        mesh = new LineSegments(bufferGeometry, material);
      } else {
        mesh = new Points(bufferGeometry, material);
      }
      mesh.name = meshName;
      meshes.push(mesh);
    }

    // Assemble a human-readable progress message and report it.
    let progressMessage = meshPayload.params.meshName;
    if (meshes.length > 0) {
      let meshNames = [];
      for (let i in meshes) {
        mesh = meshes[i];
        meshNames[i] = mesh.name;
      }
      progressMessage +=
        ": Adding mesh(es) (" +
        meshNames.length +
        ": " +
        meshNames +
        ") from input mesh: " +
        meshName;
      progressMessage +=
        " (" +
        (meshPayload.progress.numericalValue * 100).toFixed(2) +
        "%)";
    } else {
      progressMessage += ": Not adding mesh: " + meshName;
      progressMessage +=
        " (" +
        (meshPayload.progress.numericalValue * 100).toFixed(2) +
        "%)";
    }
    if (this.callbacks.onProgress) {
      this.callbacks.onProgress(
        "progress",
        progressMessage,
        meshPayload.progress.numericalValue
      );
    }
    return meshes;
  },
};
/**
* Object to return by callback onMeshAlter. Used to disregard a certain mesh or to return one to many meshes.
* @class
*
* @param {boolean} disregardMesh=false Tell implementation to completely disregard this mesh
 * @param {boolean} alteredMesh=false Tell implementation that mesh(es) have been altered or added
*/
const LoadedMeshUserOverride = function (disregardMesh, alteredMesh) {
  // Only a literal boolean true enables either flag.
  this.disregardMesh = (disregardMesh === true);
  this.alteredMesh = (alteredMesh === true);
  // Replacement meshes supplied by the callback via addMesh().
  this.meshes = [];
};
LoadedMeshUserOverride.prototype = {
  constructor: LoadedMeshUserOverride,

  /**
   * Register a mesh created inside the onMeshAlter callback and mark the
   * result set as altered.
   *
   * @param {Mesh} mesh
   */
  addMesh: function (mesh) {
    this.meshes.push(mesh);
    this.alteredMesh = true;
  },

  /**
   * Whether the original mesh should be dropped entirely.
   *
   * @returns {boolean}
   */
  isDisregardMesh: function () {
    return this.disregardMesh;
  },

  /**
   * Whether replacement mesh(es) were supplied.
   *
   * @returns {boolean}
   */
  providesAlteredMeshes: function () {
    return this.alteredMesh;
  },
};
export { MeshReceiver, LoadedMeshUserOverride };
|
// Fill out your copyright notice in the Description page of Project Settings.
#pragma once
#include "CoreMinimal.h"
#include "Engine/GameInstance.h"
#include "PongGameInstance.generated.h"
/**
*
*/
UCLASS()
class PONG_API UPongGameInstance : public UGameInstance
{
	GENERATED_BODY()

public:
	/** Triggers a server travel. NOTE(review): target map and implementation live in the .cpp — confirm before relying on this description. */
	UFUNCTION(BlueprintCallable, BlueprintPure = false, Category = "C++")
	void ServerTravel() const;

	/** Travels the local client to the given URL (map name or host address); AutoCreateRefTerm lets Blueprint callers omit the URL pin. */
	UFUNCTION(BlueprintCallable, BlueprintPure = false, Category = "C++", meta = (AutoCreateRefTerm = "URL"))
	void ClientTravel(const FString& URL) const;
};
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay
import KratosMultiphysics.PfemFluidDynamicsApplication as KratosPfemFluid
#import KratosMultiphysics.SolidMechanicsApplication as KratosSolid
def CreateMeshingDomain(main_model_part, custom_settings):
    """Factory entry point: build a FluidMeshingDomain for the given model part."""
    return FluidMeshingDomain(main_model_part, custom_settings)
class FluidMeshingDomain(object):
    """Meshing domain for PFEM fluid remeshing.

    The constructor only stores the settings and the pointer to the
    main_model_part; real construction is delayed to Initialize(), which
    is called once the mesher is already filled.
    """

    def __init__(self, main_model_part, custom_settings):
        self.echo_level = 1
        self.main_model_part = main_model_part

        # Default settings (json string); user settings are validated
        # against and merged with these.
        default_settings = KratosMultiphysics.Parameters("""
        {
            "python_module": "meshing_domain",
            "model_part_name": "model_part_name",
            "alpha_shape": 2.4,
            "offset_factor": 0.0,
            "meshing_strategy":{
                "python_module": "meshing_strategy",
                "meshing_frequency": 0.0,
                "remesh": false,
                "refine": false,
                "reconnect": false,
                "transfer": false,
                "constrained": false,
                "mesh_smoothing": false,
                "variables_smoothing": false,
                "elemental_variables_to_smooth":[ "DETERMINANT_F" ],
                "reference_element_type": "Element2D3N",
                "reference_condition_type": "CompositeCondition2D2N"
            },
            "spatial_bounding_box":{
                "use_bounding_box" : true,
                "initial_time" : 0.0,
                "final_time" : 1000.0,
                "upper_point" : [10,10,10],
                "lower_point" : [-10,-10,-10]
            },
            "refining_parameters":{
                "critical_size": 0.0,
                "threshold_variable": "PLASTIC_STRAIN",
                "reference_threshold" : 0.0,
                "error_variable": "NORM_ISOCHORIC_STRESS",
                "reference_error" : 0.0,
                "add_nodes": true,
                "insert_nodes": false,
                "remove_nodes": {
                    "apply_removal": false,
                    "on_distance": false,
                    "on_threshold": false,
                    "on_error": false
                },
                "remove_boundary": {
                    "apply_removal": false,
                    "on_distance": false,
                    "on_threshold": false,
                    "on_error": false
                },
                "refine_elements": {
                    "apply_refinement": false,
                    "on_distance": false,
                    "on_threshold": false,
                    "on_error": false
                },
                "refine_boundary": {
                    "apply_refinement": false,
                    "on_distance": false,
                    "on_threshold": false,
                    "on_error": false
                },
                "refining_box":{
                    "refine_in_box_only": false,
                    "radius": 0.0,
                    "center": [0.0, 0.0, 0.0],
                    "velocity": [0.0, 0.0, 0.0]
                }
            },
            "elemental_variables_to_transfer":[ "CAUCHY_STRESS_VECTOR", "DEFORMATION_GRADIENT" ]
        }
        """)

        # Overwrite the default settings with the user-provided parameters.
        self.settings = custom_settings
        self.settings.ValidateAndAssignDefaults(default_settings)

        # Construct the meshing strategy from the configured python module.
        meshing_module = __import__(self.settings["meshing_strategy"]["python_module"].GetString())
        self.MeshingStrategy = meshing_module.CreateMeshingStrategy(self.main_model_part, self.settings["meshing_strategy"])

        # Remeshing is active when either remesh or transfer is requested.
        self.active_remeshing = (self.settings["meshing_strategy"]["remesh"].GetBool()
                                 or self.settings["meshing_strategy"]["transfer"].GetBool())

        print("::[Meshing_Domain]:: (",self.settings["model_part_name"].GetString()," ) -BUILT-")

    def Initialize(self):
        """Build the meshing parameters and initialize the meshing strategy."""
        print("::[Meshing Domain]:: -START-")

        self.dimension = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION]

        # Set MeshingParameters
        self.SetMeshingParameters()

        # Meshing Strategy
        self.MeshingStrategy.SetEchoLevel(self.echo_level)
        self.MeshingStrategy.Initialize(self.MeshingParameters, self.dimension)

        print("::[Meshing Domain]:: -END- ")

    def SetInfoParameters(self):
        """Create and initialize the meshing info parameters."""
        self.InfoParameters = KratosDelaunay.MeshingInfoParameters()
        self.InfoParameters.Initialize()

    def SetTransferParameters(self):
        """Create the transfer parameters and register the elemental
        variables to transfer."""
        self.TransferParameters = KratosDelaunay.TransferParameters()
        transfer_variables = self.settings["elemental_variables_to_transfer"]
        for i in range(0, transfer_variables.size()):
            self.TransferParameters.SetVariable(KratosMultiphysics.KratosGlobals.GetVariable(transfer_variables[i].GetString()))

    def SetRefiningParameters(self):
        """Configure refining/removing options and thresholds from settings."""
        self.RefiningParameters = KratosDelaunay.RefiningParameters()
        self.RefiningParameters.Initialize()

        # shape parameter
        self.RefiningParameters.SetAlphaParameter(self.settings["alpha_shape"].GetDouble())

        # custom set of the mesh size from settings from initial mesh or other parts
        self.SetMeshSizeValues()

        # set mesh refinement in box
        size = self.dimension
        # Renamed: the original reused the name 'refining_box' for both the
        # settings sub-block and the SpatialBoundingBox built from it.
        refining_box_settings = self.settings["refining_parameters"]["refining_box"]
        if(refining_box_settings["refine_in_box_only"].GetBool()):
            radius = refining_box_settings["radius"].GetDouble()
            # BUG FIX: 'Vector' was referenced unqualified (NameError at
            # runtime); it lives in the KratosMultiphysics module.
            center = KratosMultiphysics.Vector(size)
            velocity = KratosMultiphysics.Vector(size)
            for i in range(0, size):
                center[i] = refining_box_settings["center"][i].GetDouble()
                velocity[i] = refining_box_settings["velocity"][i].GetDouble()
            refining_box = KratosDelaunay.SpatialBoundingBox(center, radius, velocity)
            self.RefiningParameters.SetRefiningBox(refining_box)

        self.RefiningParameters.SetThresholdVariable(KratosMultiphysics.KratosGlobals.GetVariable(self.settings["refining_parameters"]["threshold_variable"].GetString()))
        self.RefiningParameters.SetReferenceThreshold(self.settings["refining_parameters"]["reference_threshold"].GetDouble())
        self.RefiningParameters.SetErrorVariable(KratosMultiphysics.KratosGlobals.GetVariable(self.settings["refining_parameters"]["error_variable"].GetString()))
        self.RefiningParameters.SetReferenceError(self.settings["refining_parameters"]["reference_error"].GetDouble())

        removing_options = KratosMultiphysics.Flags()

        # remove nodes
        remove_nodes = self.settings["refining_parameters"]["remove_nodes"]
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES, remove_nodes["apply_removal"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_DISTANCE, remove_nodes["on_distance"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_ERROR, remove_nodes["on_error"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_THRESHOLD, remove_nodes["on_threshold"].GetBool())

        # remove boundary
        remove_boundary = self.settings["refining_parameters"]["remove_boundary"]
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES, remove_boundary["apply_removal"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_DISTANCE, remove_boundary["on_distance"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_ERROR, remove_boundary["on_error"].GetBool())
        removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_THRESHOLD, remove_boundary["on_threshold"].GetBool())

        refining_options = KratosMultiphysics.Flags()
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE, self.settings["meshing_strategy"]["refine"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ADD_NODES, self.settings["refining_parameters"]["add_nodes"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_INSERT_NODES, self.settings["refining_parameters"]["insert_nodes"].GetBool())

        # refine elements
        refine_elements = self.settings["refining_parameters"]["refine_elements"]
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS, refine_elements["apply_refinement"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_DISTANCE, refine_elements["on_distance"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_ERROR, refine_elements["on_error"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_THRESHOLD, refine_elements["on_threshold"].GetBool())

        # refine boundary
        refine_boundary = self.settings["refining_parameters"]["refine_boundary"]
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY, refine_boundary["apply_refinement"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_DISTANCE, refine_boundary["on_distance"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_ERROR, refine_boundary["on_error"].GetBool())
        refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_THRESHOLD, refine_boundary["on_threshold"].GetBool())

        self.RefiningParameters.SetRefiningOptions(refining_options)
        self.RefiningParameters.SetRemovingOptions(removing_options)

    def SetMeshingParameters(self):
        """Assemble the MeshingParameters object consumed by the strategy."""
        self.MeshingParameters = KratosDelaunay.MeshingParameters()
        self.MeshingParameters.Initialize()

        self.MeshingParameters.SetSubModelPartName(self.settings["model_part_name"].GetString())

        if(self.active_remeshing):
            self.MeshingParameters.SetAlphaParameter(self.settings["alpha_shape"].GetDouble())
            self.MeshingParameters.SetOffsetFactor(self.settings["offset_factor"].GetDouble())

            self.SetInfoParameters()
            self.SetTransferParameters()
            self.SetRefiningParameters()

            self.MeshingParameters.SetInfoParameters(self.InfoParameters)
            self.MeshingParameters.SetTransferParameters(self.TransferParameters)
            self.MeshingParameters.SetRefiningParameters(self.RefiningParameters)

            bounding_box = self.settings["spatial_bounding_box"]
            if(bounding_box["use_bounding_box"].GetBool()):
                self.MeshingParameters.SetUseBoundingBox(True)
                self.MeshingParameters.SetBoundingBoxLowerPoint(bounding_box["lower_point"][0].GetDouble(),bounding_box["lower_point"][1].GetDouble(),bounding_box["lower_point"][2].GetDouble())
                self.MeshingParameters.SetBoundingBoxUpperPoint(bounding_box["upper_point"][0].GetDouble(),bounding_box["upper_point"][1].GetDouble(),bounding_box["upper_point"][2].GetDouble())
                self.MeshingParameters.SetBoundingBoxTimeInterval(bounding_box["initial_time"].GetDouble(),bounding_box["final_time"].GetDouble())

    def ExecuteMeshing(self):
        """Generate a new mesh, but only when remeshing is active."""
        if( self.active_remeshing ):
            self.MeshingStrategy.GenerateMesh()

    def SetMeshSizeValues(self):
        """Set the critical radius/side sizes on the refining parameters."""
        critical_mesh_size = self.settings["refining_parameters"]["critical_size"].GetDouble()

        # Historical wall-tip based sizing kept for reference:
        # tool_arch_opening = 12  # tip arch opening (in degrees = 5-7.5-10)
        # tool_arch_length = tool_arch_opening * (3.1416 / 180.0)
        # critical_mesh_size = tool_arch_length * parameters["CriticalTipRadius"]

        critical_mesh_side = critical_mesh_size * 3

        self.RefiningParameters.SetCriticalRadius(critical_mesh_size)
        self.RefiningParameters.SetCriticalSide(critical_mesh_side)

    def Check(self):
        """Sanity-check the configured critical radius against the model part."""
        self.mesher_utils = KratosDelaunay.MesherUtilities()

        critical_mesh_size = self.settings["refining_parameters"]["critical_size"].GetDouble()
        critical_radius = self.mesher_utils.CheckCriticalRadius(self.main_model_part,critical_mesh_size)
        print(" CriticalRadius ", critical_radius)

    def Active(self):
        """Return True when remeshing/transfer is enabled for this domain."""
        return self.active_remeshing

    def SetEchoLevel(self, echo_level):
        """Set the verbosity level forwarded to the meshing strategy."""
        self.echo_level = echo_level

    def GetVariables(self):
        """Return the names of the elemental variables to transfer."""
        nodal_variables = []
        transfer_variables = self.settings["elemental_variables_to_transfer"]
        for i in range(0, transfer_variables.size()):
            nodal_variables.append(transfer_variables[i].GetString())
        return nodal_variables

    def ComputeAverageMeshParameters(self):
        """Compute the domain volume and mean element volume and store the
        mean in the refining parameters."""
        mesher_utils = KratosDelaunay.MesherUtilities()
        self.domain_volume = mesher_utils.ComputeModelPartVolume(self.main_model_part)
        self.element_mean_volume = 0

        number_of_elements = self.main_model_part.NumberOfElements()
        nodes_for_element = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION] + 1

        if(number_of_elements != 0):
            self.element_mean_volume = self.domain_volume/float(number_of_elements*nodes_for_element)

        self.RefiningParameters.SetMeanVolume(self.element_mean_volume)

    def GetMeanVolume(self):
        """Return the mean element volume computed by ComputeAverageMeshParameters()."""
        return self.element_mean_volume

    def GetTotalVolume(self):
        """Return the total domain volume computed by ComputeAverageMeshParameters()."""
        return self.domain_volume

    def ComputeInitialAverageMeshParameters(self):
        """Seed the critical/initial radii from the average NODAL_H of fluid
        nodes and initialize the time-step bookkeeping variables."""
        numFluid = 0
        mean_nodal_h = 0
        for node in self.main_model_part.Nodes:
            if (node.Is(KratosMultiphysics.FLUID)):
                numFluid += 1
                mean_nodal_h += node.GetSolutionStepValue(KratosMultiphysics.NODAL_H)

        # NOTE(review): raises ZeroDivisionError when the model part has no
        # FLUID nodes -- behavior preserved from the original; confirm that
        # callers guarantee at least one fluid node.
        mean_nodal_h *= 1.0/numFluid

        print("the mean_nodal_h is ",mean_nodal_h)

        self.RefiningParameters.SetCriticalRadius(mean_nodal_h)
        self.RefiningParameters.SetInitialRadius(mean_nodal_h)

        delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
        self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.INITIAL_DELTA_TIME,delta_time)
        self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.CURRENT_DELTA_TIME,delta_time)
        self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.PREVIOUS_DELTA_TIME,delta_time)
        self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.TIME_INTERVAL_CHANGED,False)
#
|
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
// Pairs of [nota, peso-en-porcentaje] per evaluation.
const notasEstudiantes = [
  [8, 35],
  [10, 45],
  [7, 20],
];

/**
 * Compute the weighted mean of notasEstudiantes:
 * sum(nota * peso) / sum(pesos).
 *
 * BUG FIX: the original iterated with an array (notasEstudiantes[0]) as the
 * loop bound, discarded every computation and returned undefined. The unused
 * parameters are kept so the signature stays backward-compatible.
 *
 * @returns {number} The weighted mean (0 when there are no weights).
 */
function calcularMediaPonderada(elemento1, elemento2, elemento3) {
  let sumaPonderada = 0;
  let sumaPesos = 0;
  for (const [nota, peso] of notasEstudiantes) {
    sumaPonderada += nota * peso;
    sumaPesos += peso;
  }
  return sumaPesos === 0 ? 0 : sumaPonderada / sumaPesos;
}
|
/**
* @file
* JavaScript behaviors for HTML editor integration.
*/
(function ($, Drupal, drupalSettings) {

  'use strict';

  // @see http://docs.ckeditor.com/#!/api/CKEDITOR.config
  Drupal.webform = Drupal.webform || {};
  Drupal.webform.htmlEditor = Drupal.webform.htmlEditor || {};
  // Site-wide option overrides; merged over the defaults below.
  Drupal.webform.htmlEditor.options = Drupal.webform.htmlEditor.options || {};

  /**
   * Initialize HTML Editor.
   *
   * @type {Drupal~behavior}
   */
  Drupal.behaviors.webformHtmlEditor = {
    attach: function (context) {
      // CKEditor may not be loaded on every page; bail out quietly.
      if (!window.CKEDITOR) {
        return;
      }
      $(context).find('textarea.js-html-editor').once('webform-html-editor').each(function () {
        var $textarea = $(this);
        var allowedContent = drupalSettings['webform']['html_editor']['allowedContent'];
        // Load additional CKEditor plugins used by the Webform HTML editor.
        // @see \Drupal\webform\Element\WebformHtmlEditor::preRenderWebformHtmlEditor
        // @see \Drupal\webform\WebformLibrariesManager::initLibraries
        var plugins = drupalSettings['webform']['html_editor']['plugins'];
        // If requirejs is present don't use the codemirror plugin.
        // @see Issue #2936147: ckeditor.codemirror plugin breaks admin textarea.
        // @todo Remove the below code once this issue is resolved.
        if (plugins.codemirror
          && drupalSettings.yamlEditor
          && drupalSettings.yamlEditor.source
          && drupalSettings.yamlEditor.source.indexOf('noconflict') !== -1) {
          delete plugins.codemirror;
          if ('console' in window) {
            window.console.log('YAML Editor module is not compatible with the ckeditor.codemirror plugin. @see Issue #2936147: ckeditor.codemirror plugin breaks admin textarea.');
          }
        }
        for (var plugin_name in plugins) {
          if (plugins.hasOwnProperty(plugin_name)) {
            CKEDITOR.plugins.addExternal(plugin_name, plugins[plugin_name]);
          }
        }
        var options = {
          // Turn off external config and styles.
          customConfig: '',
          stylesSet: false,
          contentsCss: [],
          allowedContent: allowedContent,
          // Use <br> tags instead of <p> tags.
          enterMode: CKEDITOR.ENTER_BR,
          shiftEnterMode: CKEDITOR.ENTER_BR,
          // Set height.
          height: '100px',
          // Remove status bar.
          resize_enabled: false,
          removePlugins: 'elementspath,magicline',
          // Toolbar settings.
          format_tags: 'p;h2;h3;h4;h5;h6',
          // extraPlugins is appended to (comma-separated) below.
          extraPlugins: ''
        };
        // Add toolbar.
        if (!options.toolbar) {
          options.toolbar = [];
          options.toolbar.push({name: 'styles', items: ['Format', 'Font', 'FontSize']});
          options.toolbar.push({name: 'basicstyles', items: ['Bold', 'Italic', 'Subscript', 'Superscript']});
          // Add IMCE image button.
          if (CKEDITOR.plugins.get('imce')) {
            CKEDITOR.config.ImceImageIcon = drupalSettings['webform']['html_editor']['ImceImageIcon'];
            options.extraPlugins += (options.extraPlugins ? ',' : '') + 'imce';
            options.toolbar.push({name: 'insert', items: ['ImceImage', 'SpecialChar']});
          }
          else {
            options.toolbar.push({name: 'insert', items: ['SpecialChar']});
          }
          // Add link plugin.
          if (plugins['link']) {
            options.extraPlugins += (options.extraPlugins ? ',' : '') + 'link';
            options.toolbar.push({name: 'links', items: ['Link', 'Unlink']});
          }
          options.toolbar.push({name: 'colors', items: ['TextColor', 'BGColor']});
          options.toolbar.push({name: 'paragraph', items: ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote']});
          options.toolbar.push({name: 'tools', items: ['Source', '-', 'Maximize']});
        }
        // Add autogrow plugin.
        if (plugins['autogrow']) {
          options.extraPlugins += (options.extraPlugins ? ',' : '') + 'autogrow';
          options.autoGrow_minHeight = 60;
          options.autoGrow_maxHeight = 300;
        }
        // Add CodeMirror integration plugin.
        if (plugins['codemirror']) {
          options.extraPlugins += (options.extraPlugins ? ',' : '') + 'codemirror';
          options.codemirror = {
            mode: 'text/html'
          };
        }
        // Site-wide overrides always win over the defaults above.
        options = $.extend(options, Drupal.webform.htmlEditor.options);
        // Catch and suppress
        // "Uncaught TypeError: Cannot read property 'getEditor' of undefined".
        //
        // Steps to reproduce this error.
        // - Goto any form elements.
        // - Edit an element.
        // - Save the element.
        try {
          CKEDITOR.replace(this.id, options).on('change', function (evt) {
            // Save data onchange since Ajax dialogs don't execute form.onsubmit.
            $textarea.val(evt.editor.getData().trim());
          });
        }
        catch (e) {
          // Do nothing.
        }
      });
    }
  };

})(jQuery, Drupal, drupalSettings);
;
/**
* @file
* JavaScript behaviors for other elements.
*/
(function ($, Drupal) {
'use strict';
/**
* Toggle other input (text) field.
*
* @param {boolean} show
* TRUE will display the text field. FALSE with hide and clear the text field.
* @param {object} $element
* The input (text) field to be toggled.
* @param {string} effect
* Effect.
*/
function toggleOther(show, $element, effect) {
var $input = $element.find('input');
var hideEffect = (effect === false) ? 'hide' : 'slideUp';
var showEffect = (effect === false) ? 'show' : 'slideDown';
if (show) {
// Limit the other inputs width to the parent's container.
// If the parent container is not visible it's width will be 0
// and ignored.
var width = $element.parent().width();
if (width) {
$element.width(width);
}
// Display the element.
$element[showEffect]();
// If not initializing, then focus the other element.
if (effect !== false) {
$input.focus();
}
// Require the input.
$input.prop('required', true).attr('aria-required', 'true');
// Restore the input's value.
var value = $input.data('webform-value');
if (typeof value !== 'undefined') {
$input.val(value);
var input = $input.get(0);
// Move cursor to the beginning of the other text input.
// @see https://stackoverflow.com/questions/21177489/selectionstart-selectionend-on-input-type-number-no-longer-allowed-in-chrome
if ($.inArray(input.type, ['text', 'search', 'url', 'tel', 'password']) !== -1) {
input.setSelectionRange(0, 0);
}
}
// Refresh CodeMirror used as other element.
$element.parent().find('.CodeMirror').each(function (index, $element) {
$element.CodeMirror.refresh();
});
}
else {
// Hide the element.
$element[hideEffect]();
// Save the input's value.
$input.data('webform-value', $input.val());
// Empty and un-required the input.
$input.val('').prop('required', false).removeAttr('aria-required');
}
}
/**
* Attach handlers to select other elements.
*
* @type {Drupal~behavior}
*/
Drupal.behaviors.webformSelectOther = {
attach: function (context) {
$(context).find('.js-webform-select-other').once('webform-select-other').each(function () {
var $element = $(this);
var $select = $element.find('select');
var $input = $element.find('.js-webform-select-other-input');
$select.on('change', function () {
var isOtherSelected = $select
.find('option[value="_other_"]')
.is(':selected');
toggleOther(isOtherSelected, $input);
});
var isOtherSelected = $select
.find('option[value="_other_"]')
.is(':selected');
toggleOther(isOtherSelected, $input, false);
});
}
};
/**
* Attach handlers to checkboxes other elements.
*
* @type {Drupal~behavior}
*/
Drupal.behaviors.webformCheckboxesOther = {
attach: function (context) {
$(context).find('.js-webform-checkboxes-other').once('webform-checkboxes-other').each(function () {
var $element = $(this);
var $checkbox = $element.find('input[value="_other_"]');
var $input = $element.find('.js-webform-checkboxes-other-input');
$checkbox.on('change', function () {
toggleOther(this.checked, $input);
});
toggleOther($checkbox.is(':checked'), $input, false);
});
}
};
/**
 * Attach handlers to radios "other" elements.
 *
 * @type {Drupal~behavior}
 */
Drupal.behaviors.webformRadiosOther = {
  attach: function (context) {
    $(context).find('.js-webform-radios-other').once('webform-radios-other').each(function () {
      var $wrapper = $(this);
      var $radios = $wrapper.find('input[type="radio"]');
      var $input = $wrapper.find('.js-webform-radios-other-input');
      // True when the checked radio is the "_other_" choice.
      var otherIsChecked = function () {
        return $radios.filter(':checked').val() === '_other_';
      };
      $radios.on('change', function () {
        toggleOther(otherIsChecked(), $input);
      });
      // Initial state: third argument suppresses the show/hide animation.
      toggleOther(otherIsChecked(), $input, false);
    });
  }
};
/**
 * Attach handlers to buttons "other" elements.
 *
 * @type {Drupal~behavior}
 */
Drupal.behaviors.webformButtonsOther = {
  attach: function (context) {
    $(context).find('.js-webform-buttons-other').once('webform-buttons-other').each(function () {
      var $element = $(this);
      var $buttons = $element.find('input[type="radio"]');
      var $input = $element.find('.js-webform-buttons-other-input');
      // Reuse $element instead of re-wrapping `this`, consistent with the
      // sibling behaviors above.
      var $container = $element.find('.js-webform-webform-buttons');
      // Use .on('change', ...) rather than the deprecated .change()
      // shorthand, matching the other "other" behaviors in this file.
      $container.on('change', function () {
        toggleOther(($(this).find(':radio:checked').val() === '_other_'), $input);
      });
      // Initial state: third argument suppresses the show/hide animation.
      toggleOther(($buttons.filter(':checked').val() === '_other_'), $input, false);
    });
  }
};
})(jQuery, Drupal);
;
|
// Singleton controller for the AdminLTE-based admin UI: page access checks,
// permission-driven show/hide of elements, sidebar menu search, message
// badges and character ("PG") info.
// NOTE(review): relies on globals `Utils`, `Constants`, jQuery and
// window.localStorage being loaded first — confirm script order.
var AdminLTEManager = function ()
{
    // Current page name derived from the URL: site prefix, query string,
    // file extension (with optional trailing '#') and a "live_" prefix are
    // all stripped.
    var SECTION_NAME = window.location.href.replace( Constants.SITE_URL+"/", "" ).replace( window.location.search, "" ).replace(/\.\w+#?$/,"").replace(/live_/,""),
        // Pages that never require an access check.
        NO_CONTROLLO = ["index","recupera_password","registrazione"];
    return {
        // Entry point: load cached user/character info from localStorage,
        // apply event mode, wire listeners and reveal permitted menu items.
        init: function ()
        {
            this.user_info = this.user_info || JSON.parse( window.localStorage.getItem('user') );
            this.pg_info = JSON.parse( window.localStorage.getItem('logged_pg') );
            this.controllaModalitaEvento();
            this.setListeners();
            this.controllaPermessi(".sidebar-menu", true);
        },
        // Run the server-side page access check except for public pages and
        // any page whose name contains "test".
        controllaAccesso: function ()
        {
            if( SECTION_NAME !== ""
                && NO_CONTROLLO.indexOf( SECTION_NAME ) === -1
                && SECTION_NAME.indexOf( "test" ) === -1 )
                Utils.controllaAccessoPagina( SECTION_NAME );
        },
        // Toggle "event ongoing" mode: when a character is logged and the
        // user has a pending character login, strip the main-page links and
        // stop the background video; otherwise re-show those links.
        // NOTE(review): reads this.user_info.pg_da_loggare without a null
        // check on user_info — assumes init() populated it; confirm.
        controllaModalitaEvento: function ()
        {
            if( this.pg_info && typeof this.user_info.pg_da_loggare !== "undefined" )
            {
                $("body").addClass("event_ongoing");
                $(".visualizza_pagina_main").remove();
                $("#btn_visualizza_pagina_gestione_eventi").remove();
                $("#logo_link").attr( "href", Constants.PG_PAGE );
                if($("#background_video")[0])
                {
                    $("#background_video").attr("autoplay", null);
                    $("#background_video")[0].pause();
                }
            }
            else
            {
                $(".visualizza_pagina_main").removeClass("inizialmente-nascosto").show();
                $("#btn_visualizza_pagina_gestione_eventi").removeClass("inizialmente-nascosto").show();
            }
        },
        // Display the character's name (argument or cached pg_info) in the
        // sidebar panel and set the online status indicator.
        mostraNomePersonaggio: function ( nome )
        {
            var id_personaggio = "";
            if( typeof nome === "undefined" && typeof this.pg_info !== "undefined" && this.pg_info )
            {
                nome = this.pg_info.nome_personaggio;
                id_personaggio = this.pg_info.id_personaggio;
            }
            if ( nome )
            {
                $("#nome_personaggio").find("p").text(nome);
                $("#nome_personaggio").find(".fa").removeClass("text-danger").addClass("text-success");
                $("#pg_status").text(" Online");
                $(".nome_personaggio").text( nome );
                if( typeof this.pg_info !== "undefined" && this.pg_info )
                    $("#live_matricola").text("# SGC0215AT54RD" + this.pg_info.id_personaggio);
            }
        },
        // Reveal elements gated behind a permission string: both the exact
        // permission and its "generic" form (grant-type suffixes removed)
        // are matched against #btn_<permesso> ids and .<permesso> classes.
        mostraElementiNascosti: function ( in_selector, animate, permesso )
        {
            var permesso_generico = permesso.replace(Constants.TIPO_GRANT_PG_ALTRI, "").replace(Constants.TIPO_GRANT_PG_PROPRIO,""),
                animation = animate ? "fadeIn" : null;
            if( typeof permesso === "string" && $("#btn_" + permesso).length > 0 )
            {
                $(in_selector).find("#btn_" + permesso).show(animation);
                $("#btn_" + permesso).removeClass("inizialmente-nascosto");
            }
            if ( typeof permesso === "string" && $("."+permesso).length > 0 )
            {
                $(in_selector).find("." + permesso).show(animation);
                $("." + permesso).removeClass("inizialmente-nascosto");
            }
            if ( typeof permesso === "string" && $("#btn_"+permesso_generico).length > 0 )
            {
                $(in_selector).find("#btn_" + permesso_generico).show(animation);
                $("#btn_" + permesso_generico).removeClass("inizialmente-nascosto");
            }
            if ( typeof permesso === "string" && $("."+permesso_generico).length > 0 )
            {
                $(in_selector).find("." + permesso_generico).show(animation);
                $("." + permesso_generico).removeClass("inizialmente-nascosto");
            }
        },
        // Hide everything marked "inizialmente-nascosto" inside in_selector,
        // then reveal what the user's and character's permissions allow.
        controllaPermessi: function ( in_selector, animate )
        {
            in_selector = typeof in_selector === "undefined" ? ".content-wrapper > .content" : in_selector;
            animate = typeof animate === "undefined" ? false : animate;
            this.user_info = this.user_info || JSON.parse( window.localStorage.getItem('user') );
            this.pg_info = JSON.parse( window.localStorage.getItem('logged_pg') );
            $(in_selector).find(".inizialmente-nascosto:not(.no-hide)").hide();
            if( this.user_info )
            {
                for( var p in this.user_info.permessi )
                {
                    var permesso = this.user_info.permessi[p];
                    this.mostraElementiNascosti( in_selector, animate, permesso );
                }
                $(".nome_giocatore").each(function( i, el )
                {
                    $(el).text( this.user_info.nome_giocatore );
                }.bind( this ) );
            }
            if (this.pg_info)
            {
                for (var p in this.pg_info.permessi)
                {
                    var permesso = this.pg_info.permessi[p];
                    this.mostraElementiNascosti(in_selector, animate, permesso);
                }
            }
            this.mostraNomePersonaggio();
            this.controllaModalitaEvento();
            this.setupMenuSearch();
        },
        // Server-side logout, then clear local storage and go to the site root.
        logout: function ()
        {
            Utils.requestData(
                Constants.API_GET_LOGOUT,
                "GET",
                "",
                function( data )
                {
                    Utils.clearLocalStorage();
                    window.location.href = Constants.SITE_URL;
                }.bind(this)
            );
        },
        // (Re)bind the sidebar search box: filter menu items by text,
        // restoring visibility/active state when the query is cleared.
        setupMenuSearch: function ()
        {
            // unbind first so repeated calls don't stack keyup handlers.
            $( '#search-input' ).unbind( 'keyup' );
            $( '#search-input' ).on( 'keyup', function ()
            {
                var term = $( '#search-input' ).val().trim();
                if ( term.length === 0 )
                {
                    // Empty query: show every non-hidden item and restore
                    // the remembered "active" entry.
                    $( '.sidebar-menu li' ).each( function ()
                    {
                        var elem = $(this);
                        if( !elem.hasClass("inizialmente-nascosto") )
                            elem.show( 0 );
                        elem.removeClass( 'active' );
                        if ( elem.data( 'lte.pushmenu.active' ) )
                            elem.addClass( 'active' );
                    } );
                    return;
                }
                $( '.sidebar-menu li' ).each( function ()
                {
                    var elem = $(this);
                    if( !elem.hasClass("inizialmente-nascosto") )
                    {
                        if (elem.text().toLowerCase().indexOf(term.toLowerCase()) === -1)
                        {
                            elem.hide(0);
                            // NOTE(review): removeClass takes no second
                            // argument — the `false` is ignored by jQuery.
                            elem.removeClass('pushmenu-search-found', false);
                            if (elem.is('.treeview'))
                            {
                                elem.removeClass('active');
                            }
                        }
                        else
                        {
                            elem.show(0);
                            elem.addClass('pushmenu-search-found');
                            if (elem.is('.treeview'))
                            {
                                elem.addClass('active');
                            }
                            // Keep the parent treeview visible so the match
                            // is reachable.
                            var parent = elem.parents('li').first();
                            if (parent.is('.treeview'))
                            {
                                parent.show(0);
                            }
                        }
                        // Section headers always stay visible.
                        if (elem.is('.header'))
                        {
                            elem.show();
                        }
                    }
                } );
                // Re-show matched children of matched treeviews.
                $( '.sidebar-menu li.pushmenu-search-found.treeview' ).each( function ()
                {
                    $( this ).find( '.pushmenu-search-found' ).show( 0 );
                } );
            } );
        },
        // Update the unread-message badges: the out-of-game badge is removed
        // entirely while an event is ongoing.
        aggiornaBadgeMessaggi: function ( data )
        {
            if( this.pg_info && typeof this.user_info.pg_da_loggare !== "undefined" )
                $("#num_mex_fg").remove();
            else
                $("#num_mex_fg").text( data.result.fg );
            if( typeof data.result.ig !== "undefined" )
            {
                $("#num_mex_ig").text( data.result.ig );
                $("#num_mex_ig").removeClass("inizialmente-nascosto");
                $("#num_mex_ig").show();
            }
        },
        // Poll the server for new-message counts.
        controllaMessaggi: function ()
        {
            Utils.requestData(
                Constants.API_GET_MESSAGGI_NUOVI,
                "GET",
                {},
                this.aggiornaBadgeMessaggi.bind(this)
            );
        },
        // Refresh the cached character data from the server, dropping the
        // heavy background/notes fields before storing; optional callback
        // receives the trimmed record.
        aggiornaDatiPG : function ( datiAggiornati )
        {
            var dati = { pgid : JSON.parse( window.localStorage.getItem("logged_pg") ).id_personaggio };
            Utils.requestData(
                Constants.API_GET_PG_LOGIN,
                "GET",
                dati,
                function (data)
                {
                    this.pg_info = data.result;
                    var pg_no_bg = JSON.parse( JSON.stringify(this.pg_info) );
                    delete pg_no_bg.background_personaggio;
                    delete pg_no_bg.note_master_personaggio;
                    window.localStorage.removeItem('logged_pg');
                    window.localStorage.setItem('logged_pg', JSON.stringify(pg_no_bg));
                    if( typeof datiAggiornati === "function" ) datiAggiornati( pg_no_bg );
                }.bind(this)
            );
        },
        // Wire all static UI listeners and start the periodic message poll
        // (skipped on public/test pages).
        setListeners: function ()
        {
            this.setupMenuSearch();
            if( jQuery().tree ) $( 'ul.tree' ).tree();
            if( jQuery().tooltip ) $( '[data-toggle="tooltip"]' ).tooltip();
            $( '#sidebar-form' ).on( 'submit', function ( e )
            {
                e.preventDefault();
            } );
            // Remember the initially active menu entry for the search reset.
            $( '.sidebar-menu li.active' ).data( 'lte.pushmenu.active', true );
            $( '#logoutBtn' ).click( this.logout.bind(this) );
            $( '#btn_visualizza_pagina_profilo' ).click( Utils.redirectTo.bind(this,Constants.PROFILO_PAGE) );
            $( '#logo_link' ).attr("href", Constants.MAIN_PAGE );
            Utils.setSubmitBtn();
            if( this.pg_info )
                $("#nome_personaggio").parents(".user-panel").click(Utils.redirectTo.bind(this,Constants.PG_PAGE));
            if ( SECTION_NAME !== ""
                && NO_CONTROLLO.indexOf( SECTION_NAME ) === -1
                && SECTION_NAME.indexOf( "test" ) === -1 )
            {
                this.controllaMessaggi();
                setInterval(this.controllaMessaggi.bind(this), Constants.INTERVALLO_CONTROLLO_MEX);
            }
        }
    }
}();
// Access check runs immediately (before DOM ready) so unauthorized users
// are redirected as early as possible; full init waits for the DOM.
AdminLTEManager.controllaAccesso();
$( document ).ready( function ( e )
{
    AdminLTEManager.init();
} );
|
// Doxygen-generated navigation data: ADC register-offset symbols paired
// with the documentation anchors they link to. Do not edit by hand.
var group__ADC__Register__Offsets =
[
    [ "MXC_R_ADC_CTRL", "group__ADC__Register__Offsets.html#ga9e1797652143ddae4930a834d0f93e6f", null ],
    [ "MXC_R_ADC_STATUS", "group__ADC__Register__Offsets.html#gafa877017385418fa56e9ecaae03ca97c", null ],
    [ "MXC_R_ADC_DATA", "group__ADC__Register__Offsets.html#gab691c46aee10076bc9b3eeece40d0a4c", null ],
    [ "MXC_R_ADC_INTR", "group__ADC__Register__Offsets.html#ga30fb1f06eb28a8bb41ad0c8d304a410b", null ],
    [ "MXC_R_ADC_LIMIT", "group__ADC__Register__Offsets.html#gae70d391af4bab878dc53969c66c2999d", null ]
];
|
'use strict';

const fs = require('fs');
const childProcess = require('child_process');

// Daemonizer: `start` spawns a detached child and records its pid in a
// combined lock/pid file; `stop` signals the recorded pid and waits for it
// to exit.
const Daemon = function() {
    // Populated by __init from the application settings unit.
    this.settings = null;
};

// Process exit codes used when an operation fails.
Daemon.prototype.lockFailedExitCode = 1;
Daemon.prototype.startFailedExitCode = 2;
Daemon.prototype.stopFailedExitCode = 1;
Daemon.prototype.releaseLockFailedExitCode = 2;

// Framework hook: this unit wants __init() called with the DI container.
Daemon.prototype.__initRequired = true;

Daemon.prototype.__init = function(units) {
    // Daemon settings (pidFile, start.{exec,args,stdout,stderr},
    // exitCheckInterval) come from the core settings unit.
    this.settings = units.require('core.settings').core.daemon;
};
// Acquire the pid-file lock, spawn the daemon child, record its pid, then
// exit the launcher process.
Daemon.prototype.start = function() {
    let fd;
    try {
        fd = this.createLock();
    } catch (err) {
        // Lock already held (or path unwritable): assume another instance.
        this.reportLockFailed(err);
        process.exit(this.lockFailedExitCode);
    }
    let child = this.createProcess();
    this.reportStarted(child.pid);
    try {
        this.savePid(fd, child.pid);
    } catch (err) {
        // Non-fatal: the child is already running; we just failed to record
        // its pid for a later `stop`.
        this.reportSavePidFailed(err);
    }
    process.exit();
};
// Stop the daemon. Without a callback the current process exits with the
// code produced by the stop procedure.
Daemon.prototype.stop = function(cb) {
    if (!cb) {
        cb = function(code) {
            process.exit(code);
        };
    }
    this.stopInternal(cb);
};
// Resolve the recorded pid and try to terminate the daemon, releasing the
// pid-file lock in every "already stopped" situation. `cb` receives an
// exit code on failure and no argument on success.
Daemon.prototype.stopInternal = function(cb) {
    const pid = this.loadPid();
    if (!pid) {
        this.reportPidFileNotFound();
    } else if (isNaN(pid)) {
        // Pid file exists but holds garbage: just drop the stale lock.
        this
            .reportBadPidFile()
            .releaseLockWithReport();
    } else {
        let notFound = false;
        try {
            // Default signal (SIGTERM) asks the daemon to shut down.
            process.kill(pid);
        } catch (err) {
            if (err.code === 'ESRCH') {
                notFound = true;
            } else {
                this.reportSignalFailed(err, pid);
                cb(this.stopFailedExitCode);
                return;
            }
        }
        if (notFound) {
            // Stale pid: the process is already gone; release the lock.
            this
                .reportProcessNotFound(pid)
                .releaseLockWithReport();
        } else {
            this.reportSignalSent(pid);
            // Poll until the process disappears, then clean up quietly
            // (skipSuccessReport=true) and report "Stopped".
            this.waitForExit(pid, () => {
                this
                    .reportStopped()
                    .releaseLockWithReport(true);
                cb();
            });
            return;
        }
    }
    cb();
};
// Restart = stop (waiting for the old process to exit) then start again.
Daemon.prototype.restart = function() {
    const self = this;
    self.stop(function() {
        self.start();
    });
};
// Internal stuff

// Create the pid file exclusively ('wx') so a second instance fails fast.
// Returns the open file descriptor later used by savePid().
Daemon.prototype.createLock = function() {
    let realPath;
    try {
        realPath = fs.realpathSync(this.settings.pidFile);
    } catch (err) {
        if (err.code === 'ENOENT') {
            // Pid file does not exist yet: use the configured path as-is.
            realPath = this.settings.pidFile;
        } else {
            throw err;
        }
    }
    return fs.openSync(realPath, 'wx');
};

// Remove the pid file; a missing file is not an error (already released).
Daemon.prototype.releaseLock = function() {
    try {
        let realPath = fs.realpathSync(this.settings.pidFile);
        fs.unlinkSync(realPath);
    } catch (err) {
        if (err.code !== 'ENOENT') {
            throw err;
        }
    }
};
// Release the lock, treating a release failure as fatal; the success
// message can be suppressed via skipSuccessReport.
Daemon.prototype.releaseLockWithReport = function(skipSuccessReport) {
    try {
        this.releaseLock();
    } catch (err) {
        this.reportReleaseLockFailed(err);
        process.exit(this.releaseLockFailedExitCode);
    }
    if (skipSuccessReport) {
        return;
    }
    this.reportLockReleased();
};
// Spawn the detached daemon child; a synchronous spawn error aborts the
// launcher via startFailed() (which never returns).
Daemon.prototype.createProcess = function() {
    let child;
    let exec = this.getStartExec();
    let args = this.getStartArgs();
    let options = this.getStartOptions();
    try {
        child = childProcess.spawn(exec, args, options);
    } catch (err) {
        this.startFailed(err);
    }
    return child;
};

// Report the start failure, best-effort release of the lock, and exit.
Daemon.prototype.startFailed = function(err) {
    this.reportStartFailed(err);
    try {
        this.releaseLock();
    } catch (e) {
        this.reportReleaseLockFailed(e);
    }
    process.exit(this.startFailedExitCode);
};
// Executable used to spawn the daemon child.
Daemon.prototype.getStartExec = function() {
    return this.settings.start.exec;
};

// Arguments for the daemon child. `start.args` may be a static array or a
// function receiving this daemon instance; undefined when not configured.
Daemon.prototype.getStartArgs = function() {
    const args = this.settings.start.args;
    if (!args) {
        return undefined;
    }
    return typeof args === 'function' ? args(this) : args;
};
// Build the spawn() options: a detached child whose stdio is either fully
// ignored or wired to append-mode log files for stdout and/or stderr.
Daemon.prototype.getStartOptions = function() {
    let startSettings = this.settings.start;
    let stdio = 'ignore';
    if (startSettings.stdout || startSettings.stderr) {
        stdio = [ 'ignore' ];
        let stdout;
        if (startSettings.stdout) {
            try {
                stdout = fs.openSync(startSettings.stdout, 'a');
            } catch (err) {
                this.startFailed(err);
            }
        }
        stdio.push(!stdout ? 'ignore' : stdout);
        let stderr;
        if (startSettings.stderr) {
            // BUGFIX: the old code compared the stdout *fd* with the still
            // undefined `stderr` variable, so a config that set only
            // `stderr` silently ignored it. Compare the configured paths
            // instead, sharing one fd when both point at the same file.
            if (startSettings.stderr === startSettings.stdout) {
                stderr = stdout;
            } else {
                try {
                    stderr = fs.openSync(startSettings.stderr, 'a');
                } catch (err) {
                    this.startFailed(err);
                }
            }
        }
        stdio.push(!stderr ? 'ignore' : stderr);
    }
    return {
        detached: true,
        stdio: stdio
    };
};
// Persist the child's pid into the already-open pid-file descriptor.
Daemon.prototype.savePid = function(fd, pid) {
    // ftruncateSync operates on a file descriptor; truncateSync(fd) is the
    // deprecated form (Node DEP0081).
    fs.ftruncateSync(fd, 0);
    const buffer = Buffer.from(`${pid}`);
    // writeSync may write fewer bytes than requested: loop until done,
    // asking only for the *remaining* bytes each round (the old code passed
    // the full buffer.length again, overrunning the buffer on a retry).
    let written = 0;
    do {
        written += fs.writeSync(fd, buffer, written, buffer.length - written, written);
    } while (written < buffer.length);
};
// Read the recorded pid: null when the file is missing or empty, NaN when
// its contents are not numeric (handled by stopInternal).
Daemon.prototype.loadPid = function() {
    let pidStr = null;
    try {
        pidStr = fs.readFileSync(this.settings.pidFile, 'utf8');
    } catch (err) {
        if (err.code !== 'ENOENT') {
            throw err;
        }
    }
    let result = null;
    if (pidStr) {
        result = parseInt(pidStr, 10);
    }
    return result;
};

// Poll (every settings.exitCheckInterval ms) until the process exits.
Daemon.prototype.waitForExit = function(pid, cb) {
    setTimeout(() => this.checkIfExited(pid, cb), this.settings.exitCheckInterval);
};

// Probe the process: kill() throwing ESRCH means the pid no longer exists.
// NOTE(review): process.kill(pid) with no signal sends SIGTERM again on
// every poll; a kill(pid, 0) existence probe may have been intended —
// confirm before changing.
Daemon.prototype.checkIfExited = function(pid, cb) {
    try {
        process.kill(pid);
    } catch (err) {
        if (err.code === 'ESRCH') {
            return cb(); // Process exited
        }
        this.reportSignalFailed(err, pid);
        process.exit(this.stopFailedExitCode);
    }
    this.waitForExit(pid, cb);
};
// Console reporting helpers; each returns `this` to allow chaining.
// NOTE(review): messages are split between console.log and console.error —
// possibly deliberate (status vs. errors); confirm before unifying.
Daemon.prototype.reportStarted = function(pid) {
    console.log(`Started with pid ${pid}`);
    return this;
};
Daemon.prototype.reportStartFailed = function(err) {
    console.log(`Failed to start: ${err.message}`);
    return this;
};
Daemon.prototype.reportLockFailed = function(err) {
    console.error(`Could not obtain lock. Already running?\nError: ${err.message}`);
    return this;
};
Daemon.prototype.reportSavePidFailed = function(err) {
    console.log('Failed to save pid to file:', err.message);
    return this;
};
Daemon.prototype.reportLockReleased = function() {
    console.log('Lock released');
    return this;
};
Daemon.prototype.reportReleaseLockFailed = function(err) {
    console.error(`Could not release lock: ${err.message}`);
    return this;
};
Daemon.prototype.reportStopped = function() {
    console.log('Stopped');
    return this;
};
Daemon.prototype.reportPidFileNotFound = function() {
    console.log('Pid file not found. Already stopped?');
    return this;
};
Daemon.prototype.reportBadPidFile = function() {
    console.log('Pid file is invalid, releasing lock...');
    return this;
};
Daemon.prototype.reportProcessNotFound = function(pid) {
    console.log(`Could not find process ${pid}. Already stopped?`);
    return this;
};
Daemon.prototype.reportSignalSent = function(pid) {
    console.log(`Sent termination signal to ${pid}`);
    return this;
};
Daemon.prototype.reportSignalFailed = function(err, pid) {
    console.error(`Failed to send signal to ${pid}: ${err.message}`);
    return this;
};

module.exports = Daemon;
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import sys
import os
import shutil
import subprocess
import tempfile
import shlex
import requests
import yaml
from jsonschema import Draft7Validator, FormatChecker
from schema import model_list_schema
# Root of the OpenVINO/DLDT installation: legacy /opt/intel/dldt layout if
# present, otherwise the standard OpenVINO deployment-tools path.
MODEL_OPTIMIZER_ROOT = (
    "/opt/intel/dldt"
    if os.path.isdir("/opt/intel/dldt")
    else "/opt/intel/openvino/deployment_tools"
)
OPEN_MODEL_ZOO_ROOT = os.path.join(MODEL_OPTIMIZER_ROOT, "open_model_zoo")
# Open Model Zoo downloader / converter entry points.
MODEL_DOWNLOADER_PATH = os.path.join(
    OPEN_MODEL_ZOO_ROOT, "tools/downloader/downloader.py"
)
MODEL_CONVERTER_PATH = os.path.join(
    OPEN_MODEL_ZOO_ROOT, "tools/downloader/converter.py"
)
# The Model Optimizer script lives at a different relative path in each layout.
MODEL_OPTIMIZER_PATH = (
    os.path.join(MODEL_OPTIMIZER_ROOT, "model-optimizer/mo.py")
    if MODEL_OPTIMIZER_ROOT == "/opt/intel/dldt"
    else os.path.join(MODEL_OPTIMIZER_ROOT, "model_optimizer/mo.py")
)
# Locally installed DL Streamer model-proc files (preferred over download).
MODEL_PROC_ROOT = "/opt/intel/dl_streamer/samples/model_proc"
# Fallback source for model-proc files, fetched per dl_streamer version tag.
DL_STREAMER_REPO_ROOT = (
    "https://raw.githubusercontent.com/openvinotoolkit/dlstreamer_gst"
)
def _validate_schema(model_list):
    """Validate the parsed model list against model_list_schema; exit(1) on error."""
    try:
        checker = FormatChecker()
        Draft7Validator(model_list_schema, format_checker=checker).validate(model_list)
    except Exception as error:  # CLI tool: report and exit on any validation failure
        print("Yaml input schema validation error.")
        print(error)
        sys.exit(1)
def _load_model_list(model_list_path):
    """Read and parse the YAML model list, exiting if it is unreadable or malformed."""
    model_list = None
    try:
        with open(model_list_path) as model_list_file:
            model_list = yaml.safe_load(model_list_file)
    except Exception:
        print("Exception while loading yaml file. File could be malformed.")
        print("Please make sure model list file is in correct yml file format.")
        print("Expected Schema: ")
        print("- model(Required): mobilenet-ssd")
        print(" alias(Optional): object_detection")
        print(" version(Optional): 1")
        print(" precision(Optional): [FP16,FP32]")
    if model_list is None:
        sys.exit(1)
    _validate_schema(model_list)
    return model_list
def _find_downloaded_model(model_name, download_dir):
for root, directories, _ in os.walk(download_dir):
if model_name in directories:
return os.path.abspath(os.path.join(root, model_name))
return None
def _download_model_proc(target_dir, model_name, dl_streamer_version):
    """Place the model-proc JSON for *model_name* into *target_dir*.

    Prefers a locally installed model-proc under MODEL_PROC_ROOT (copied,
    so the installed samples tree is left intact); otherwise downloads it
    from the DL Streamer repository at *dl_streamer_version*. Prints a
    warning when the remote file is missing.
    """
    if os.path.isdir(MODEL_PROC_ROOT):
        model_proc = None
        for root, _, files in os.walk(MODEL_PROC_ROOT):
            for filepath in files:
                if os.path.splitext(filepath)[0] == model_name:
                    model_proc = os.path.join(root, filepath)
                    break
            if model_proc:
                break
        if model_proc:
            # BUGFIX: copy, not move — the old code moved the file out of
            # the system samples directory, breaking subsequent runs.
            shutil.copy(
                model_proc, os.path.join(target_dir, "{}.json".format(model_name))
            )
        return
    url = "{0}/{1}/samples/model_proc/{2}.json".format(
        DL_STREAMER_REPO_ROOT, dl_streamer_version, model_name
    )
    response = requests.get(url)
    if response.status_code == 200:
        # Context-managed temp dir so it is removed even though the file
        # inside it is moved away (the old code leaked the directory until
        # garbage collection).
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = "{0}/{1}.json".format(temp_dir, model_name)
            with open(temp_path, "wb") as out_file:
                out_file.write(response.content)
            print(
                "Downloaded {0} model-proc file from gst-video-analytics repo".format(
                    model_name
                )
            )
            shutil.move(
                temp_path, os.path.join(target_dir, "{}.json".format(model_name))
            )
    else:
        print("WARNING: model-proc not found in gst-video-analytics repo!")
def _create_convert_command(model_name, output_dir, precisions):
    """Build the argv for the OMZ converter, optionally limited to *precisions*."""
    if precisions:
        command = "python3 {0} -d {3} --name {1} --precisions {2} -o {3} --mo {4}".format(
            MODEL_CONVERTER_PATH,
            model_name,
            ",".join(map(str, precisions)),
            output_dir,
            MODEL_OPTIMIZER_PATH,
        )
    else:
        command = "python3 {0} -d {2} --name {1} -o {2} --mo {3}".format(
            MODEL_CONVERTER_PATH, model_name, output_dir, MODEL_OPTIMIZER_PATH
        )
    return shlex.split(command)
def _create_download_command(model_name, output_dir, precisions):
    """Build the argv for the OMZ downloader, optionally limited to *precisions*."""
    if precisions:
        command = "python3 {0} --name {1} --precisions {2} -o {3}".format(
            MODEL_DOWNLOADER_PATH,
            model_name,
            ",".join(map(str, precisions)),
            output_dir,
        )
    else:
        command = "python3 {0} --name {1} -o {2}".format(
            MODEL_DOWNLOADER_PATH, model_name, output_dir
        )
    return shlex.split(command)
def _run_command(command, model_name, step):
print(" ".join(command))
result = subprocess.run(command, check=False)
if result.returncode != 0:
print("Error occured while {0} {1} model.".format(step, model_name))
print("Please remove from input yml file and try again.")
sys.exit(1)
def _download_model(model_name, output_dir, precisions):
    """Download *model_name* into *output_dir* via the OMZ downloader."""
    _run_command(
        _create_download_command(model_name, output_dir, precisions),
        model_name,
        "downloading",
    )


def _convert_model(model_name, output_dir, precisions):
    """Convert the downloaded model to IR via the OMZ converter."""
    _run_command(
        _create_convert_command(model_name, output_dir, precisions),
        model_name,
        "converting",
    )
def _get_model_properties(model, model_list_path, target_root):
result = model
if not isinstance(model, dict):
result = {}
result["model"] = model
result.setdefault("alias", result["model"])
result.setdefault("version", 1)
result.setdefault("precision", None)
result.setdefault("model-proc", None)
if result["model-proc"]:
result["model-proc"] = os.path.abspath(
os.path.join(os.path.dirname(model_list_path), result["model-proc"])
)
result["target-dir"] = os.path.join(target_root,
result["alias"],
str(result["version"]))
return result
def _download_and_convert_model(
    target_root, model, force, model_list_path, dl_streamer_version
):
    """Download, convert and install one model under its target directory.

    Skips the model entirely when the target directory already exists and
    *force* is false; otherwise rebuilds it from a temporary download dir.
    """
    model_properties = _get_model_properties(model, model_list_path, target_root)
    model_name = model_properties["model"]
    precision = model_properties["precision"]
    model_proc = model_properties["model-proc"]
    target_dir = model_properties["target-dir"]
    if (not force) and (os.path.isdir(target_dir)):
        print("Model Directory {0} Exists - Skipping".format(target_dir))
        return
    # --force: wipe any stale install before rebuilding.
    if os.path.isdir(target_dir):
        shutil.rmtree(target_dir)
    with tempfile.TemporaryDirectory() as output_dir:
        _download_model(model_name, output_dir, precision)
        _convert_model(model_name, output_dir, precision)
        downloaded_model_path = _find_downloaded_model(model_name, output_dir)
        # Only subdirectories (per-precision IR folders) are moved into the
        # target; plain files in the download root are skipped.
        for path in os.listdir(downloaded_model_path):
            source = os.path.join(downloaded_model_path, path)
            target = os.path.join(target_dir, path)
            if os.path.isdir(source):
                shutil.move(source, target)
        if model_proc:
            if os.path.isfile(model_proc):
                shutil.copy(model_proc, target_dir)
            else:
                # NOTE(review): the message uses a "{}" placeholder but is
                # printed without .format(), so the path shows up as a second
                # print argument — confirm whether .format() was intended.
                print("Error, model-proc {} specified but not found", model_proc)
                sys.exit(1)
        else:
            # No explicit model-proc: try local install dir, then the repo.
            _download_model_proc(
                target_dir, model_name, dl_streamer_version
            )
def download_and_convert_models(
    model_list_path, output_dir, force, dl_streamer_version
):
    """Process every entry of the YAML model list into <output_dir>/models."""
    # Load (and validate) the list first so a malformed file exits before
    # any directories are created.
    model_list = _load_model_list(model_list_path)
    target_root = os.path.join(output_dir, "models")
    os.makedirs(target_root, exist_ok=True)
    for entry in model_list:
        _download_and_convert_model(
            target_root, entry, force, model_list_path, dl_streamer_version
        )
|
"use strict";
// tsc-emitted ES5 inheritance helper: wires up the prototype chain and the
// static side of a subclass. Generated code — do not edit by hand.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = Object.setPrototypeOf ||
        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
        function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
    return function (d, b) {
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
// tsc-emitted decorator helper: applies decorators right-to-left to a class,
// member or property descriptor. Generated code — do not edit by hand.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
var ts = require("typescript");
var _ts = require("../ts-internal");
var _ = require("lodash");
var declaration_1 = require("../utils/options/declaration");
var context_1 = require("./context");
var components_1 = require("./components");
var compiler_host_1 = require("./utils/compiler-host");
var component_1 = require("../utils/component");
var fs_1 = require("../utils/fs");
// Compiled (tsc ES5) output of TypeDoc's Converter component: turns a
// TypeScript program into a reflection project by dispatching AST nodes and
// types to registered converter sub-components. Generated code — behavior
// should be changed in the TypeScript source, not here.
var Converter = (function (_super) {
    __extends(Converter, _super);
    function Converter() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    // tsc alias so static references survive the decorator reassignment below.
    Converter_1 = Converter;
    // Create the compiler host and empty converter registries.
    Converter.prototype.initialize = function () {
        this.compilerHost = new compiler_host_1.CompilerHost(this);
        this.nodeConverters = {};
        this.typeTypeConverters = [];
        this.typeNodeConverters = [];
    };
    // Register a component, additionally indexing it as a node or type
    // converter when it is of the corresponding kind.
    Converter.prototype.addComponent = function (name, componentClass) {
        var component = _super.prototype.addComponent.call(this, name, componentClass);
        if (component instanceof components_1.ConverterNodeComponent) {
            this.addNodeConverter(component);
        }
        else if (component instanceof components_1.ConverterTypeComponent) {
            this.addTypeConverter(component);
        }
        return component;
    };
    // Index a node converter under every syntax kind it supports.
    Converter.prototype.addNodeConverter = function (converter) {
        for (var _i = 0, _a = converter.supports; _i < _a.length; _i++) {
            var supports = _a[_i];
            this.nodeConverters[supports] = converter;
        }
    };
    // Add a type converter to the node- and/or type-based lists, keeping
    // each list sorted by descending priority.
    Converter.prototype.addTypeConverter = function (converter) {
        if ('supportsNode' in converter && 'convertNode' in converter) {
            this.typeNodeConverters.push(converter);
            this.typeNodeConverters.sort(function (a, b) { return (b.priority || 0) - (a.priority || 0); });
        }
        if ('supportsType' in converter && 'convertType' in converter) {
            this.typeTypeConverters.push(converter);
            this.typeTypeConverters.sort(function (a, b) { return (b.priority || 0) - (a.priority || 0); });
        }
    };
    // Mirror of addComponent: also drop the converter from the registries.
    Converter.prototype.removeComponent = function (name) {
        var component = _super.prototype.removeComponent.call(this, name);
        if (component instanceof components_1.ConverterNodeComponent) {
            this.removeNodeConverter(component);
        }
        else if (component instanceof components_1.ConverterTypeComponent) {
            this.removeTypeConverter(component);
        }
        return component;
    };
    Converter.prototype.removeNodeConverter = function (converter) {
        var converters = this.nodeConverters;
        var keys = _.keys(this.nodeConverters);
        for (var _i = 0, keys_1 = keys; _i < keys_1.length; _i++) {
            var key = keys_1[_i];
            if (converters[key] === converter) {
                delete converters[key];
            }
        }
    };
    Converter.prototype.removeTypeConverter = function (converter) {
        var typeIndex = this.typeTypeConverters.indexOf(converter);
        if (typeIndex !== -1) {
            this.typeTypeConverters.splice(typeIndex, 1);
        }
        var nodeIndex = this.typeNodeConverters.indexOf(converter);
        if (nodeIndex !== -1) {
            this.typeNodeConverters.splice(nodeIndex, 1);
        }
    };
    Converter.prototype.removeAllComponents = function () {
        _super.prototype.removeAllComponents.call(this);
        this.nodeConverters = {};
        this.typeTypeConverters = [];
        this.typeNodeConverters = [];
    };
    // Main entry point: compile the given files and resolve the resulting
    // project, returning { errors, project }.
    Converter.prototype.convert = function (fileNames) {
        for (var i = 0, c = fileNames.length; i < c; i++) {
            fileNames[i] = fs_1.normalizePath(_ts.normalizeSlashes(fileNames[i]));
        }
        var program = ts.createProgram(fileNames, this.application.options.getCompilerOptions(), this.compilerHost);
        var checker = program.getTypeChecker();
        var context = new context_1.Context(this, fileNames, checker, program);
        this.trigger(Converter_1.EVENT_BEGIN, context);
        var errors = this.compile(context);
        var project = this.resolve(context);
        this.trigger(Converter_1.EVENT_END, context);
        return {
            errors: errors,
            project: project
        };
    };
    // Dispatch a single AST node to its registered converter; the visit
    // stack guards against infinite recursion on cyclic structures.
    Converter.prototype.convertNode = function (context, node) {
        if (context.visitStack.indexOf(node) !== -1) {
            return null;
        }
        var oldVisitStack = context.visitStack;
        context.visitStack = oldVisitStack.slice();
        context.visitStack.push(node);
        var result;
        if (node.kind in this.nodeConverters) {
            result = this.nodeConverters[node.kind].convert(context, node);
        }
        context.visitStack = oldVisitStack;
        return result;
    };
    // Convert a type, preferring node-based converters (highest priority
    // first) and falling back to type-based ones.
    Converter.prototype.convertType = function (context, node, type) {
        if (node) {
            type = type || context.getTypeAtLocation(node);
            for (var _i = 0, _a = this.typeNodeConverters; _i < _a.length; _i++) {
                var converter = _a[_i];
                if (converter.supportsNode(context, node, type)) {
                    return converter.convertNode(context, node, type);
                }
            }
        }
        if (type) {
            for (var _b = 0, _c = this.typeTypeConverters; _b < _c.length; _b++) {
                var converter = _c[_b];
                if (converter.supportsType(context, type)) {
                    return converter.convertType(context, type);
                }
            }
        }
    };
    // Visit every source file, then return the first non-empty diagnostics
    // bucket (options, syntactic, global, semantic) or an empty array.
    Converter.prototype.compile = function (context) {
        var _this = this;
        var program = context.program;
        program.getSourceFiles().forEach(function (sourceFile) {
            _this.convertNode(context, sourceFile);
        });
        var diagnostics = program.getOptionsDiagnostics();
        if (diagnostics.length) {
            return diagnostics;
        }
        diagnostics = program.getSyntacticDiagnostics();
        if (diagnostics.length) {
            return diagnostics;
        }
        diagnostics = program.getGlobalDiagnostics();
        if (diagnostics.length) {
            return diagnostics;
        }
        diagnostics = program.getSemanticDiagnostics();
        if (diagnostics.length) {
            return diagnostics;
        }
        return [];
    };
    // Fire resolve events for every reflection in the project.
    Converter.prototype.resolve = function (context) {
        this.trigger(Converter_1.EVENT_RESOLVE_BEGIN, context);
        var project = context.project;
        for (var id in project.reflections) {
            if (!project.reflections.hasOwnProperty(id)) {
                continue;
            }
            this.trigger(Converter_1.EVENT_RESOLVE, context, project.reflections[id]);
        }
        this.trigger(Converter_1.EVENT_RESOLVE_END, context);
        return project;
    };
    Converter.prototype.getDefaultLib = function () {
        return ts.getDefaultLibFileName(this.application.options.getCompilerOptions());
    };
    // Event names fired during conversion.
    Converter.EVENT_BEGIN = 'begin';
    Converter.EVENT_END = 'end';
    Converter.EVENT_FILE_BEGIN = 'fileBegin';
    Converter.EVENT_CREATE_DECLARATION = 'createDeclaration';
    Converter.EVENT_CREATE_SIGNATURE = 'createSignature';
    Converter.EVENT_CREATE_PARAMETER = 'createParameter';
    Converter.EVENT_CREATE_TYPE_PARAMETER = 'createTypeParameter';
    Converter.EVENT_FUNCTION_IMPLEMENTATION = 'functionImplementation';
    Converter.EVENT_RESOLVE_BEGIN = 'resolveBegin';
    Converter.EVENT_RESOLVE = 'resolveReflection';
    Converter.EVENT_RESOLVE_END = 'resolveEnd';
    // Command-line / config options contributed by this component.
    __decorate([
        component_1.Option({
            name: 'name',
            help: 'Set the name of the project that will be used in the header of the template.'
        })
    ], Converter.prototype, "name", void 0);
    __decorate([
        component_1.Option({
            name: 'externalPattern',
            help: 'Define a pattern for files that should be considered being external.'
        })
    ], Converter.prototype, "externalPattern", void 0);
    __decorate([
        component_1.Option({
            name: 'includeDeclarations',
            help: 'Turn on parsing of .d.ts declaration files.',
            type: declaration_1.ParameterType.Boolean
        })
    ], Converter.prototype, "includeDeclarations", void 0);
    __decorate([
        component_1.Option({
            name: 'excludeExternals',
            help: 'Prevent externally resolved TypeScript files from being documented.',
            type: declaration_1.ParameterType.Boolean
        })
    ], Converter.prototype, "excludeExternals", void 0);
    __decorate([
        component_1.Option({
            name: 'excludeNotExported',
            help: 'Prevent symbols that are not exported from being documented.',
            type: declaration_1.ParameterType.Boolean
        })
    ], Converter.prototype, "excludeNotExported", void 0);
    __decorate([
        component_1.Option({
            name: 'excludePrivate',
            help: 'Ignores private variables and methods',
            type: declaration_1.ParameterType.Boolean
        })
    ], Converter.prototype, "excludePrivate", void 0);
    Converter = Converter_1 = __decorate([
        component_1.Component({ name: 'converter', internal: true, childClass: components_1.ConverterComponent })
    ], Converter);
    return Converter;
    var Converter_1;
}(component_1.ChildableComponent));
// Public export of the converter component.
exports.Converter = Converter;
//# sourceMappingURL=converter.js.map
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., 2017) on raw logits.

    FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t).

    Args:
        gamma: focusing parameter; 0 reduces to (alpha-weighted) cross entropy.
        alpha: optional per-class weight tensor, indexed by class id.
        size_average: return the mean over elements if True, else the sum.
        ignore_index: targets equal to this value are remapped to class 0.
            NOTE: matching the original implementation, such elements still
            contribute to the loss — they are not masked out.
    """

    def __init__(
        self, gamma=0, alpha=None, size_average=True, ignore_index=-100
    ):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.size_average = size_average
        self.ignore_index = ignore_index

    def forward(self, input, target):
        # Flatten any spatial dims: N,C,H,W => N*H*W,C
        if input.dim() > 2:
            input = input.view(
                input.size(0), input.size(1), -1
            )  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)  # N,C,H*W => N,H*W,C
            input = input.contiguous().view(
                -1, input.size(2)
            )  # N,H*W,C => N*H*W,C
        # Remap ignored targets to class 0 (see class docstring).
        target = target * (target != self.ignore_index)
        # BUGFIX: gather() requires an integer index tensor. The original
        # multiplied target by a .float() mask, producing a float index that
        # modern PyTorch rejects — cast to long explicitly.
        target = target.view(-1, 1).long()
        # BUGFIX: pass dim=1 explicitly; the implicit-dim form is deprecated
        # and dim=1 matches its historical behavior for 2-D input.
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target).view(-1)
        # p_t acts as a constant (detached) weight, as in the original
        # Variable(logpt.data.exp()) formulation.
        pt = logpt.detach().exp()
        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.view(-1))
            logpt = logpt * at
        loss = -1 * (1 - pt) ** self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
class BinaryFocalLoss(nn.Module):
    """Focal loss for binary classification computed from raw logits.

    Combines the numerically stable BCE-with-logits formulation with a
    focal scaling factor exp(gamma * logsigmoid(-x * (2t - 1))), i.e.
    (1 - p_t)**gamma. Targets equal to ``ignore_index`` are remapped to 0
    (they still contribute to the loss).
    """

    def __init__(
        self, gamma=2, alpha=None, pos_weight=None, ignore_index=-100, **_
    ):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.pos_weight = pos_weight
        self.ignore_index = ignore_index

    def forward(self, input, target, reduction=True, weight=None):
        # Remap ignored targets to 0.
        target = target * (target != self.ignore_index).float()
        input = input.view(-1, 1)
        target = target.view(-1, 1)
        assert (
            target.size() == input.size()
        ), f"{target.size()} vs {input.size()}"
        if weight is not None:
            assert target.size() == weight.size()
        # For test
        if isinstance(self.pos_weight, (float, int)):
            # A scalar pos_weight overrides any provided weight tensor:
            # positives get pos_weight, negatives get 1.
            weight = target * (self.pos_weight - 1.0) + 1.0
        elif weight is None:
            weight = target + 2.0
        # Stable BCE with logits:
        # max(x,0) - x*t + log(exp(-max) + exp(-x - max))
        stabilizer = (-input).clamp(min=0)
        bce = (
            input
            - input * target
            + stabilizer
            + ((-stabilizer).exp() + (-input - stabilizer).exp()).log()
        )
        # log(1 - p_t) = logsigmoid of the negated signed logit.
        log_one_minus_pt = F.logsigmoid(-input * (target * 2 - 1))
        loss = (log_one_minus_pt * self.gamma).exp() * bce
        if weight is not None:
            loss = loss * weight
        if reduction:
            return loss.mean()
        return loss
class BinaryReducedFocalLoss(nn.Module):
    """Reduced focal loss for binary logits.

    Like BinaryFocalLoss, but the focal modulator is divided by `threshold`
    and applied only where the logit is positive.
    """

    def __init__(
        self, gamma=2, alpha=None, pos_weight=None, threshold=0.5, **_
    ):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.pos_weight = pos_weight
        self.threshold = threshold

    def forward(self, input, target, reduction=True, weight=None):
        target = target.float()
        input = input.view(-1, 1)
        target = target.view(-1, 1)
        assert target.size() == input.size(), f"{target.size()} vs {input.size()}"
        if weight is not None:
            assert target.size() == weight.size()
        # For test
        if isinstance(self.pos_weight, (float, int)):
            weight = target * (self.pos_weight - 1.0) + 1.0
        elif weight is None:
            weight = target + 2.0
        # Numerically stable BCE with logits.
        shift = (-input).clamp(min=0)
        bce = (
            input
            - input * target
            + shift
            + ((-shift).exp() + (-input - shift).exp()).log()
        )
        log_one_minus_pt = F.logsigmoid(-input * (target * 2 - 1))
        # Zero out the modulator wherever the logit is non-positive.
        log_one_minus_pt = torch.where(
            input.gt(0),
            log_one_minus_pt,
            torch.tensor(0.0).float().to(log_one_minus_pt.device),
        )
        focal = (log_one_minus_pt / self.threshold * self.gamma).exp() * bce
        if weight is not None:
            focal = focal * weight
        return focal.mean() if reduction else focal
class LabelSmoothBinaryFocalLoss(nn.Module):
    """Binary focal loss with label smoothing applied to the targets.

    Targets are smoothed as t * (1 - lb_smooth) + lb_smooth / n_cols, so the
    target tensor must be at least 2-D (size(1) is read before reshaping).
    """

    def __init__(
        self, lb_smooth=0.1, gamma=2, alpha=None, pos_weight=None, **_
    ):
        super().__init__()
        self.lb_smooth = lb_smooth
        self.gamma = gamma
        self.alpha = alpha
        self.pos_weight = pos_weight

    def forward(self, input, target, reduction=True, weight=None):
        target = target.float()
        # Pull hard labels toward lb_smooth / n_cols.
        target = target * (1 - self.lb_smooth) + self.lb_smooth / target.size(1)
        input = input.view(-1, 1)
        target = target.view(-1, 1)
        assert target.size() == input.size(), f"{target.size()} vs {input.size()}"
        if weight is not None:
            assert target.size() == weight.size()
        # For test
        if isinstance(self.pos_weight, (float, int)):
            weight = target * (self.pos_weight - 1.0) + 1.0
        elif weight is None:
            weight = target + 2.0
        # Numerically stable BCE with logits.
        shift = (-input).clamp(min=0)
        bce = (
            input
            - input * target
            + shift
            + ((-shift).exp() + (-input - shift).exp()).log()
        )
        log_one_minus_pt = F.logsigmoid(-input * (target * 2 - 1))
        focal = (log_one_minus_pt * self.gamma).exp() * bce
        if weight is not None:
            focal = focal * weight
        return focal.mean() if reduction else focal
class BinaryDualFocalLoss(nn.Module):
    """Dual focal loss for binary logits.

    Penalizes -(|t - p| * log(1 - |t - p|)^gamma) per element, where p is the
    sigmoid of the logit.
    """

    def __init__(self, gamma=2, alpha=None, pos_weight=None, **_):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.pos_weight = pos_weight

    def forward(self, input, target, reduction=True, weight=None):
        target = target.float()
        input = input.view(-1, 1)
        target = target.view(-1, 1)
        assert (
            target.size() == input.size()
        ), f"{target.size()} vs {input.size()}"
        if weight is not None:
            assert target.size() == weight.size()
        # For test
        if isinstance(self.pos_weight, (float, int)):
            weight = target * (self.pos_weight - 1.0) + 1.0
        else:
            if weight is None:
                weight = target + 2.0
        # torch.sigmoid: F.sigmoid is deprecated.
        proba = torch.sigmoid(input)
        diff = torch.abs(target - proba)
        # keepdim=True keeps loss shaped (N, 1) so multiplying by the (N, 1)
        # weight stays elementwise; without it the (N,) * (N, 1) broadcast
        # silently produced an (N, N) matrix.
        loss = -(diff * (torch.log(1.0 - diff) ** self.gamma)).sum(
            dim=1, keepdim=True
        )
        if weight is not None:
            loss = loss * weight
        if reduction:
            return loss.mean()
        else:
            return loss
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.deployment
# Libre Office Version: 7.3
import typing
from ..uno.exception import Exception as Exception_85530a09
from ..uno.x_interface import XInterface as XInterface_8f010a43
class ExtensionRemovedException(Exception_85530a09):
    """
    Exception Class

    Raised when a call with the given arguments is not supported because the
    extension has been removed; ``XPackage.isRemoved()`` returns ``true`` on
    such objects.

    **since**

        OOo 3.3

    See Also:
        `API ExtensionRemovedException <https://api.libreoffice.org/docs/idl/ref/exceptioncom_1_1sun_1_1star_1_1deployment_1_1ExtensionRemovedException.html>`_
    """

    # UNO type metadata consumed by the ooouno runtime, not by user code.
    __ooo_ns__: str = 'com.sun.star.deployment'
    __ooo_full_ns__: str = 'com.sun.star.deployment.ExtensionRemovedException'
    __ooo_type_name__: str = 'exception'
    __pyunointerface__: str = 'com.sun.star.deployment.ExtensionRemovedException'
    __pyunostruct__: str = 'com.sun.star.deployment.ExtensionRemovedException'

    typeName: str = 'com.sun.star.deployment.ExtensionRemovedException'
    """Literal Constant ``com.sun.star.deployment.ExtensionRemovedException``"""

    def __init__(self, Message: typing.Optional[str] = '', Context: typing.Optional[XInterface_8f010a43] = None) -> None:
        """
        Constructor

        Arguments:
            Message (str, optional): Message value.
            Context (XInterface, optional): Context value.
        """
        self._init(Message=Message, Context=Context)

    def _init(self, **kwargs) -> None:
        super()._init(**kwargs)


__all__ = ['ExtensionRemovedException']
|
const models = require('../../models');
const tpl = require('@tryghost/tpl');
const errors = require('@tryghost/errors');
const mega = require('../../services/mega');
// User-facing message templates, rendered through tpl().
const messages = {
    postNotFound: 'Post not found.'
};

// Shared preview renderer; payloads follow the v3 API shape.
const emailPreview = new mega.EmailPreview({
    apiVersion: 'v3'
});
/**
 * Admin API controller for previewing and test-sending a post's email.
 */
module.exports = {
    docName: 'email_preview',

    // Render the email HTML/plaintext/subject for a single post.
    read: {
        options: [
            'fields'
        ],
        validation: {
            options: {
                fields: ['html', 'plaintext', 'subject']
            }
        },
        data: [
            'id',
            'status'
        ],
        permissions: true,
        async query(frame) {
            // Force both formats and eager-load authors/meta for the renderer;
            // status 'all' includes drafts and scheduled posts.
            const options = Object.assign(frame.options, {formats: 'html,plaintext', withRelated: ['authors', 'posts_meta']});
            const data = Object.assign(frame.data, {status: 'all'});
            const model = await models.Post.findOne(data, options);
            if (!model) {
                throw new errors.NotFoundError({
                    message: tpl(messages.postNotFound)
                });
            }
            // memberSegment lets callers preview segment-specific content.
            return emailPreview.generateEmailContent(model, frame.options.memberSegment);
        }
    },

    // Send a rendered test email for the post to the supplied addresses.
    sendTestEmail: {
        statusCode: 200,
        headers: {},
        options: [
            'id'
        ],
        validation: {
            options: {
                id: {
                    required: true
                }
            }
        },
        permissions: true,
        async query(frame) {
            const options = Object.assign(frame.options, {status: 'all'});
            let model = await models.Post.findOne(options, {withRelated: ['authors']});
            if (!model) {
                throw new errors.NotFoundError({
                    message: tpl(messages.postNotFound)
                });
            }
            // frame.data.emails may be absent — default to an empty list.
            const {emails = []} = frame.data;
            return await mega.mega.sendTestEmail(model, emails, 'v3');
        }
    }
};
|
const availableNotes = ["c4", "d4", "e4", "f4", "g4", "a4", "b4", "c5"]

/**
 * Builds the scene (game board + keyboard) for one level of the game.
 * Note sequences are derived deterministically from gameIdx/level through
 * seeded RNGs, so the same game always replays the same melody.
 */
function LevelCreator(resources, audioCache)
{
    this.resources = resources;
    this.audioCache = audioCache;
    this.gameBoardBuilder = new GameBoardBuilder({
        resources: this.resources,
        audioCache: this.audioCache
    })
    this.keyBoardBuilder = new KeyBoardBuilder({
        resources: this.resources,
        audioCache: this.audioCache,
    })

    this.getScene = function(gameStatus, resultsCollector)
    {
        var scene = {}
        var gameSeed = gameStatus.gameIdx.toString()

        // The first note depends only on the game index.
        var firstNoteRng = new Math.seedrandom(gameSeed)
        var firstNoteIdx = Math.floor(firstNoteRng() * availableNotes.length)
        var notes = [availableNotes[firstNoteIdx]]

        // Remaining notes depend on game index and level; immediate repeats
        // are re-drawn so no note appears twice in a row.
        var noteRng = new Math.seedrandom(gameSeed + "-" + gameStatus.level.toString())
        for (var step = 0; step < gameStatus.level; step++) {
            var candidate = availableNotes[Math.floor(noteRng() * availableNotes.length)]
            while (candidate == notes[notes.length - 1]) {
                candidate = availableNotes[Math.floor(noteRng() * availableNotes.length)]
            }
            notes.push(candidate)
        }

        scene.gameBoard = this.gameBoardBuilder.build(notes, gameStatus, resultsCollector)
        scene.keyBoard = this.keyBoardBuilder.build(gameStatus.level, scene.gameBoard, resultsCollector)
        // HACK, think about smth nicer here
        scene.gameBoard.addStateChangeListener(scene.keyBoard)
        return scene
    }
}
|
/**
* description here
* @author xupingmao
* @since 2016
* @modified 2020/10/11 18:25:35
*/
#include "include/mp.h"
#include <setjmp.h>
/* Record the location (file, function, line) of frame f on the interpreter's
 * exception trace list (tm->ex_list) so mp_traceback() can print it later. */
void mp_push_exception(MpFrame* f){
    MpObj file = func_get_file_name_obj(f->fnc);
    MpObj fnc_name = func_get_name_obj(f->fnc);
    MpObj ex = mp_format(" File %o: in %o , at line %d", file, fnc_name,
        f->lineno);
    list_append(GET_LIST(tm->ex_list), ex);
}
/* Print the recorded exception trace (most recent call last) to stdout, then
 * the formatted exception line (tm->ex_line) to stderr. */
void mp_traceback() {
    int i;
    MpObj exlist = tm->ex_list;
    printf("Traceback (most recent call last):\n");
    /* Only entries above the currently active frame belong to the trace. */
    int cur = tm->frame - tm->frames;
    for (i = LIST_LEN(exlist) - 1; i >= cur; i--) {
        mp_println(LIST_NODES(exlist)[i]);
    }
    fprintf(stderr, "Exception:\n ");
    fprintf(stderr, "%s\n", GET_CSTR(tm->ex_line));
}
/* Raise an interpreter-level exception: format the message, remember where it
 * happened, and longjmp back to the current frame's setjmp handler.
 * NOTE: this function never returns to its caller. */
void mp_raise(char* fmt, ...) {
    va_list a;
    va_start(a, fmt);
    /* A new exception invalidates any previously recorded trace. */
    list_clear(GET_LIST(tm->ex_list));
    tm->ex = mp_format_va_list(fmt, a, 0);
    MpObj file = func_get_file_name_obj(tm->frame->fnc);
    MpObj fnc_name = func_get_name_obj(tm->frame->fnc);
    tm->ex_line = mp_format("File %o: in %o at line %d\n %os",
        file, fnc_name, tm->frame->lineno, tm->ex);
    va_end(a);
    longjmp(tm->frame->buf, 1);
}
|
import React from 'react';
// NOTE(review): <Link> is rendered below but never imported in this file —
// this throws a ReferenceError at render time. Presumably it should come
// from react-router ("import { Link } from 'react-router'"); confirm and add
// the import.
// NOTE(review): React.createClass was removed in React 16; consider migrating
// to a function component or the create-react-class package.
export default React.createClass({
    // Site-wide footer: legal links on the left, copyright on the right.
    render () {
        return (
            <footer className="footer_">
                <div className="row">
                    <div className="medium-6 column">
                        <ul className="inline-list">
                            <li><Link to="/terms">Termos de Serviço</Link></li>
                            <li><Link to="/privacy">Termos de Privacidade</Link></li>
                            {/*<li><Link to="/policy">Policy & Safety</Link></li>*/}
                            <li><Link to="/contact">Sugestões</Link></li>
                        </ul>
                    </div>
                    <div className="medium-6 column">
                        <p className="copy_ small-only-text-center">© {new Date().getFullYear()} Unicorno.com.br Todos os direitos reservados.</p>
                    </div>
                </div>
            </footer>
        );
    }
});
|
'use strict';
/** @namespace DevKit */
var DevKit;
(function (DevKit) {
    'use strict';
    // Typed accessor wrapper for a Dynamics 365 "timezonelocalizedname" Web API
    // record. e is the raw entity object returned by the Web API (optional —
    // an empty object is used for create scenarios). Generated-style code.
    DevKit.TimeZoneLocalizedNameApi = function (e) {
        var EMPTY_STRING = '';
        // OData annotation suffix holding the display (formatted) value.
        var f = '@OData.Community.Display.V1.FormattedValue';
        // Builds a property exposing FormattedValue and Value for one attribute.
        function webApiField(entity, logicalName, schemaName, entityLogicalCollectionName, entityLogicalName, readOnly, upsertEntity, isMultiOptionSet) {
            // OData annotation suffix holding a lookup's target logical name.
            var l = '@Microsoft.Dynamics.CRM.lookuplogicalname';
            var property = {};
            var getFormattedValue = function () {
                if (entity[logicalName + f] === undefined || entity[logicalName + f] === null) {
                    return EMPTY_STRING;
                }
                // Lookup field: only report when it targets the expected entity.
                if (entityLogicalCollectionName !== undefined && entityLogicalCollectionName.length > 0) {
                    if (entity[logicalName + l] === entityLogicalName) {
                        return entity[logicalName + f];
                    }
                    return EMPTY_STRING;
                }
                // Multi-select option sets are formatted as "A; B; C".
                if (isMultiOptionSet) {
                    return entity[logicalName + f].toString().split(';').map(function (item) { return item.trim(); });
                }
                return entity[logicalName + f];
            };
            var getValue = function () {
                if (entity[logicalName] === undefined || entity[logicalName] === null) {
                    return null;
                }
                if (entityLogicalCollectionName !== undefined && entityLogicalCollectionName.length > 0) {
                    if (entity[logicalName + l] === undefined || entity[logicalName + l] === entityLogicalName) {
                        return entity[logicalName];
                    }
                    return null;
                }
                // Raw multi-select values are comma-separated integers.
                if (isMultiOptionSet) {
                    return entity[logicalName].toString().split(',').map(function (item) { return parseInt(item, 10); });
                }
                return entity[logicalName];
            };
            var setValue = function (value) {
                if (isMultiOptionSet) value = value.join(',');
                if (entityLogicalCollectionName !== undefined && entityLogicalCollectionName.length > 0) {
                    // Lookups are written as @odata.bind references with GUID braces stripped.
                    value = value.replace('{', EMPTY_STRING).replace('}', EMPTY_STRING);
                    upsertEntity[schemaName + '@odata.bind'] = '/' + entityLogicalCollectionName + '(' + value + ')';
                } else {
                    upsertEntity[logicalName] = value;
                }
                entity[logicalName] = value;
            };
            Object.defineProperty(property, 'FormattedValue', {
                get: getFormattedValue
            });
            if (readOnly) {
                Object.defineProperty(property, 'Value', {
                    get: getValue
                });
            }
            else {
                Object.defineProperty(property, 'Value', {
                    get: getValue,
                    set: setValue
                });
            }
            return property;
        }
        // Field metadata: a = logical name, b = schema name, c = lookup target
        // collection, d = lookup target logical name, g = multi-select flag,
        // r = read-only flag.
        var timezonelocalizedname = {
            CreatedBy: { b: 'createdby', a: '_createdby_value', c: 'systemusers', d: 'systemuser', r: true },
            CreatedOn_UtcDateAndTime: { a: 'createdon', r: true },
            CreatedOnBehalfBy: { b: 'createdonbehalfby', a: '_createdonbehalfby_value', c: 'systemusers', d: 'systemuser', r: true },
            CultureId: { a: 'cultureid' },
            DaylightName: { a: 'daylightname' },
            ModifiedBy: { b: 'modifiedby', a: '_modifiedby_value', c: 'systemusers', d: 'systemuser', r: true },
            ModifiedOn_UtcDateAndTime: { a: 'modifiedon', r: true },
            ModifiedOnBehalfBy: { b: 'modifiedonbehalfby', a: '_modifiedonbehalfby_value', c: 'systemusers', d: 'systemuser', r: true },
            OrganizationId: { b: 'organizationid', a: '_organizationid_value', c: 'organizations', d: 'organization', r: true },
            StandardName: { a: 'standardname' },
            TimeZoneDefinitionId: { b: 'timezonedefinitionid', a: '_timezonedefinitionid_value', c: 'timezonedefinitions', d: 'timezonedefinition' },
            TimeZoneLocalizedNameId: { a: 'timezonelocalizednameid' },
            UserInterfaceName: { a: 'userinterfacename' },
            VersionNumber: { a: 'versionnumber', r: true }
        };
        if (e === undefined) e = {};
        // u accumulates the attributes to send on create/update (upsert payload).
        var u = {};
        // Replace each metadata record with a live accessor property.
        for (var field in timezonelocalizedname) {
            var a = timezonelocalizedname[field].a;
            var b = timezonelocalizedname[field].b;
            var c = timezonelocalizedname[field].c;
            var d = timezonelocalizedname[field].d;
            var g = timezonelocalizedname[field].g;
            var r = timezonelocalizedname[field].r;
            timezonelocalizedname[field] = webApiField(e, a, b, c, d, r, u, g);
        }
        timezonelocalizedname.Entity = u;
        timezonelocalizedname.EntityName = 'timezonelocalizedname';
        timezonelocalizedname.EntityCollectionName = 'timezonelocalizednames';
        timezonelocalizedname['@odata.etag'] = e['@odata.etag'];
        // Reads a raw aliased value (e.g. from FetchXML aggregate results).
        timezonelocalizedname.getAliasedValue = function (alias, isMultiOptionSet) {
            if (e[alias] === undefined || e[alias] === null) {
                return null;
            }
            if (isMultiOptionSet) {
                return e[alias].toString().split(',').map(function (item) { return parseInt(item, 10); });
            }
            return e[alias];
        }
        // Reads the formatted counterpart of an aliased value.
        timezonelocalizedname.getAliasedFormattedValue = function (alias, isMultiOptionSet) {
            if (e[alias + f] === undefined || e[alias + f] === null) {
                return EMPTY_STRING;
            }
            if (isMultiOptionSet) {
                return e[alias + f].toString().split(';').map(function (item) { return item.trim(); });
            }
            return e[alias + f];
        }
        return timezonelocalizedname;
    };
})(DevKit || (DevKit = {}));
/** @namespace OptionSet */
var OptionSet;
(function (OptionSet) {
    // Option-set (picklist) values for the timezonelocalizedname entity.
    OptionSet.TimeZoneLocalizedName = {
        // States of rollup-field calculation (standard Dynamics RollupState values).
        RollupState : {
            NotCalculated: 0,
            Calculated: 1,
            OverflowError: 2,
            OtherError: 3,
            RetryLimitExceeded: 4,
            HierarchicalRecursionLimitReached: 5,
            LoopDetected: 6
        }
    };
})(OptionSet || (OptionSet = {}));
|
/**
***********************************************************************************************************************
* Copyright (c) 2020, China Mobile Communications Group Co.,Ltd.
* COPYRIGHT (C) 2006 - 2020,RT-Thread Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* @file vfs_devfs.c
*
* @brief This file implement the DEV filesystem.
*
* @revision
* Date Author Notes
* 2018-02-11 Bernard Ignore O_CREAT flag in open.
***********************************************************************************************************************
*/
#include <os_device.h>
#include <os_assert.h>
#include <os_object.h>
#include <vfs.h>
#include <vfs_fs.h>
#include <vfs_file.h>
#include <devfs/vfs_devfs.h>
/**
 ***********************************************************************************************************************
 * @struct      device_dirent
 *
 * @brief       The device dir entry. Allocated as one block by vfs_device_fs_open(): the devices array lives
 *              immediately after this header and is freed together with it in vfs_device_fs_close().
 ***********************************************************************************************************************
 */
struct device_dirent
{
    os_device_t **devices;      /* The address to save the device object address. */
    os_uint16_t read_index;     /* The current read index of dir entry. */
    os_uint16_t device_count;   /* The total device number. */
};
/**
 ***********************************************************************************************************************
 * @brief           Mount device filesystem.
 *
 * @param[in]       fs              The VFS object.
 * @param[in]       rwflag          The read/write flag, not used now.
 * @param[out]      data            The private data, not used now.
 *
 * @return          Mount result.
 * @retval          0               Mount successfully.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_mount(struct vfs_filesystem *fs, unsigned long rwflag, const void *data)
{
    /* Nothing to set up: devfs is backed directly by the kernel device registry. */
    return 0;
}
/**
 ***********************************************************************************************************************
 * @brief           Run ioctl cmd for device.
 *
 * @param[in]       file            The file descriptor.
 * @param[in]       cmd             The ioctl cmd.
 * @param[in,out]   args            The arguments, depends on cmd.
 *
 * @return          The cmd result.
 * @retval          0               Run cmd successfully.
 * @retval          -EIO            Run cmd failed.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_ioctl(struct vfs_fd *file, int cmd, void *args)
{
    os_err_t     result;
    os_device_t *dev_id;

    OS_ASSERT(file != OS_NULL);

    /* The device handle was stashed in file->data by vfs_device_fs_open(). */
    dev_id = (os_device_t *)file->data;
    OS_ASSERT(dev_id != OS_NULL);

    result = os_device_control(dev_id, cmd, args);
    if (result == OS_EOK)
    {
        return 0;
    }

    /* All device-layer errors are collapsed to -EIO for the VFS caller. */
    return -EIO;
}
/**
 ***********************************************************************************************************************
 * @brief           Read device file.
 *
 * @param[in,out]   file            The file descriptor.
 * @param[out]      buf             The pointer of buf to save read content.
 * @param[in]       count           The expected read size.
 *
 * @return          The actual read size (or a negative error code from the device layer).
 ***********************************************************************************************************************
 */
static int vfs_device_fs_read(struct vfs_fd *file, void *buf, size_t count)
{
    int          result;
    os_device_t *dev_id;

    OS_ASSERT(file != OS_NULL);

    dev_id = (os_device_t *)file->data;
    OS_ASSERT(dev_id != OS_NULL);

    result = os_device_read(dev_id, file->pos, buf, count);

    /* Only advance the position on a successful transfer: the device layer
     * returns a negative error code on failure, which previously moved
     * file->pos backwards and corrupted the offset. */
    if (result > 0)
    {
        file->pos += result;
    }

    return result;
}
/**
 ***********************************************************************************************************************
 * @brief           Write device file.
 *
 * @param[in,out]   file            The file descriptor.
 * @param[in]       buf             The pointer of buf to write.
 * @param[in]       count           The expected write size.
 *
 * @return          The actual write size (or a negative error code from the device layer).
 ***********************************************************************************************************************
 */
static int vfs_device_fs_write(struct vfs_fd *file, const void *buf, size_t count)
{
    int          result;
    os_device_t *dev_id;

    OS_ASSERT(file != OS_NULL);

    dev_id = (os_device_t *)file->data;
    OS_ASSERT(dev_id != OS_NULL);

    result = os_device_write(dev_id, file->pos, buf, count);

    /* Only advance the position on a successful transfer: the device layer
     * returns a negative error code on failure, which previously moved
     * file->pos backwards and corrupted the offset. */
    if (result > 0)
    {
        file->pos += result;
    }

    return result;
}
/**
 ***********************************************************************************************************************
 * @brief           Close device file.
 *
 * @param[in,out]   file            The file descriptor.
 *
 * @return          Close result.
 * @retval          0               Close successfully.
 * @retval          -EIO            Close failed.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_close(struct vfs_fd *file)
{
    os_err_t     result;
    os_device_t *dev_id;

    OS_ASSERT(file != OS_NULL);

    /* If directory, free the root dir entry. */
    if (file->type == FT_DIRECTORY)
    {
        struct device_dirent *root_dirent;

        root_dirent = (struct device_dirent *)file->data;
        OS_ASSERT(root_dirent != OS_NULL);

        /* The header and the devices array were allocated as one block. */
        os_free(root_dirent);

        return 0;
    }

    /* If device file, close device. */
    dev_id = (os_device_t *)file->data;
    OS_ASSERT(dev_id != OS_NULL);
    result = os_device_close(dev_id);
    if (result == OS_EOK)
    {
        /* Detach the device from the descriptor only after a clean close. */
        file->data = OS_NULL;

        return 0;
    }

    return -EIO;
}
/**
 ***********************************************************************************************************************
 * @brief           Open device file.
 *
 * @param[in,out]   file            The file descriptor.
 *
 * @return          Open result.
 * @retval          0               Open successfully.
 * @retval          others          Open failed, return error code.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_open(struct vfs_fd *file)
{
    os_err_t     result;
    os_device_t *device;

    /* If root directory, traverse device object and save device info. */
    if ((file->path[0] == '/') && (file->path[1] == '\0') &&
        (file->flags & O_DIRECTORY))
    {
        os_object_t          *object;
        os_list_node_t       *node;
        os_object_info_t     *information;
        struct device_dirent *root_dirent;
        os_uint32_t           count = 0;

        /* The device list must not change while it is being counted and copied. */
        os_enter_critical();

        /* Traverse device object to get device number. */
        information = os_object_get_info(OS_OBJECT_DEVICE);
        OS_ASSERT(information != OS_NULL);
        for (node = information->object_list.next; node != &(information->object_list); node = node->next)
        {
            count ++;
        }

        /* Allocate memory to save all the device object address. The header
           and the pointer array are one allocation, freed together in close. */
        root_dirent = (struct device_dirent *)os_malloc(sizeof(struct device_dirent) +
                                                        count * sizeof(os_device_t *));
        if (root_dirent != OS_NULL)
        {
            /* The array starts right after the header. */
            root_dirent->devices = (os_device_t **)(root_dirent + 1);
            root_dirent->read_index = 0;
            root_dirent->device_count = count;
            count = 0;
            for (node = information->object_list.next; node != &(information->object_list); node = node->next)
            {
                object = os_list_entry(node, struct os_object, list);
                root_dirent->devices[count] = (os_device_t *)object;
                count ++;
            }
        }
        os_exit_critical();

        /* Save dir entry info to file->data.
           NOTE(review): an allocation failure still returns 0 here with
           file->data == OS_NULL; confirm getdents/close tolerate that. */
        file->data = root_dirent;

        return 0;
    }

    /* If device file, find the device and open it, and save the device info to file->data. */
    device = os_device_find(&file->path[1]);
    if (device == OS_NULL)
    {
        return -ENODEV;
    }

#ifdef OS_USING_POSIX
    /* Devices with their own fops (POSIX-style drivers) handle open themselves. */
    if (device->fops)
    {
        file->fops = device->fops;
        file->data = (void *)device;

        if (file->fops->open)
        {
            result = file->fops->open(file);
            if (result == OS_EOK || result == OS_ENOSYS)
            {
                return 0;
            }
        }
    }
    else
#endif
    {
        result = os_device_open(device, OS_DEVICE_OFLAG_RDWR);
        if (result == OS_EOK || result == OS_ENOSYS)
        {
            file->data = device;
            return 0;
        }
    }

    /* Open device failed. */
    file->data = OS_NULL;

    return -EIO;
}
/**
 ***********************************************************************************************************************
 * @brief           Get the device file status.
 *
 * @param[in]       fs              The VFS object.
 * @param[in]       path            The device file path.
 * @param[out]      st              The pointer of buffer to save device file status.
 *
 * @return          The operation result.
 * @retval          0               Get status successfully.
 * @retval          -ENOENT         Not find the entry.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_stat(struct vfs_filesystem *fs, const char *path, struct stat *st)
{
    os_device_t *dev_id;

    /* The root directory: readable/writable/searchable by everyone. */
    if ((path[0] == '/') && (path[1] == '\0'))
    {
        st->st_dev  = 0;
        st->st_mode = S_IFDIR |
                      S_IRUSR | S_IRGRP | S_IROTH |
                      S_IWUSR | S_IWGRP | S_IWOTH |
                      S_IXUSR | S_IXGRP | S_IXOTH;
        st->st_size  = 0;
        st->st_mtime = 0;
        return 0;
    }

    /* A device node: look the device up and map its type to a file type. */
    dev_id = os_device_find(&path[1]);
    if (dev_id == OS_NULL)
    {
        return -ENOENT;
    }

    st->st_dev  = 0;
    st->st_mode = S_IRUSR | S_IRGRP | S_IROTH |
                  S_IWUSR | S_IWGRP | S_IWOTH;

    if (dev_id->type == OS_DEVICE_TYPE_CHAR)
    {
        st->st_mode |= S_IFCHR;
    }
    else if (dev_id->type == OS_DEVICE_TYPE_BLOCK)
    {
        st->st_mode |= S_IFBLK;
    }
    else if (dev_id->type == OS_DEVICE_TYPE_PIPE)
    {
        st->st_mode |= S_IFIFO;
    }
    else
    {
        st->st_mode |= S_IFREG;
    }

    st->st_size  = 0;
    st->st_mtime = 0;

    return 0;
}
/**
 ***********************************************************************************************************************
 * @brief           Get the device dir entry.
 *
 * @param[in]       file            The file descriptor.
 * @param[out]      dirp            The pointer of buffer to save dir entry.
 * @param[in]       count           The buffer size to save dir entry.
 *
 * @return          The operation result.
 * @retval          int             The actual size of read.
 * @retval          -EINVAL         Invalid parameter.
 ***********************************************************************************************************************
 */
static int vfs_device_fs_getdents(struct vfs_fd *file, struct dirent *dirp, uint32_t count)
{
    os_uint32_t           index;
    os_object_t          *object;
    struct dirent        *d;
    struct device_dirent *root_dirent;

    /* The dirent table was built by vfs_device_fs_open() for the root dir. */
    root_dirent = (struct device_dirent *)file->data;
    OS_ASSERT(root_dirent != OS_NULL);

    /* Convert bytes to dirent count */
    count = (count / sizeof(struct dirent));
    if (count == 0)
    {
        return -EINVAL;
    }

    /* Resume from read_index so successive calls page through the list. */
    for (index = 0; index < count && index + root_dirent->read_index < root_dirent->device_count; index ++)
    {
        object = (os_object_t *)root_dirent->devices[root_dirent->read_index + index];

        d = dirp + index;
        d->d_type = DT_REG;
        /* NOTE(review): d_namlen is set to the buffer capacity rather than the
         * actual name length, and strncpy() does not NUL-terminate when the
         * name fills OS_NAME_MAX — confirm readers tolerate both. */
        d->d_namlen = OS_NAME_MAX;
        d->d_reclen = (os_uint16_t)sizeof(struct dirent);
        strncpy(d->d_name, object->name, OS_NAME_MAX);
    }

    root_dirent->read_index += index;

    return index * sizeof(struct dirent);
}
/**
 ***********************************************************************************************************************
 * @brief           The poll operation of device file, not implemented now.
 *
 * @param[in]       file            The file descriptor.
 * @param[in]       req             The poll request.
 *
 * @return          The operation result.
 * @retval          int             The event mask (always 0: no events reported).
 ***********************************************************************************************************************
 */
static int vfs_device_fs_poll(struct vfs_fd *file, struct os_pollreq *req)
{
    /* Placeholder: no events are ever signalled for device files. */
    int mask = 0;

    return mask;
}

/* File operation table for device files; entry order must match struct vfs_file_ops. */
static const struct vfs_file_ops _device_fops =
{
    vfs_device_fs_open,
    vfs_device_fs_close,
    vfs_device_fs_ioctl,
    vfs_device_fs_read,
    vfs_device_fs_write,
    OS_NULL,                    /* Not support flush. */
    OS_NULL,                    /* Not support lseek. */
    vfs_device_fs_getdents,
    vfs_device_fs_poll,
};
/* Filesystem operation table registered with the VFS under the name "devfs". */
static const struct vfs_filesystem_ops _device_fs =
{
    "devfs",
    VFS_FS_FLAG_DEFAULT,
    &_device_fops,

    vfs_device_fs_mount,
    OS_NULL,                    /* Not support unmount. */
    OS_NULL,                    /* Not support mkfs. */
    OS_NULL,                    /* Not support statfs. */
    OS_NULL,                    /* Not support unlink. */
    vfs_device_fs_stat,
    OS_NULL,                    /* Not support rename. */
};

/**
 ***********************************************************************************************************************
 * @brief           Register DEVFS operation structure to VFS.
 *
 * @param[in]       None
 *
 * @return          The register result.
 * @retval          0               Register successfully.
 * @retval          -1              Register failed.
 ***********************************************************************************************************************
 */
int vfs_devfs_init(void)
{
    /* Register device file system. */
    return vfs_register(&_device_fs);
}
|
#!/usr/bin/env python
# import sys
# import os
#
# # Using https://stackoverflow.com/questions/51520/how-to-get-an-absolute-file-path-in-python
# utils_path = os.path.abspath("utils")
#
# # Using https://askubuntu.com/questions/470982/how-to-add-a-python-module-to-syspath/471168
# sys.path.insert(0, utils_path)
import numpy as np
from scipy.ndimage.measurements import label as lb
from scipy.ndimage.measurements import center_of_mass as com
import nibabel as nib
# from utils import getROICOG
from utils import atlasUtility as au
import argparse
import pandas as pd
from collections import OrderedDict
import os
class cluster_reporting_tool:
"""
This tool will take as input the brain map - a 3D file and an atlas name.
User can set a threshold for example <1.3>
Program finds clusters.
For each cluster:
Finds the span of cluster
Check how many ROIs are covered by the cluster.
For each ROI covered
Find the number and percentage of voxels it covers and reports the
peak coordinate closest to COG of the voxels in that ROI.
Also gives the name and coordinates of the peak coordinate of the
cluster.
"""
    def __init__(self, contrast, atlas_dict, threshold, volume = 0):
        """Load the contrast map and atlas into memory.

        Args:
            contrast: path to the (3D or 4D) NIfTI brain map to report on.
            atlas_dict: dict with keys 'atlas_path', 'atlas_labels_path' and
                'atlas_xml_zero_start_index' (list-valued; only entry 0 of
                'atlas_path' is loaded here).
            threshold: statistic threshold, applied symmetrically (|v| kept).
            volume: volume index to use when the contrast is 4D.
        """
        # Read Brain file:
        self.brain_img = nib.load(contrast)
        # NOTE(review): get_data() is deprecated in recent nibabel; get_fdata()
        # changes the returned dtype, so confirm before switching.
        self.brain = self.brain_img.get_data()
        # NaNs would break thresholding and cluster labelling downstream.
        self.brain[np.isnan(self.brain)] = 0
        self.atlas_dict = atlas_dict
        # Read Atlas file
        self.atlas = nib.load(atlas_dict['atlas_path'][0]).get_data()
        self.thresh = threshold
        self.volume = volume
def getNearestVoxel(self, roi_mask, COG):
"""
Input:
-----
roi_mask - This is the brain 3D tensor in which the voxels
belonging to some predefined region has non zero value and rest of the
voxels has zero value.
COG: It is a list or tuple of 3 cartesian coordinates (x, y, z)
representing the COG.
Output:
------
The function returns the cartesian coordinates of the coordinate in the
non zero region of roi_mask that is closest to the COG
"""
roiCoord = np.where(roi_mask != 0)
peak_list = []
dist = float(np.inf)
for [x, y, z] in zip(roiCoord[0], roiCoord[1], roiCoord[2]):
peak = [x, y, z]
current_dist = abs(x - COG[0]) + abs(y - COG[1]) + abs(z - COG[2])
if current_dist < dist:
if len(peak_list) != 0:
peak_list = []
peak_list.append(peak)
dist = current_dist
elif current_dist == dist:
peak_list.append(peak)
dist = current_dist
# The above 'For loop' might result in miltiple peak coordinates(peak list)
# having same distance from COG Check which of the peak list has least
# x coordinate i.e closest to midline (My heuristic) to select one peak
x = float(np.inf)
res = []
for coordinates in peak_list:
current_x = abs(coordinates[0])
if current_x < x:
res = []
res.append(coordinates)
elif current_x == x:
res.append(coordinates)
else:
pass
# # Find the MNI coordinates of the peak coordinates
# MNI = []
# for res_peak in res:
# MNI.append(queryAtlas.XYZ2MNI2mm(res_peak))
#
# return MNI
if len(res) > 1:
raise Exception('Multiple candidates for Representative \
coordinates. Please report to the author of the tool about this!')
return res[0]
def _pixDim(self):
"""
Internal Function to be used within this script
Returns the pixel dimension
"""
if self.brain_img.header['pixdim'][1] == 3:
pixdim = 3
elif self.brain_img.header['pixdim'][1] == 2:
pixdim = 2
elif self.brain_img.header['pixdim'][1] == 1:
pixdim = 1
else:
raise Exception('Unknown Pixel Dimension',
self.brain_img.header['pixdim'][1])
return pixdim
    def _XYZ2MNI(self, CM):
        """Convert voxel (array) coordinates to MNI space.

        Dispatches on the image resolution returned by _pixDim(); since
        _pixDim() raises for any resolution other than 1/2/3 mm, MNI is
        always bound before the return.
        """
        if self._pixDim() == 1:
            MNI = au.queryAtlas.XYZ2MNI1mm(list(CM))
        elif self._pixDim() == 2:
            MNI = au.queryAtlas.XYZ2MNI2mm(list(CM))
        elif self._pixDim() == 3:
            MNI = au.queryAtlas.XYZ2MNI3mm(list(CM))
        # print("Center of Gravity:", MNI)
        return MNI
    def report(self, volume = None, threshold = None, out_file = 'ClusterReport.csv'):
        """Generate a per-cluster, per-atlas-region CSV report.

        Thresholds the (optionally 4D) contrast map, labels the surviving
        clusters, and for every cluster/atlas-region overlap records the
        center-of-gravity (COG) value, its MNI coordinates, the atlas
        region name and overlap voxel counts, one DataFrame row group per
        overlap.

        Parameters:
        ----------
        volume: optional volume index for a 4D contrast; overrides
            self.volume when given.
        threshold: optional symmetric threshold; overrides self.thresh
            when given.
        out_file: path of the CSV file to write.

        Output:
        ------
        Tuple of (absolute path of the written CSV, the report DataFrame).
        """
        # To take care if user has given a 4D contrast
        if volume != None:
            self.volume = volume
        if len(self.brain.shape) > 3:
            brain = np.array(self.brain[:,:,:,self.volume])
        else:
            brain = np.array(self.brain)
        if threshold != None:
            self.thresh = threshold
        # Total number of brain voxels
        num_brain_voxels = len(np.where(brain != 0)[0])
        """
        Brain_zero is used later to calculate the center of gravity of the
        cluster voxels overlapping with atlas voxels
        """
        brain_zero = np.zeros(brain.shape)
        # Apply thresholding
        brain[(brain < self.thresh) & (brain > -self.thresh)] = 0
        # Find clusters
        clusters, num_clusters = lb(brain)
        df_report = pd.DataFrame()
        # List to store cluster size information
        full_cluster_voxels_percentage_list = []
        atlas_path = self.atlas_dict['atlas_path']
        atlas_labels_path = self.atlas_dict['atlas_labels_path']
        atlas_xml_zero_start_index = \
            self.atlas_dict['atlas_xml_zero_start_index']
        atlas_obj = au.queryAtlas(atlas_path, atlas_labels_path,
                    atlas_xml_zero_start_index=atlas_xml_zero_start_index)
        for cluster_number in range(1,num_clusters + 1):
            # Coordinates that are present in cluster given by cluster_number
            cluster_indices = np.where(clusters == cluster_number)
            # Number of voxels belonging to the cluster -> cluster_number
            num_cluster_voxels = len(cluster_indices[0])
            # Percentage of total brain voxels in a cluster
            full_cluster_voxels_percentage = \
                num_cluster_voxels * 100 / num_brain_voxels
            # To create a list to be added to dataframe
            full_cluster_voxels_percentage_list.append(
                full_cluster_voxels_percentage)
            # Find the atlas labels/regions that the cluster spans
            atlas_regions_labels = np.unique(self.atlas[cluster_indices])
            # print(atlas_regions_labels)
            # iterate over all the labes/regions
            for label in atlas_regions_labels:
                # Lists to be used for dataFrame creation
                # (each holds a single value per label iteration)
                cog_value_list = []
                number_overlapping_cluster_voxels_list = []
                overlapping_cluster_voxels_percentage_list = []
                MNI_cog_list = []
                cog_region_name_list = []
                cog_unweighted_value_list = []
                cog_weighted_value_list = []
                cog_weighted_value_list = []
                MNI_cog_unweighted_list = []
                MNI_cog_weighted_list = []
                cog_region_name_weighted_list = []
                cog_region_name_unweighted_list = []
                # Find all the coordinates of the labels
                # Skipping the Label 0
                if label == 0:
                    continue
                atlas_label_indices = np.where(self.atlas == label)
                """ Find the cluster coordinates overlapping the label/region
                under consideration """
                # Changing the form of cluster indices to (x,y,z) tuple
                cluster_indices_zip = zip(cluster_indices[0], cluster_indices[1]
                                          , cluster_indices[2])
                cluster_indices_tuple_list = list(cluster_indices_zip)
                # Changing the form of atlas indices to (x,y,z) tuple
                atlas_label_indices_zip = \
                    zip(atlas_label_indices[0], atlas_label_indices[1],
                        atlas_label_indices[2])
                atlas_label_indices_tuple_list = list(atlas_label_indices_zip)
                # Number of voxels belonging to the atlas region
                num_atlas_region_voxels = len(atlas_label_indices_tuple_list)
                # 1. Find intersecion of the above two lists
                overlapping_coordinates = \
                    list(set(cluster_indices_tuple_list).intersection(
                        set(atlas_label_indices_tuple_list)))
                """
                2. Make an brain array and initialize the overlapping
                coordinates with the values from brain
                # Transform coordinates list to list of indices as
                returned by np.where()
                # Ref: https://stackoverflow.com/questions/12974474/
                how-to-unzip-a-list-of-tuples-into-individual-lists
                """
                overlapping_indices_zip = zip(*overlapping_coordinates)
                overlapping_indices_tuple_list = list(overlapping_indices_zip)
                # Number of voxels in the overlap of cluster and atlas region
                number_overlapping_cluster_voxels = \
                    len(overlapping_coordinates)
                # Creating list to be added to dataframe
                number_overlapping_cluster_voxels_list.append(
                    number_overlapping_cluster_voxels)
                #Percentage of voxels in the overlap of cluster and atlas region
                overlapping_cluster_voxels_percentage = \
                    number_overlapping_cluster_voxels*100 / num_atlas_region_voxels
                # Creating list to be added to dataframe
                overlapping_cluster_voxels_percentage_list.append(
                    overlapping_cluster_voxels_percentage)
                # Assigning the overlap to the empty brain to find COG later
                brain_zero[overlapping_indices_tuple_list] = \
                    brain[overlapping_indices_tuple_list]
                """
                3. Then use the already created functions to do the following:
                a. Find the representative coordinate of the intersection
                Create a dummy atlas (roi_mask) with just one region and label
                that as 1
                Ref: https://stackoverflow.com/questions/32322281/
                numpy-matrix-binarization-using-only-one-expression
                """
                roi_mask_for_unweighted_cog = np.where(brain_zero != 0, 1, 0)
                roi_mask_for_weighted_cog = brain_zero
                cog_unweighted = com(roi_mask_for_unweighted_cog)
                cog_weighted = com(roi_mask_for_weighted_cog)
                # convert the coordinates to int (math.floor)
                cog_unweighted = tuple(map(int, cog_unweighted))
                cog_weighted = tuple(map(int, cog_weighted))
                """
                If the COG lies outside the overlapping coordinates then find
                the coordinate that lies on the overlapping region and is
                closest to the COG.
                """
                if not roi_mask_for_unweighted_cog[cog_unweighted]:
                    cog_unweighted = \
                        tuple(self.getNearestVoxel(roi_mask_for_unweighted_cog,
                                                   cog_unweighted))
                if not roi_mask_for_weighted_cog[cog_weighted]:
                    cog_weighted= \
                        tuple(self.getNearestVoxel(roi_mask_for_weighted_cog,
                                                   cog_weighted))
                # print('COM Unweighted', cog_unweighted)
                # print('COM Weighted', cog_weighted)
                # Finding the values at the cluster representative coordinates
                cog_unweighted_value = brain[cog_unweighted]
                cog_weighted_value = brain[cog_weighted]
                # Lists to be added to dataframe
                cog_unweighted_value_list.append(cog_unweighted_value)
                cog_weighted_value_list.append(cog_weighted_value)
                # b. Convert the cartesian coordinates to MNI
                MNI_cog_unweighted = self._XYZ2MNI(cog_unweighted)
                MNI_cog_weighted = self._XYZ2MNI(cog_weighted)
                # Convert the list of coordinates to string to get rid of:
                # Exception: Data must be 1-dimensional
                str_cog_unweighted = ''
                for i in MNI_cog_unweighted:
                    str_cog_unweighted = str_cog_unweighted + ' ' + str(i)
                str_cog_weighted = ''
                for i in MNI_cog_weighted:
                    str_cog_weighted = str_cog_weighted + ' ' + str(i)
                # Lists to be added to dataframe
                MNI_cog_unweighted_list.append(str_cog_unweighted)
                MNI_cog_weighted_list.append(str_cog_weighted)
                # c. Report the name of the region
                # Names of the regions of COG
                cog_region_name_weighted = \
                    atlas_obj.getAtlasRegions(MNI_cog_weighted)[1]
                cog_region_name_unweighted = \
                    atlas_obj.getAtlasRegions(MNI_cog_unweighted)[1]
                # print('Region name weighted COG: ',cog_region_name_weighted)
                #
                # print('Region name unweighter COG: ',cog_region_name_unweighted)
                # List created to be added to dataframe
                cog_region_name_weighted_list.append(cog_region_name_weighted)
                cog_region_name_unweighted_list.append(cog_region_name_unweighted)
                # To choose from weighted and unweighted COG options
                # NOTE(review): hard-coded switch — only the weighted branch
                # is implemented; the else branch silently does nothing.
                WEIGHTED = True
                if WEIGHTED:
                    MNI_cog_list = MNI_cog_weighted_list
                    cog_region_name_list = cog_region_name_weighted_list
                    cog_value_list = cog_weighted_value_list
                else:
                    pass
                # Sort the Regions bsed on cog value
                # Get the indices of elements after they are sorted
                sorted_indices = np.argsort(cog_value_list)
                # Sort the lists according to the above sorted_indices
                cog_value_list = np.array(cog_value_list)[sorted_indices]
                number_overlapping_cluster_voxels_list = \
                    np.array(number_overlapping_cluster_voxels_list)[sorted_indices]
                overlapping_cluster_voxels_percentage_list = \
                    np.array(overlapping_cluster_voxels_percentage_list)[sorted_indices]
                MNI_cog_list = np.array(MNI_cog_list)[sorted_indices]
                cog_region_name_list = np.array(cog_region_name_list)[sorted_indices]
                """
                TODO:
                Convert MNI coordinates from list to string (x,y,z)
                The next error that I will have to deal with is unequal length
                arrays. For that append each list with spaces.
                Then care about ordering the dictionary.
                """
                # Creating a dictionary to create dataframe
                df_dict = OrderedDict()
                df_dict['Cluster Number'] = [cluster_number]
                df_dict['Max Value'] = cog_value_list
                df_dict['Num Voxels'] = number_overlapping_cluster_voxels_list
                df_dict['Percentage of Voxel' ] = \
                    overlapping_cluster_voxels_percentage_list
                df_dict['MNI Coordinates'] = MNI_cog_list
                df_dict['Region Name'] = cog_region_name_list
                df = pd.DataFrame(df_dict)
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # this needs pd.concat([df_report, df]) on upgrade.
                df_report = df_report.append(df)
                # Empty the lists to be filled again
                cog_value_list = []
                number_overlapping_cluster_voxels_list = []
                overlapping_cluster_voxels_percentage_list = []
                MNI_cog_list = []
                cog_region_name_list = []
                """
                TODO:
                The order of the columns is not maintained
                Test again about the validity of results. The number of voxels
                is very low. Check it!
                DONE:
                Store each of the coordinates in cog_list, names in
                name_list, values in value_list, number of voxels etc.
                Find the max of the value_list and corresponding name in
                name_list and also calculate other details and store them in
                lists.
                Create a distionary with all the above created lists.
                Create a empty data frame and add the above created dictionary
                in it.
                ATLAS NAME
                SIZE (MM)
                In one For loop create the details about cluster1 and store them
                in lists as said above. As said above, add these lists in a
                dictionary.
                Then this dictionary is added to a dataframe.
                The table should look like the following:
                ROI1 Cluster1 MaxValue COG Region Total_#_voxels %_voxels
                MaxValue COG Region #_voxels %_voxles_overlap
                Value2 COG Region #_voxels %_voxles_overlap
                Value3 COG Region #_voxels %_voxles_overlap
                . .
                . .
                . .
                Cluster2 MaxValue COG Region Total_#_voxels
                MaxValue COG Region #_voxels %_voxles_overlap
                Value2 COG Region #_voxels %_voxles_overlap
                Value3 COG Region #_voxels %_voxles_overlap
                . .
                . .
                . .
                """
                pass
                # Reset the scratch volume for the next label/region.
                brain_zero.fill(0)
        # d. Number and Percentage of voxels overlapping the region
        # e. Peak coordinate of the cluster
        df_report.to_csv(out_file, index = False)
        return os.path.abspath(out_file), df_report
# TODO Test the function
def report(contrast, atlas, threshold, volume=0):
    """Build a cluster_reporting_tool for *contrast* using a named atlas.

    Parameters
    ----------
    contrast : str
        Path to the contrast (statistical map) file.
    atlas : str
        Atlas selector: 'AAL' or 'fb' (full-brain atlas).
    threshold : float
        Threshold applied to the contrast map.
    volume : int, optional
        Volume index when a 4D contrast is supplied (default 0).

    Returns
    -------
    cluster_reporting_tool
        Configured tool instance; call its ``report()`` to write the CSV.

    Raises
    ------
    ValueError
        If *atlas* is not one of the recognised atlas names.
    """
    base_path = os.path.abspath('../../Cluster-Reporting-Tool-master') + '/'
    if atlas == 'AAL':
        atlas_path = [base_path + 'aalAtlas/AAL.nii.gz']
        atlas_labels_path = [base_path + 'aalAtlas/AAL.xml']
        atlas_xml_zero_start_index = False
    elif atlas == 'fb':
        atlas_path = [base_path +
        'Full_brain_atlas_thr0-2mm/fullbrain_atlas_thr0-2mm_resample.nii']
        atlas_labels_path = [base_path +
        'Full_brain_atlas_thr0-2mm/fullbrain_atlas.xml']
        atlas_xml_zero_start_index = True
    else:
        # FIX: an unrecognised atlas previously fell through and crashed
        # later with NameError on atlas_path; fail fast and clearly instead.
        raise ValueError("Unknown atlas %r; expected 'AAL' or 'fb'" % (atlas,))
    atlas_dict = {
        'atlas_path': atlas_path,
        'atlas_labels_path': atlas_labels_path,
        'atlas_xml_zero_start_index': atlas_xml_zero_start_index
    }
    crl_obj = cluster_reporting_tool(contrast, atlas_dict, threshold, volume)
    return crl_obj
    # crl_obj.report()
if __name__ == "__main__":
    # Command-line entry point: parse flags, resolve atlas files, run report.
    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--contrast", required=True,
                    help="Path to contrast file")
    ap.add_argument("-a", "--atlas", required=False,
                    help="Path to Atlas file")
    ap.add_argument("-t", "--thresh", required=False,
                    help="Threshold")
    ap.add_argument("-v", "--vol", required=False,
                    help="Volume Number (If a 4D contrast is used as input) \
                    [Starts from 0]")
    args = vars(ap.parse_args())
    base_path = os.path.abspath('../Cluster-Reporting-Tool-master') + '/'
    # Fall back to documented defaults for any omitted optional flag.
    contrast = args["contrast"] if args["contrast"] is not None else 'map_logq_2mm.nii.gz'
    print('Using contrast %s' % contrast)
    atlas = args["atlas"] if args["atlas"] is not None else 'AAL'
    print("Using atlas %s" % atlas)
    threshold = float(args["thresh"]) if args["thresh"] is not None else 1.3
    print("Using threshold of %s" % threshold)
    volume = int(args["vol"]) if args["vol"] is not None else 0
    print("Using Volume_index %s" % str(volume))
    if atlas == 'AAL':
        atlas_path = [base_path + 'aalAtlas/AAL.nii.gz']
        atlas_labels_path = [base_path + 'aalAtlas/AAL.xml']
        atlas_xml_zero_start_index = False
    elif atlas == 'fb':
        atlas_path = [base_path +
        'Full_brain_atlas_thr0-2mm/fullbrain_atlas_thr0-2mm_resample.nii']
        atlas_labels_path = [base_path +
        'Full_brain_atlas_thr0-2mm/fullbrain_atlas.xml']
        atlas_xml_zero_start_index = True
    else:
        # FIX: an unknown atlas name previously fell through and crashed
        # later with NameError on atlas_path; report a proper CLI error.
        ap.error("Unknown atlas %r; expected 'AAL' or 'fb'" % (atlas,))
    atlas_dict = {
        'atlas_path': atlas_path,
        'atlas_labels_path': atlas_labels_path,
        'atlas_xml_zero_start_index': atlas_xml_zero_start_index
    }
    crl_obj = cluster_reporting_tool(contrast, atlas_dict, threshold, volume)
    crl_obj.report()
|
import os
# Configure settings for project
# Need to run this before calling models from application!
os.environ.setdefault('DJANGO_SETTINGS_MODULE','proTwo.settings')
import django
# Import settings
django.setup()
import random
from appTwo.models import User
from faker import Faker
fakegen = Faker()
def populate(N=5):
    '''
    Create N fake User entries.

    Uses Faker to generate a name and an email address per entry;
    get_or_create avoids inserting duplicate rows on repeated runs.
    '''
    for _ in range(N):
        # Faker names may carry honorifics or suffixes ("Dr. Jane Doe"),
        # so indexing token [1] could pick a middle token. FIX: take the
        # first token as first name and the last token as last name.
        fake_name = fakegen.name().split()
        fake_first_name = fake_name[0]
        fake_last_name = fake_name[-1]
        fake_email = fakegen.email()
        # Create (or fetch an existing) User entry; the returned object
        # is not needed, so the unused binding was dropped.
        User.objects.get_or_create(first_name=fake_first_name,
                                   last_name=fake_last_name,
                                   email=fake_email)
if __name__ == '__main__':
    # Script entry point: seed the database with 20 fake users.
    print("Populating the databases...Please Wait")
    populate(20)
    print('Populating Complete')
|
import React, { Component } from 'react';
import { Link } from 'react-router-dom';
import { signup, signInWithGoogle, signInWithGitHub } from "../helpers/auth";
export default class SignUp extends Component {
constructor() {
super();
this.state = {
error: null,
email: '',
password: '',
};
this.handleChange = this.handleChange.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
this.googleSignIn = this.googleSignIn.bind(this);
this.githubSignIn = this.githubSignIn.bind(this);
}
handleChange(event) {
this.setState({
[event.target.name]: event.target.value
});
}
async handleSubmit(event) {
event.preventDefault();
this.setState({ error: '' });
try {
await signup(this.state.email, this.state.password);
} catch (error) {
this.setState({ error: error.message });
}
}
async googleSignIn() {
try {
await signInWithGoogle();
} catch (error) {
this.setState({ error: error.message });
}
}
async githubSignIn() {
try {
await signInWithGitHub();
} catch (error) {
console.log(error)
this.setState({ error: error.message });
}
}
render() {
return (
<div className="container">
<form className="mt-5 py-5 px-5" onSubmit={this.handleSubmit}>
<h1>
Sign Up to
<Link className="title ml-2" to="/">Chatty</Link>
</h1>
<p className="lead">Fill in the form below to create an account.</p>
<div className="form-group">
<input className="form-control" placeholder="Email" name="email" type="email" onChange={this.handleChange} value={this.state.email}></input>
</div>
<div className="form-group">
<input className="form-control" placeholder="Password" name="password" onChange={this.handleChange} value={this.state.password} type="password"></input>
</div>
<div className="form-group">
{this.state.error ? <p className="text-danger">{this.state.error}</p> : null}
<button className="btn btn-primary px-5" type="submit">Sign up</button>
</div>
<p>You can also sign up with any of these services</p>
<button className="btn btn-danger mr-2" type="button" onClick={this.googleSignIn}>
Sign up with Google
</button>
<button className="btn btn-secondary" type="button" onClick={this.githubSignIn}>
Sign up with GitHub
</button>
<hr></hr>
<p>Already have an account? <Link to="/login">Login</Link></p>
</form>
</div>
)
}
}
|
# class for colorful prints
class color:
    """ANSI SGR escape sequences for colored/styled terminal output."""
    CYAN = '\033[96m'
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    YELLOW = '\033[93m'
    GREEN = '\033[92m'
    RED = '\033[91m'
    DARKGRAY = '\033[90m'
    # FIX: was '\033[48m' — SGR 48 is the extended *background* color
    # introducer, not a usable color on its own; 37 is white foreground.
    WHITE = '\033[37m'
    DARKCYAN = '\033[36m'
    # FIX: was '\033[35m', which is magenta; 30 is the black foreground code.
    BLACK = '\033[30m'
    UNDERLINE = '\033[4m'
    BOLD = '\033[1m'
    END = '\033[0m'
# Single-letter display formats accepted by the examine syntax
# (matches GDB's `x` command format letters, e.g. x/4xw).
# NOTE(review): presumed GDB context — confirm against the caller.
FORMAT_LETTER = {
    "o", # octal
    "x", # hex
    "d", # decimal
    "u", # unsigned decimal
    "t", # binary
    "f", # float
    "a", # address
    "i", # instruction
    "c", # char
    "s", # string
    "z", # hex, zero padded on the left
}
# Size letters mapped to their width in bytes.
SIZE_LETTER = {
    "b": 1, # 1-byte, byte
    "h": 2, # 2-byte, halfword
    "w": 4, # 4-byte, word
    "g": 8, # 8-byte, giant
}
|
var JobsList = React.createClass({displayName: "JobsList",
  getInitialState: function() {
    return {
      data: JobStore.getState(),
    }
  }

  // Subscribe to store changes while mounted.
  , componentDidMount: function() {
    amplify.subscribe( 'JobStore.change', this._handleChange )
  }

  // FIX: was misspelled "componentDidUnmound", which React never calls,
  // so the amplify subscription leaked after unmount.
  , componentWillUnmount: function() {
    amplify.unsubscribe( 'JobStore.change', this._handleChange )
  }

  , render: function() {
    // FIX: previously logged each item, discarded the map result, and
    // returned a JobItem referencing an undefined `job` variable.
    // Render one JobItem per job in the store data instead.
    return (
      React.createElement("div", null,
        this.state.data.map(function(job, index) {
          return React.createElement(JobItem, {job: job, key: index})
        })
      )
    );
  }

  , _handleChange: function(data) {
    this.setState({data: data})
  }
});
var JobItem = React.createClass({displayName: "JobItem",
  // Bootstrap panel showing the job title as heading and its description
  // as the panel body.
  render: function() {
    var job = this.props.job;
    var heading = React.createElement("div", {className: "panel-heading"}, job.title);
    var body = React.createElement("div", {className: "panel-body"}, job.desc);
    return React.createElement("div", {className: "panel panel-default"}, heading, body);
  }
})
|
(function () {
    "use strict";

    // Register the dlFullscreen AngularJS module with no dependencies;
    // components/directives attach to it from other files.
    angular.module("dlFullscreen", []);
})();
|
"""
Tests to ensure that the training loop works with a dict
"""
import os
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.core.step_result import TrainResult
from tests.base import EvalModelTemplate
from tests.base.deterministic_model import DeterministicModel
# test with train_step_end
# add logging + row interval tests
def test_training_step_result_log_step_only(tmpdir):
    """
    Tests that only training_step can be used with TrainResult
    Makes sure that things are routed to pbar, loggers and loss accordingly
    Makes sure pbar and logs happen on step only when requested
    """
    # enable internal debugging actions
    os.environ['PL_DEV_DEBUG'] = '1'
    # DeterministicModel logs scripted values, so exact numbers can be asserted
    model = DeterministicModel()
    model.training_step = model.training_step_result_log_step_only
    model.training_step_end = None
    model.training_epoch_end = None
    model.val_dataloader = None
    batches = 3
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=batches,
        limit_val_batches=batches,
        row_log_interval=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert not model.training_epoch_end_called
    # make sure correct metrics are logged (one per batch step as requested)
    assert len(trainer.dev_debugger.logged_metrics) == batches
    for batch_idx, logged_metrics in enumerate(trainer.dev_debugger.logged_metrics):
        assert logged_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11.0
        assert logged_metrics[f'step_log_acc2_b{batch_idx}'] == 12.0
        assert f'step_pbar_acc3_b{batch_idx}' not in logged_metrics
        assert len(logged_metrics) == 4
    # make sure we are using the correct metrics for callbacks
    assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
    # make sure pbar metrics are correct ang log metrics did not leak
    for batch_idx in range(batches):
        assert trainer.logger_connector.progress_bar_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11
        assert trainer.logger_connector.progress_bar_metrics[f'step_pbar_acc3_b{batch_idx}'] == 13
        assert f'step_log_acc2_b{batch_idx}' not in trainer.logger_connector.progress_bar_metrics
    # make sure training outputs what is expected
    # (loop-and-break grabs just the first batch from the dataloader)
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break
    out = trainer.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert out.batch_log_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11.0
    assert out.batch_log_metrics[f'step_log_acc2_b{batch_idx}'] == 12.0
    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out, TrainResult)
    assert 'minimize' in train_step_out
    assert f'step_log_and_pbar_acc1_b{batch_idx}' in train_step_out
    assert f'step_log_acc2_b{batch_idx}' in train_step_out
    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
    assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_result_log_epoch_only(tmpdir):
    """
    Tests that only training_step can be used with TrainResult
    Makes sure that things are routed to pbar, loggers and loss accordingly
    Makes sure pbar and logs happen on epoch only when requested
    """
    # enable internal debugging actions
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    model.training_step = model.training_step_result_log_epoch_only
    model.training_step_end = None
    model.training_epoch_end = None
    model.val_dataloader = None
    epochs = 3
    batches = 2
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=batches,
        limit_val_batches=batches,
        row_log_interval=1,
        max_epochs=epochs,
        weights_summary=None,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert not model.training_epoch_end_called
    # make sure correct metrics are logged (one per batch step as requested)
    assert len(trainer.dev_debugger.logged_metrics) == epochs
    epoch_metrics = trainer.dev_debugger.logged_metrics
    assert len(epoch_metrics) == epochs
    # NOTE(review): the loop variable is named batch_idx but it actually
    # indexes epochs here (one logged-metrics entry per epoch)
    for batch_idx, logged_metrics in enumerate(epoch_metrics):
        assert logged_metrics[f'epoch_log_and_pbar_acc1_e{batch_idx}'] == 14.0
        assert logged_metrics[f'epoch_log_acc2_e{batch_idx}'] == 15.0
        assert f'epoch_pbar_acc3_e{batch_idx}' not in logged_metrics
        assert len(logged_metrics) == 4
    # make sure we are using the correct metrics for callbacks
    assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
    # make sure pbar metrics are correct ang log metrics did not leak
    for epoch_idx in range(epochs):
        assert trainer.logger_connector.progress_bar_metrics[f'epoch_log_and_pbar_acc1_e{epoch_idx}'] == 14
        assert trainer.logger_connector.progress_bar_metrics[f'epoch_pbar_acc3_e{epoch_idx}'] == 16
        assert f'epoch_log_acc2_e{epoch_idx}' not in trainer.logger_connector.progress_bar_metrics
    # make sure training outputs what is expected
    # (loop-and-break grabs just the first batch from the dataloader)
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break
    out = trainer.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert len(out.batch_log_metrics) == 0
    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out, TrainResult)
    assert 'minimize' in train_step_out
    assert f'epoch_log_and_pbar_acc1_e{trainer.current_epoch}' in train_step_out
    assert f'epoch_log_acc2_e{trainer.current_epoch}' in train_step_out
    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
    assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_result_log_step_and_epoch(tmpdir):
    """
    Tests that only training_step can be used with TrainResult
    Makes sure that things are routed to pbar, loggers and loss accordingly
    Makes sure pbar and logs happen on epoch only when requested
    """
    # enable internal debugging actions
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    model.training_step = model.training_step_result_log_epoch_and_step
    model.training_step_end = None
    model.training_epoch_end = None
    model.val_dataloader = None
    epochs = 3
    batches = 2
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=batches,
        limit_val_batches=batches,
        row_log_interval=1,
        max_epochs=epochs,
        weights_summary=None,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert not model.training_epoch_end_called
    # make sure correct metrics are logged (one per batch step as requested)
    # each epoch contributes `batches` step entries plus one epoch entry
    assert len(trainer.dev_debugger.logged_metrics) == (epochs * batches) + epochs
    epoch_metrics = trainer.dev_debugger.logged_metrics
    epoch_idx = -1
    # walk the flat log in chunks of (batches step-entries + 1 epoch-entry)
    for i_start in range(0, len(epoch_metrics), batches + 1):
        epoch_idx += 1
        epoch_outputs = epoch_metrics[i_start: i_start + batches + 1]
        mean_vals = {
            'epoch_step_epoch_log_and_pbar_acc1': [],
            'epoch_step_epoch_log_acc2': []
        }
        # make sure each batch logged the expected value
        for batch_idx in range(len(epoch_outputs) - 1):
            logged_metrics = epoch_outputs[batch_idx]
            expected_val_1 = (5 + batch_idx) * (epoch_idx + 1)
            expected_val_2 = (6 + batch_idx) * (epoch_idx + 1)
            mean_vals['epoch_step_epoch_log_and_pbar_acc1'].append(torch.tensor(expected_val_1).float())
            mean_vals['epoch_step_epoch_log_acc2'].append(torch.tensor(expected_val_2).float())
            assert logged_metrics['step_step_epoch_log_and_pbar_acc1'] == expected_val_1
            assert logged_metrics['step_step_epoch_log_acc2'] == expected_val_2
            assert 'step_epoch_pbar_acc3' not in logged_metrics
            assert len(logged_metrics) == 4
        # make sure the metrics for the epoch end are actual means (the default reduce fx) or all the batches
        epoch_end_metrics = epoch_outputs[-1]
        eval_1 = torch.stack(mean_vals['epoch_step_epoch_log_and_pbar_acc1']).mean()
        eval_2 = torch.stack(mean_vals['epoch_step_epoch_log_acc2']).mean()
        assert epoch_end_metrics['epoch_step_epoch_log_and_pbar_acc1'] == eval_1
        assert epoch_end_metrics['epoch_step_epoch_log_acc2'] == eval_2
        assert 'step_epoch_pbar_acc3' not in epoch_end_metrics
        assert len(logged_metrics) == 4
    # make sure we are using the correct metrics for callbacks
    assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
    # -------------------------------
    # VERIFY PBAR METRICS
    # -------------------------------
    # make sure pbar metrics are correct ang log metrics did not leak
    all_pbar_metrics = trainer.dev_debugger.pbar_added_metrics
    assert len(all_pbar_metrics) == (epochs * batches) + epochs
    epoch_idx = -1
    for i_start in range(0, len(all_pbar_metrics), batches + 1):
        epoch_idx += 1
        epoch_outputs = all_pbar_metrics[i_start: i_start + batches + 1]
        mean_vals = {
            'epoch_step_epoch_log_and_pbar_acc1': [],
            'epoch_step_epoch_pbar_acc3': []
        }
        # make sure each batch logged the expected value
        for batch_idx in range(len(epoch_outputs) - 1):
            logged_metrics = epoch_outputs[batch_idx]
            expected_val_1 = (5 + batch_idx) * (epoch_idx + 1)
            expected_val_2 = (7 + batch_idx) * (epoch_idx + 1)
            mean_vals['epoch_step_epoch_log_and_pbar_acc1'].append(torch.tensor(expected_val_1).float())
            mean_vals['epoch_step_epoch_pbar_acc3'].append(torch.tensor(expected_val_2).float())
            assert logged_metrics['step_step_epoch_log_and_pbar_acc1'] == expected_val_1
            assert logged_metrics['step_step_epoch_pbar_acc3'] == expected_val_2
            assert 'step_epoch_log_acc2' not in logged_metrics
            assert len(logged_metrics) == 3
        # make sure the metrics for the epoch end are actual means (the default reduce fx) or all the batches
        epoch_end_metrics = epoch_outputs[-1]
        eval_1 = torch.stack(mean_vals['epoch_step_epoch_log_and_pbar_acc1']).mean()
        eval_2 = torch.stack(mean_vals['epoch_step_epoch_pbar_acc3']).mean()
        assert epoch_end_metrics['epoch_step_epoch_log_and_pbar_acc1'] == eval_1
        assert epoch_end_metrics['epoch_step_epoch_pbar_acc3'] == eval_2
        assert 'step_epoch_log_acc2' not in epoch_end_metrics
        assert len(logged_metrics) == 3
    # -----------------------------------------
    # make sure training outputs what is expected
    # -----------------------------------------
    # (loop-and-break grabs just the first batch from the dataloader)
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break
    out = trainer.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert len(out.batch_log_metrics) == 2
    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out, TrainResult)
    assert 'minimize' in train_step_out
    assert 'step_step_epoch_log_and_pbar_acc1' in train_step_out
    assert 'step_step_epoch_log_acc2' in train_step_out
    assert 'epoch_step_epoch_log_and_pbar_acc1' in train_step_out
    assert 'epoch_step_epoch_log_acc2' in train_step_out
    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
    assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_epoch_end_result(tmpdir):
    """
    Makes sure training_step and epoch_end can be used with Results (without batch_end)
    """
    # enable internal debugging actions
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    model.training_step = model.training_step_result_log_epoch_and_step
    model.training_epoch_end = model.training_epoch_end_return_for_log_epoch_and_step
    model.val_dataloader = None
    batches = 3
    epochs = 1
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=epochs,
        row_log_interval=1,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert model.training_epoch_end_called
    # make sure correct metrics were logged
    logged_metrics = trainer.dev_debugger.logged_metrics
    assert len(logged_metrics) == (epochs * batches) + epochs
    last_logged = logged_metrics[-1]
    assert last_logged['epoch_step_epoch_log_and_pbar_acc1'] == 210.0
    assert last_logged['epoch_step_epoch_log_acc2'] == 336.0
    assert last_logged['epoch_epoch_end_log_acc'] == 1212.0
    assert last_logged['epoch_epoch_end_log_pbar_acc'] == 1214.0
    assert 'epoch_end_pbar_acc' not in last_logged
    # make sure pbar metrics are correct
    logged_pbar = trainer.dev_debugger.pbar_added_metrics
    assert len(logged_pbar) == (epochs * batches) + epochs
    assert trainer.logger_connector.progress_bar_metrics['epoch_step_epoch_log_and_pbar_acc1'] == 210.0
    assert trainer.logger_connector.progress_bar_metrics['step_step_epoch_log_and_pbar_acc1'] == 7.0
    assert trainer.logger_connector.progress_bar_metrics['epoch_step_epoch_pbar_acc3'] == 504.0
    assert trainer.logger_connector.progress_bar_metrics['epoch_epoch_end_pbar_acc'] == 1213.0
    assert trainer.logger_connector.progress_bar_metrics['epoch_epoch_end_log_pbar_acc'] == 1214.0
    assert 'epoch_end_log_acc' not in trainer.logger_connector.progress_bar_metrics
    assert 'log_acc2' not in trainer.logger_connector.progress_bar_metrics
    # make sure callback metrics didn't change
    assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
    # -----------------------------------------
    # make sure training outputs what is expected
    # -----------------------------------------
    # (loop-and-break grabs just the first batch from the dataloader)
    for batch_idx, batch in enumerate(model.train_dataloader()):
        break
    out = trainer.run_training_batch(batch, batch_idx, 0)
    assert out.signal == 0
    assert len(out.batch_log_metrics) == 2
    train_step_out = out.training_step_output_for_epoch_end
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out, TrainResult)
    assert 'minimize' in train_step_out
    assert 'step_step_epoch_log_and_pbar_acc1' in train_step_out
    assert 'epoch_step_epoch_log_and_pbar_acc1' in train_step_out
    assert 'step_step_epoch_log_acc2' in train_step_out
    assert 'epoch_step_epoch_log_acc2' in train_step_out
    # make sure the optimizer closure returns the correct things
    opt_closure_result = trainer.train_loop.training_step_and_backward(
        batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
    assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_no_auto_callbacks_with_train_loop_only(tmpdir):
    """
    Make sure early stop + checkpoint work with only a train loop
    """
    # enable internal debugging actions
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    model.training_step = model.training_step_no_default_callbacks_for_train_loop
    model.training_epoch_end = None
    model.val_dataloader = None
    batches = 3
    epochs = 3
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=epochs,
        row_log_interval=1,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    all_losses = trainer.dev_debugger.saved_train_losses
    assert len(all_losses) == batches * epochs
    # with no val loop, checkpointing keys off the 'checkpoint_on' result
    # and no early stopping is installed by default
    assert trainer.checkpoint_callback.monitor == 'checkpoint_on'
    assert trainer.early_stop_callback is None
    # second run: explicitly requesting early stopping falls back to the
    # default 'val_loss' monitor
    trainer = Trainer(
        default_root_dir=tmpdir,
        early_stop_callback=True,
        max_epochs=epochs,
        row_log_interval=1,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    assert trainer.early_stop_callback.monitor == 'val_loss'
def test_no_callbacks_with_train_loop_only(tmpdir):
    """
    Make sure early stop + checkpoint work with only a train loop
    """
    # Record callback activity via the dev debugger.
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    # training_step variant that logs no callback metrics at all.
    model.training_step = model.training_step_no_callbacks_result_obj
    model.training_epoch_end = None
    model.val_dataloader = None
    batches = 3
    epochs = 3
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=epochs,
        row_log_interval=1,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    # One recorded loss per batch per epoch.
    all_losses = trainer.dev_debugger.saved_train_losses
    assert len(all_losses) == batches * epochs
    # With nothing to monitor, neither early stopping nor checkpointing fires.
    assert trainer.early_stop_callback is None
    assert len(trainer.dev_debugger.checkpoint_callback_history) == 0
    assert len(trainer.dev_debugger.early_stopping_history) == 0
def test_use_callbacks_with_train_loop_only(tmpdir):
    """Early stopping + checkpointing driven purely by train-loop metrics."""
    os.environ['PL_DEV_DEBUG'] = '1'
    model = DeterministicModel()
    # training_step variant that logs step and epoch metrics for the callbacks.
    model.training_step = model.training_step_result_log_epoch_and_step_for_callbacks
    model.training_epoch_end = None
    model.val_dataloader = None
    batches = 3
    epochs = 300
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=epochs,
        early_stop_callback=True,
        row_log_interval=1,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    # The deterministic model is built to trigger early stopping long before
    # max_epochs; 10 epochs is the expected stopping point.
    num_expected_epochs = 10
    # ----------------------------------
    # VERIFY EARLY STOPPING BEHAVIOR
    # ----------------------------------
    # with train loop only it happens on every epoch
    early_stop_vals = trainer.dev_debugger.early_stopping_history
    assert len(early_stop_vals) == num_expected_epochs
    min_val = min([x['best'] for x in early_stop_vals])
    assert min_val == 171 + 9
    all_losses = trainer.dev_debugger.saved_train_losses
    from collections import Counter
    batch_idxs = Counter([x['batch_idx'] for x in all_losses])
    # Every batch index 0..2 must have been seen once per completed epoch.
    for i, val in batch_idxs.items():
        assert val == num_expected_epochs
        assert i in [0, 1, 2]
    # ----------------------------------
    # VERIFY CHECKPOINTING BEHAVIOR
    # ----------------------------------
    ckpt_vals = trainer.dev_debugger.checkpoint_callback_history
    assert len(ckpt_vals) == 5, '5 ckpts should have been saved'
    for ckpt_val, expected_epoch in zip(ckpt_vals, [0, 1, 2, 3, 6]):
        assert ckpt_val['epoch'] == expected_epoch
        assert ckpt_val['monitor'] == 'checkpoint_on'
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_full_train_loop_with_results_obj_dp(tmpdir):
    """Full train loop (step / step_end / epoch_end) with Result objects under DP."""
    os.environ['PL_DEV_DEBUG'] = '1'
    batches = 10
    epochs = 3
    model = EvalModelTemplate()
    # Train-only setup: disable validation/test hooks and dataloaders.
    model.validation_step = None
    model.test_step = None
    model.training_step = model.training_step_full_loop_result_obj_dp
    model.training_step_end = model.training_step_end_full_loop_result_obj_dp
    model.training_epoch_end = model.training_epoch_end_full_loop_result_obj_dp
    model.val_dataloader = None
    model.test_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        distributed_backend='dp',
        gpus=[0, 1],
        max_epochs=epochs,
        early_stop_callback=True,
        row_log_interval=2,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    # make sure we saw all the correct keys
    seen_keys = set()
    for metric in trainer.dev_debugger.logged_metrics:
        seen_keys.update(metric.keys())
    # Each training hook must have contributed its logged metric.
    assert 'train_step_metric' in seen_keys
    assert 'train_step_end_metric' in seen_keys
    assert 'epoch_train_epoch_end_metric' in seen_keys
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_loop_steps_only_dp(tmpdir):
    """Train + val loops with only the *_step hooks defined, under DP."""
    os.environ['PL_DEV_DEBUG'] = '1'
    batches = 10
    epochs = 3
    model = EvalModelTemplate()
    model.validation_step = None
    model.test_step = None
    # Only the step hooks are defined; *_step_end and *_epoch_end are disabled.
    model.training_step = model.training_step_result_obj_dp
    model.training_step_end = None
    model.training_epoch_end = None
    model.validation_step = model.validation_step_result_obj_dp
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        distributed_backend='dp',
        gpus=[0, 1],
        max_epochs=epochs,
        early_stop_callback=True,
        row_log_interval=2,
        limit_train_batches=batches,
        weights_summary=None,
    )
    trainer.fit(model)
    # Both step hooks must have actually run (flags set by the template model).
    assert model.training_step_called
    assert model.validation_step_called
def test_result_map(tmpdir):
    """Renaming keys on a TrainResult drops the old names and exposes the new ones."""
    res = TrainResult()
    res.log_dict({'x1': torch.tensor(1), 'x2': torch.tensor(2)})
    res.rename_keys({'x1': 'y1', 'x2': 'y2'})
    # Old keys are gone, new keys are present.
    for old_key, new_key in (('x1', 'y1'), ('x2', 'y2')):
        assert old_key not in res
        assert new_key in res
def test_result_monitor_warnings(tmpdir):
    """
    Tests that we warn when the monitor key is changed and we use Results obj
    """
    model = EvalModelTemplate()
    model.test_step = None
    model.training_step = model.training_step_result_obj
    model.training_step_end = None
    model.training_epoch_end = None
    model.validation_step = model.validation_step_result_obj
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_dataloader = None
    # ModelCheckpoint monitoring a key the Result object never sets must warn.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        early_stop_callback=True,
        row_log_interval=2,
        limit_train_batches=2,
        weights_summary=None,
        checkpoint_callback=ModelCheckpoint(monitor='not_val_loss')
    )
    with pytest.warns(UserWarning, match='key of ModelCheckpoint has no effect'):
        trainer.fit(model)
    # Same for EarlyStopping with a monitor key the Result object does not log.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        row_log_interval=2,
        limit_train_batches=2,
        weights_summary=None,
        early_stop_callback=EarlyStopping(monitor='not_val_loss')
    )
    with pytest.warns(UserWarning, match='key of EarlyStopping has no effect'):
        trainer.fit(model)
|
"use strict";
// NOTE: compiled TypeScript (ES5/CommonJS) output -- edit the .ts source,
// not this file; see the sourceMappingURL comment at the bottom.
// tsc helper: ES5 emulation of `class X extends Y` prototype chaining.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
// tsc helper: wraps CommonJS imports so `.default` access works uniformly.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Component = void 0;
var react_1 = __importDefault(require("react"));
// Empty subclass of React.Component, re-exported (compiled form of
// `export class Component extends React.Component {}`).
var Component = /** @class */ (function (_super) {
    __extends(Component, _super);
    function Component() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return Component;
}(react_1.default.Component));
exports.Component = Component;
//# sourceMappingURL=types.js.map
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import subprocess
from pathlib import Path
from typing import List, Optional
import libcst
from typing_extensions import Final
from ..configuration import Configuration
from ..errors import Errors, PartialErrorSuppression
from ..filesystem import (
LocalMode,
add_local_mode,
find_directories,
find_files,
find_targets,
get_filesystem,
remove_non_pyre_ignores,
)
from ..repository import Repository
from .command import CommandArguments, ErrorSuppressingCommand
from .strict_default import StrictDefault
LOG: logging.Logger = logging.getLogger(__name__)
class TargetPyreRemover(libcst.CSTTransformer):
    """CST transform stripping type-check fields from pyre-only build targets.

    For a target rule that is pyre-checked (``check_types`` is true and
    ``check_types_options`` does not mention mypy) the ``typing``,
    ``typing_options``, ``check_types`` and ``check_types_options`` keyword
    arguments are dropped, since the generated .pyre_configuration.local now
    controls type checking.  Other call nodes are left untouched.
    """

    def leave_Call(
        self, original_node: libcst.Call, updated_node: libcst.Call
    ) -> libcst.Call:
        check_types = False
        uses_pyre = True
        updated_fields = []
        for field in original_node.args:
            name = field.keyword
            value = field.value
            if not name:
                # Positional argument: never a typing-related field; also not
                # kept in updated_fields (it is skipped entirely).
                continue
            name = name.value
            if name == "check_types":
                if isinstance(value, libcst.Name):
                    check_types = check_types or value.value.lower() == "true"
            elif name == "check_types_options":
                if isinstance(value, libcst.SimpleString):
                    # Any mention of "mypy" means the target is not pyre-only.
                    uses_pyre = uses_pyre and "mypy" not in value.value.lower()
            elif name not in ["typing", "typing_options"]:
                # Keep every field that is not typing-related.
                updated_fields.append(field)
        if check_types and uses_pyre:
            return updated_node.with_changes(args=updated_fields)
        return updated_node
class TargetsToConfiguration(ErrorSuppressingCommand):
    """Codemod that migrates per-TARGETS type-check settings into local
    ``.pyre_configuration.local`` files and suppresses resulting errors."""

    def __init__(
        self,
        command_arguments: CommandArguments,
        *,
        repository: Repository,
        subdirectory: Optional[str],
        glob: int,
        fixme_threshold: int,
        no_commit: bool,
        submit: bool,
        pyre_only: bool,
        strict: bool,
    ) -> None:
        super().__init__(command_arguments, repository)
        # Restrict the migration to this directory when given.
        self._subdirectory: Final[Optional[str]] = subdirectory
        # Non-zero: use one toplevel glob target, falling back to individual
        # targets when any file exceeds this many errors.
        self._glob: int = glob
        # Per-file error count above which a file-level ignore is added.
        self._fixme_threshold: int = fixme_threshold
        self._no_commit: bool = no_commit
        self._submit: bool = submit
        self._pyre_only: bool = pyre_only
        self._strict: bool = strict

    @staticmethod
    def from_arguments(
        arguments: argparse.Namespace, repository: Repository
    ) -> "TargetsToConfiguration":
        """Build the command from parsed CLI arguments."""
        command_arguments = CommandArguments.from_arguments(arguments)
        return TargetsToConfiguration(
            command_arguments,
            repository=repository,
            subdirectory=arguments.subdirectory,
            glob=arguments.glob,
            fixme_threshold=arguments.fixme_threshold,
            no_commit=arguments.no_commit,
            submit=arguments.submit,
            pyre_only=arguments.pyre_only,
            strict=arguments.strict,
        )

    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
        """Register this subcommand's CLI flags on ``parser``."""
        super(TargetsToConfiguration, cls).add_arguments(parser)
        parser.set_defaults(command=cls.from_arguments)
        parser.add_argument(
            "--subdirectory", help="Only upgrade TARGETS files within this directory."
        )
        parser.add_argument(
            "--glob",
            type=int,
            help="Use a toplevel glob target instead of listing individual targets. \
            Fall back to individual targets if errors per file ever hits given \
            threshold.",
        )
        parser.add_argument(
            "--fixme-threshold",
            type=int,
            help="Ignore all errors in a file if fixme count exceeds threshold.",
        )
        parser.add_argument(
            "--strict",
            action="store_true",
            help="Turn on default strict mode if any targets were strict.",
        )
        parser.add_argument(
            "--pyre-only",
            action="store_true",
            help="Only convert pyre targets to configuration.",
        )
        parser.add_argument(
            "--no-commit", action="store_true", help="Keep changes in working state."
        )
        parser.add_argument("--submit", action="store_true", help=argparse.SUPPRESS)

    def remove_target_typing_fields(self, files: List[Path]) -> None:
        """Strip typing-related fields from the given TARGETS files."""
        LOG.info("Removing typing options from %s targets files", len(files))
        if self._pyre_only and not self._glob:
            # Precise CST-based removal: only touches pyre-checked targets.
            for path in files:
                targets_file = Path(path)
                source = targets_file.read_text()
                output = libcst.parse_module(source).visit(TargetPyreRemover()).code
                targets_file.write_text(output)
        else:
            # Bulk removal with sed: delete any typing-related field line.
            typing_options_regex = [
                r"typing \?=.*",
                r"check_types \?=.*",
                r"check_types_options \?=.*",
                r"typing_options \?=.*",
            ]
            remove_typing_fields_command = [
                "sed",
                "-i",
                "/" + r"\|".join(typing_options_regex) + "/d",
            ] + [str(file) for file in files]
            subprocess.run(remove_typing_fields_command)

    def convert_directory(self, directory: Path) -> None:
        """Create or amend a local configuration for ``directory``, remove the
        per-target typing fields, and suppress any newly reported errors."""
        all_targets = find_targets(directory, pyre_only=self._pyre_only)
        if not all_targets:
            LOG.warning("No configuration created because no targets found.")
            return
        if self._glob:
            # Glob mode: one recursive target covering the whole directory.
            new_targets = ["//" + str(directory) + "/..."]
            targets_files = [
                directory / path
                for path in get_filesystem().list(
                    str(directory), patterns=[r"**/TARGETS"]
                )
            ]
        else:
            new_targets = []
            targets_files = []
            for path, targets in all_targets.items():
                targets_files.append(Path(path))
                new_targets += [
                    "//" + path.replace("/TARGETS", "") + ":" + target.name
                    for target in targets
                ]
            # NOTE(review): `targets` is the loop variable leaked from the loop
            # above, so only the last TARGETS file's entries are inspected here;
            # on the --glob path this name is never bound and the later
            # `if apply_strict:` check would raise NameError -- verify.
            apply_strict = self._strict and any(target.strict for target in targets)
        configuration_path = directory / ".pyre_configuration.local"
        if configuration_path.exists():
            LOG.warning(
                "Pyre project already exists at %s.\n\
                Amending targets to existing configuration.",
                configuration_path,
            )
            configuration = Configuration(configuration_path)
            configuration.add_targets(new_targets)
            configuration.deduplicate_targets()
            configuration.write()
        else:
            LOG.info("Creating local configuration at %s.", configuration_path)
            configuration_contents = {"targets": new_targets}
            configuration = Configuration(configuration_path, configuration_contents)
            configuration.write()

        # Add newly created configuration files to version control
        self._repository.add_paths([configuration_path])

        # Remove all type-related target settings
        self.remove_target_typing_fields(targets_files)
        if not self._pyre_only:
            remove_non_pyre_ignores(directory)

        all_errors = configuration.get_errors()
        error_threshold = self._fixme_threshold
        glob_threshold = self._glob

        for path, errors in all_errors:
            errors = list(errors)
            error_count = len(errors)
            if glob_threshold and error_count > glob_threshold:
                # Fall back to non-glob codemod.
                LOG.info(
                    "Exceeding error threshold of %d; falling back to listing "
                    "individual targets.",
                    glob_threshold,
                )
                self._repository.revert_all(remove_untracked=True)
                # NOTE(review): `_glob` is annotated `int` but cleared with
                # None here; 0 would keep the annotation honest -- confirm.
                self._glob = None
                return self.run()
            if error_threshold and error_count > error_threshold:
                LOG.info(
                    "%d errors found in `%s`. Adding file-level ignore.",
                    error_count,
                    path,
                )
                add_local_mode(path, LocalMode.IGNORE)
            else:
                try:
                    self._suppress_errors(Errors(errors))
                except PartialErrorSuppression:
                    LOG.warning(f"Could not suppress all errors in {path}")
                    LOG.info("Run with --unsafe to force suppression anyway.")
                    self._repository.revert_all(remove_untracked=True)

        if apply_strict:
            LOG.info(
                "Some targets were running strict type checking. "
                "Adding strict setting to configuration."
            )
            strict_codemod = StrictDefault(
                self._command_arguments,
                repository=self._repository,
                local_configuration=directory,
                remove_strict_headers=True,
                fixme_threshold=0,
            )
            strict_codemod.run()

        # Lint and re-run pyre once to resolve most formatting issues
        if self._lint:
            if self._repository.format():
                errors = configuration.get_errors(should_clean=False)
                try:
                    self._suppress_errors(errors)
                except PartialErrorSuppression:
                    # NOTE(review): `path` here is the leaked loop variable from
                    # the error loop above, so the message may name the wrong
                    # file -- verify.
                    LOG.warning(f"Could not suppress all errors in {path}")
                    LOG.info("Run with --unsafe to force suppression anyway.")
                    self._repository.revert_all(remove_untracked=True)

    def run(self) -> None:
        # TODO(T62926437): Basic integration testing.
        """Convert every configuration directory under the target subdirectory
        and submit the resulting changes."""
        subdirectory = self._subdirectory
        subdirectory = Path(subdirectory) if subdirectory else Path.cwd()
        LOG.info(
            "Converting typecheck targets to pyre configurations in `%s`", subdirectory
        )
        configuration_directories = self._gather_directories(subdirectory)
        converted = []
        for directory in configuration_directories:
            # Skip directories nested under one that was already converted.
            if all(
                str(directory).startswith(str(converted_directory)) is False
                for converted_directory in converted
            ):
                self.convert_directory(directory)
                converted.append(directory)

        summary = self._repository.MIGRATION_SUMMARY
        glob = self._glob
        if glob:
            summary += (
                f"\n\nConfiguration target automatically expanded to include "
                f"all subtargets, expanding type coverage while introducing "
                f"no more than {glob} fixmes per file."
            )
        title = f"Convert type check targets in {subdirectory} to use configuration"
        self._repository.submit_changes(
            commit=(not self._no_commit),
            submit=self._submit,
            title=title,
            summary=summary,
            set_dependencies=False,
        )

    def _gather_directories(self, subdirectory: Path) -> List[Path]:
        """Collect existing local-configuration directories under
        ``subdirectory``, adding intermediate directories that no existing
        configuration covers; falls back to ``subdirectory`` itself when none
        exist."""
        configurations = find_files(subdirectory, ".pyre_configuration.local")
        configuration_directories = [
            configuration.replace("/.pyre_configuration.local", "")
            for configuration in configurations
        ]
        # Shallowest directories first, ties broken lexicographically.
        sorted_directories = sorted(
            (directory.split("/") for directory in configuration_directories),
            # pyre-fixme[6]: Expected `(_T) -> _SupportsLessThan` for 2nd param but
            # got `(directory: Any) -> Tuple[int, typing.Any]`.
            key=lambda directory: (len(directory), directory),
        )
        if len(configuration_directories) == 0:
            configuration_directories = [str(subdirectory)]
        else:
            # Fill in missing coverage
            missing_directories = []
            current_depth = len(str(subdirectory).split("/"))
            for directory in sorted_directories:
                if len(directory) <= current_depth:
                    continue
                all_subdirectories = find_directories(
                    Path("/".join(directory[0:current_depth]))
                )
                # NOTE(review): this loop variable shadows the `subdirectory`
                # parameter; correct within the inner loop but fragile -- verify.
                for subdirectory in all_subdirectories:
                    if all(
                        not configuration_directory.startswith(str(subdirectory))
                        for configuration_directory in configuration_directories
                    ):
                        missing_directories.append(subdirectory)
                current_depth += 1
            configuration_directories.extend(missing_directories)
        return [Path(directory) for directory in configuration_directories]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""ONNX: Open Neural Network Exchange frontend for Relay."""
import copy
import warnings
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn as _qnn
from .. import vision as _vision
from .. import loops as _loops
from .. import ty as _ty
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_value, fold_constant
from .common import infer_type, get_name
__all__ = ["from_onnx"]
class onnx_input:
    """Hybrid container exposing ONNX node inputs by position, name, or slice."""

    def __init__(self):
        # Insertion-ordered key list plus a name -> value mapping.
        self.input_keys = []
        self.input_dict = {}

    def __getitem__(self, item):
        """Read by integer position, key name, or slice; missing entries give None."""
        if isinstance(item, int):
            if item > (len(self.input_keys) - 1):
                return None
            return self.input_dict[self.input_keys[item]]
        if isinstance(item, str):
            return self.input_dict[item] if item in self.input_keys else None
        if isinstance(item, slice):
            return [self.input_dict[key] for key in self.input_keys[item]]
        raise ValueError("Only integer, string, and slice accesses allowed.")

    def __setitem__(self, item, value):
        """Write by integer position (existing entry) or by key name (appends)."""
        if isinstance(item, int):
            self.input_dict[self.input_keys[item]] = value
        elif isinstance(item, str):
            self.input_keys.append(item)
            self.input_dict[item] = value
        else:
            raise ValueError("Only integer and string indexed writes allowed.")

    def keys(self):
        """Return the ordered list of input names."""
        return self.input_keys

    def __len__(self):
        return len(self.input_keys)

    def __iter__(self):
        # Stateful iteration over values in insertion order.
        self.n = 0
        return self

    def __next__(self):
        if self.n >= len(self.input_keys):
            raise StopIteration
        value = self.input_dict[self.input_keys[self.n]]
        self.n += 1
        return value
def get_numpy(tensor_proto):
    """Grab data in TensorProto and convert to numpy array."""
    try:
        import onnx.numpy_helper
    except ImportError as e:
        raise ImportError("Unable to import onnx which is required {}".format(e))
    return onnx.numpy_helper.to_array(tensor_proto)
def get_type(elem_type):
    """Converts onnx integer datatype to numpy datatype"""
    try:
        import onnx.mapping
    except ImportError as e:
        raise ImportError("Unable to import onnx which is required {}".format(e))
    return str(onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
def get_info(info_proto):
    """Extract the shape from a ValueInfoProto.

    Returns a ``(name, shape, dtype, shape_name)`` tuple where ``shape`` uses
    relay ``Any()`` for symbolic/unknown dimensions and ``shape_name`` keeps
    the symbolic dimension names for use in messages.
    """
    shape = []
    shape_name = []
    for dim in info_proto.type.tensor_type.shape.dim:
        name = dim.dim_param
        value = dim.dim_value
        # Protobuf reports unset integer fields as 0, so a 0/None dim_value
        # means the dimension is symbolic (named via dim_param).
        if value is None or value == 0:
            value = _ty.Any()
            shape_name.append(name)
        else:
            shape_name.append(value)
        shape.append(value)
    name = info_proto.name
    dtype = get_type(info_proto.type.tensor_type.elem_type)
    return name, shape, dtype, shape_name
def dimension_picker(prefix, suffix=""):
    """Return a closure mapping kernel rank (1-3) to an op name like prefix+'2d'+suffix."""

    def _impl(attr):
        rank = len(attr["kernel_shape"])
        if rank in (1, 2, 3):
            return "{}{}d{}".format(prefix, rank, suffix)
        msg = "Only 1D, 2D, and 3D kernels are supported for operator {}."
        raise tvm.error.OpAttributeInvalid(msg.format(prefix + "1d/2d/3d"))

    return _impl
def revert_caffe2_pad(pads):
    """Caffe2 requires two times the normal padding."""
    if len(pads) == 2:
        return pads
    if len(pads) == 4:
        # Keep only the leading (before) pair.
        return pads[:2]
    raise tvm.error.OpAttributeInvalid("Number of pads must be either 2 or 4.")
def get_pad_pair(input1d, kernel1d, stride1d, mode):
    """infer pad size"""
    remainder = input1d % stride1d
    total = max(kernel1d - (stride1d if remainder == 0 else remainder), 0)
    pad_before = total // 2
    pad_after = total - pad_before
    # SAME_LOWER places the extra padding (when total is odd) at the start.
    return [pad_after, pad_before] if "LOWER" in mode else [pad_before, pad_after]
def onnx_default_layout(dims, op_name):
    """Return the default channel-first layout string for 1D/2D/3D data."""
    layouts = {1: "NCW", 2: "NCHW", 3: "NCDHW"}
    if dims in layouts:
        return layouts[dims]
    msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}."
    raise tvm.error.OpAttributeInvalid(msg.format(op_name))
def onnx_storage_order2layout(storage_order, dims, op_name):
    """converter of onnx storage order parameter to tvm storage order format"""
    if storage_order not in (0, 1):
        raise tvm.error.OpAttributeInvalid("Mode of storage_order must be either 0 or 1")
    # Index 0 = channel-first (row major), index 1 = channel-last (col major).
    layouts = {1: ("NCW", "NWC"), 2: ("NCHW", "NHWC"), 3: ("NCDHW", "NDHWC")}
    if dims in layouts:
        return layouts[dims][storage_order]
    msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}."
    raise tvm.error.OpAttributeInvalid(msg.format(op_name))
def dimension_constraint():
    """Build an AttrCvt custom check rejecting kernels that are not 1D/2D/3D."""

    def _dim_check(attrs):
        return len(attrs["kernel_shape"]) in (1, 2, 3)

    return _dim_check, "Only 1d, 2d and 3d kernel supported."
class OnnxOpConverter(object):
    """A helper class for holding onnx op converters."""

    @classmethod
    def get_converter(cls, opset):
        """Get converter matches given opset.

        Parameters
        ----------
        opset: int
            opset from model.

        Returns
        -------
        converter, which should be `_impl_vx`. Number x is the biggest
        number smaller than or equal to opset belongs to all support versions.
        """
        implemented = sorted(
            int(name.replace("_impl_v", "")) for name in dir(cls) if "_impl_v" in name
        )
        ordered = sorted(implemented + [opset])
        # Position just before the (last) occurrence of `opset`; wraps around
        # to the newest version when opset precedes every implemented version.
        last = max(i for i, v in enumerate(ordered) if v == opset)
        version = ordered[last - 1]
        impl_name = "_impl_v{}".format(version)
        if hasattr(cls, impl_name):
            return getattr(cls, impl_name)
        raise NotImplementedError(
            "opset version {} of {} not implemented".format(version, cls.__name__)
        )
class Unary(OnnxOpConverter):
    """A helper class for unary op converters."""

    # Relay op name, supplied by the concrete subclass (e.g. "abs").
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format(
            cls.name, len(inputs)
        )
        # Dispatch straight to the identically-named relay op.
        op_name = cls.name
        return get_relay_op(op_name)(*inputs)
class Elemwise(OnnxOpConverter):
    """A helper class for elemwise op converters."""

    # Relay op name, supplied by the concrete subclass (e.g. "add").
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs))
        op_name = cls.name
        conv_ops = ["conv2d", "conv2d_transpose"]
        if attr.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops):
            # TODO(zhreshold): remove hard coded infershape
            # Legacy "broadcast" attribute: expand the rhs so it lines up with
            # the conv output starting at `axis`.
            axis = int(attr.get("axis", 0))
            inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
        return get_relay_op(op_name)(*inputs)
class Pool(OnnxOpConverter):
    """A helper class for pool op converters."""

    # Relay pool op prefix ("avg_pool"/"max_pool"), set by subclasses.
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                if cls.name == "avg_pool":
                    # For avg_pool the SAME padding is computed statically per
                    # spatial axis and passed through the "pads" attribute.
                    pad_tuple = []
                    for axis in range(len(input_shape) - 2):
                        axis_shape = input_shape[2 + axis]
                        stride = attr.get("strides", [1] * ndim)[axis]
                        kernel = attr["kernel_shape"][axis]
                        pad = get_pad_pair(axis_shape, kernel, stride, attr["auto_pad"])
                        pad_tuple.append(pad)
                    # Interleave (before..., after...) pairs into ONNX pad order.
                    pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])
                    attr["pads"] = pad_tuple
                else:
                    # Warning: Pool does not yet support dynamic shapes,
                    # one will need to run dynamic_to_static on this model after import
                    data = autopad(data, attr["strides"], attr["kernel_shape"], [1] * ndim, ndim)
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.'
                raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], cls.name))
            attr.pop("auto_pad")
        # Map ONNX storage_order (or its absence) to a relay layout string.
        if "storage_order" in attr:
            attr["layout"] = onnx_storage_order2layout(
                attr["storage_order"], dims=(len(input_shape) - 2), op_name=cls.name
            )
        else:
            attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name)
        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={"kernel_shape": "pool_size", "pads": ("padding", 0)},
            ignores=["dilations", "storage_order"],
            custom_check=dimension_constraint(),
        )([data], attr, params)
class Absolute(Unary):
    """Operator converter for Absolute (maps to relay `abs`)."""

    name = "abs"
class Add(Elemwise):
    """Operator converter for Add (maps to relay `add`)."""

    name = "add"
class AveragePool(Pool):
    """Operator converter for AveragePool (maps to relay `avg_pool*`)."""

    name = "avg_pool"
class BatchNorm(OnnxOpConverter):
    """Operator converter for BatchNorm."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # TODO(zhreshold): 'spatial' is not properly handled here.
        # relay batch_norm returns a tuple; only element 0 (the normalized
        # output) is needed for ONNX BatchNormalization.
        out = AttrCvt(
            op_name="batch_norm", ignores=["spatial", "is_test", "consumed_inputs", "momentum"]
        )(inputs, attr, params)
        return out[0]
class InstanceNorm(OnnxOpConverter):
    """Operator converter for InstanceNorm."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return AttrCvt(op_name="instance_norm")(inputs, attr, params)
def autopad(data, strides, kernel_shape, dilations, ndim, pad_type="constant", deconv=False):
    """
    Perform autopadding with dynamic input shapes

    Builds a relay expression that computes ONNX SAME-style padding from the
    runtime shape of `data` and applies it via nn.pad.  With deconv=True the
    padding is adjusted for transposed convolution.
    """
    # get attributes as constants
    strides = _op.const(np.array(strides), dtype="int64")
    dilated_kernel_shape = _op.const(
        np.array(
            [(kernel - 1) * dilation + 1 for kernel, dilation in zip(kernel_shape, dilations)]
        ),
        dtype="int64",
    )
    # get input shape (spatial dims only: axes 2..ndim)
    shape = _op.strided_slice(shape_of(data, dtype="int64"), [2], [ndim])
    # set up integer constants
    zero = _op.const(0, dtype="int64")
    one = _op.const(1, dtype="int64")
    two = _op.const(2, dtype="int64")
    # Calculate total padding
    mod = _op.mod(shape, strides)
    left = _op.maximum(dilated_kernel_shape - strides, zero)
    right = _op.maximum(dilated_kernel_shape - mod, zero)
    # Stride divides the axis exactly -> use `left`, otherwise `right`.
    total_pad = _op.where(_op.equal(mod, zero), left, right)
    if deconv:
        total_pad = _op.const(np.array(kernel_shape), dtype="int64") - one - total_pad
    # split total padding into before and after
    pad_before = _op.floor_divide(total_pad, two)
    pad_after = total_pad - pad_before
    # combine
    pad = _op.concatenate(
        [_op.reshape(pad_before, [-1, 1]), _op.reshape(pad_after, [-1, 1])], axis=1
    )
    # pad N and C with zeros
    pad = _op.concatenate([_op.const(np.zeros([2, 2], dtype="int64"), dtype="int64"), pad], axis=0)
    return _op.nn.pad(data, fold_constant(pad), _op.const(0.0), pad_type)
class Conv(OnnxOpConverter):
    """Operator converter for Conv."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Use shape of input to determine convolution type.
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        kernel_type = infer_type(inputs[1])
        kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
        # Fall back to the weight tensor's spatial dims when the attribute is absent.
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shapes[0][2:]
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    ndim,
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
                raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
            attr.pop("auto_pad")
        out = AttrCvt(
            op_name=dimension_picker("conv"),
            transforms={
                "kernel_shape": "kernel_size",
                "dilations": ("dilation", 1),
                "pads": ("padding", 0),
                "group": ("groups", 1),
            },
            custom_check=dimension_constraint(),
        )([data, inputs[1]], attr, params)
        # Optional third input is the bias.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class ConvTranspose(OnnxOpConverter):
    """Operator converter for ConvTranspose."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Convert ONNX ConvTranspose to relay conv*_transpose.

        inputs[0] is the data, inputs[1] the weight; the optional inputs[2]
        bias is added after the transposed convolution.
        """
        # get number of channels from the weight tensor's type.
        out_type = infer_type(inputs[1])
        out_shapes = [get_const_tuple(out_type.checked_type.shape)]
        channels = out_shapes[0][1]
        attr["channels"] = channels
        groups = attr.get("group", 1)
        # Fall back to the weight tensor's spatial dims when the attribute is absent.
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = out_shapes[0][2:]
        attr["groups"] = groups
        # infer pads for auto_pad
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    ndim,
                    deconv=True,
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                # Fixed: the message previously blamed operator "Conv".
                msg = 'Value {} in attribute "auto_pad" of operator ConvTranspose is invalid.'
                raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
            attr.pop("auto_pad")
        out = AttrCvt(
            op_name=dimension_picker("conv", "_transpose"),
            transforms={
                "kernel_shape": "kernel_size",
                "dilations": ("dilation", 1),
                "pads": ("padding", 0),
                "group": ("groups", 1),
            },
            disables=["output_shape"],
            custom_check=dimension_constraint(),
        )([data, inputs[1]], attr, params)
        # Optional third input is the bias.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class GlobalAveragePool(OnnxOpConverter):
    """Operator converter for GlobalAveragePool"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Rank includes batch and channel dims, so rank 3/4/5 <=> 1D/2D/3D data.
        rank = len(infer_shape(inputs[0]))
        if rank == 3:
            return _op.nn.global_avg_pool1d(inputs[0])
        if rank == 4:
            return _op.nn.global_avg_pool2d(inputs[0])
        if rank == 5:
            return _op.nn.global_avg_pool3d(inputs[0])
        raise NotImplementedError(
            "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
            % (rank - 2),
        )
class GlobalMaxPool(OnnxOpConverter):
    """Operator converter for GlobalMaxPool"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Rank includes batch and channel dims, so rank 3/4/5 <=> 1D/2D/3D data.
        rank = len(infer_shape(inputs[0]))
        if rank == 3:
            return _op.nn.global_max_pool1d(inputs[0])
        if rank == 4:
            return _op.nn.global_max_pool2d(inputs[0])
        if rank == 5:
            return _op.nn.global_max_pool3d(inputs[0])
        raise NotImplementedError(
            "Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
            % (rank - 2),
        )
class Div(Elemwise):
    """Operator converter for Div (maps to relay `divide`)."""

    name = "divide"
class Elu(OnnxOpConverter):
    """Operator converter for Elu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        alpha = float(attr.get("alpha", 1.0))
        # elu(x) = x for x > 0, alpha * (exp(x) - 1) otherwise, expressed with
        # two relus: -alpha * relu(1 - exp(x)) realizes the negative branch.
        return _expr.const(-alpha) * _op.nn.relu(
            _expr.const(1.0) - _op.exp(inputs[0])
        ) + _op.nn.relu(inputs[0])
class Gemm(OnnxOpConverter):
    """Operator converter for Gemm."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 3 or len(inputs) == 2, "Gemm op take 2 or 3 inputs, {} given".format(
            len(inputs)
        )
        # Y = alpha * A * B + beta * C
        alpha = float(attr.get("alpha", 1.0))
        beta = float(attr.get("beta", 1.0))
        transA = int(attr.get("transA", 0))
        transB = int(attr.get("transB", 0))
        # get number of channels
        channels = infer_channels(inputs[1], not transB)
        if transA:
            inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
        # relay dense takes the weight pre-transposed, so only transpose B
        # when ONNX marks it as NOT transposed.
        if not transB:
            inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
        # dense requires 2-D input; collapse leading dims of A.
        inputs[0] = _op.nn.batch_flatten(inputs[0])
        if alpha != 1.0:
            inputs[0] *= _expr.const(alpha)
        out = _op.nn.dense(inputs[0], inputs[1], units=channels)
        if len(inputs) == 3:
            out = out + _expr.const(beta) * inputs[2]
        return out
class MatMul(OnnxOpConverter):
    """Operator converter for MatMul."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Lower MatMul to nn.dense (2-D case) or nn.batch_matmul (N-D case).

        For N-D operands both sides are flattened to rank 3, multiplied with
        batch_matmul (which takes the second operand transposed), and the
        result is reshaped back to the broadcast batch shape.
        """
        assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs))
        # Need to check input shape as batch matmul must be supported.
        a_shape = shape_of(inputs[0])
        a_rank = infer_shape(a_shape)[0]
        b_shape = shape_of(inputs[1])
        b_rank = infer_shape(b_shape)[0]
        # When performing a batch matmul, we need to properly handle N-dim shapes.
        if a_rank > 2 or b_rank > 2:

            def flatten_to_3d(x, x_shape):
                # Collapse all leading (batch) dims into one: (-1, M, K).
                ndims = infer_shape(x_shape)[0]
                newshape = _op.concatenate(
                    [
                        _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
                        _op.strided_slice(x_shape, [ndims - 2], [ndims]),
                    ],
                    0,
                )
                out = _op.reshape(x, fold_constant(newshape))
                return out

            # Convert a and b into 3 dimensional tensors.
            a = flatten_to_3d(inputs[0], a_shape)
            b = flatten_to_3d(inputs[1], b_shape)
            # Transpose matrix dimensions of b.
            b = _op.transpose(b, [0, 2, 1])
            # Perform a batch matmul.
            output = _op.nn.batch_matmul(a, b)
            # Determine the output batch dimension.
            if a_rank > b_rank:
                out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])
            elif a_rank < b_rank:
                out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])
            # If its unclear how broadcasting should be applied, the output
            # shape is determined by choosing the maximum value from each input.
            else:
                out_batch = _op.concatenate(
                    [
                        _op.maximum(
                            _op.strided_slice(a_shape, [i], [i + 1]),
                            _op.strided_slice(b_shape, [i], [i + 1]),
                        )
                        for i in range(a_rank - 2)
                    ],
                    0,
                )
            # Reshape output to original dimensions.
            # Final shape = batch dims + rows of a + cols of b.
            final_shape = _op.concatenate(
                [
                    out_batch,
                    _op.strided_slice(
                        a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
                    ),
                    _op.strided_slice(
                        b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
                    ),
                ],
                0,
            )
            return _op.reshape(output, fold_constant(final_shape))
        # Otherwise a simple dense op will get the job done.
        input_1_t = _op.transpose(inputs[1], axes=(1, 0))
        return _op.nn.dense(inputs[0], input_1_t)
class Mod(OnnxOpConverter):
    """Operator converter for Mod."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "Mod op take 2 inputs, {} given".format(len(inputs))

        # ONNX fmod=0 matches np.mod semantics (relay "floor_mod"), while a
        # nonzero fmod forces np.fmod semantics (relay "mod").
        op_name = "mod" if attr.get("fmod", 0) else "floor_mod"
        return AttrCvt(op_name)(inputs, {}, params)
class MaxPool(Pool):
    """Operator converter for MaxPool"""

    # Relay op name prefix consumed by the Pool base converter (defined
    # earlier in this file).
    name = "max_pool"
class MaxUnpool(OnnxOpConverter):
    """Operator converter for MaxUnpool"""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Invert max pooling by scattering values back to their indices.

        The output shape is derived from strides/kernel/pads unless the
        optional output_shape input is provided, then the flattened data is
        scattered into a zero tensor of that shape.
        """
        # Unpack inputs and attributes
        data = inputs[0]
        data_type = infer_type(data).checked_type.dtype
        indices = inputs[1]
        output_shape = inputs[2]
        kernel_shape = attr.get("kernel_shape")
        pads = attr.get("pads", None)
        strides = attr.get("strides", [1] * len(kernel_shape))

        # Compute the proper output shape before padding.
        # Batch and channel dims keep extent (multiplier 1); spatial dims
        # scale by the stride.
        multiplier = _op.concatenate(
            [_expr.const([1, 1], dtype="int64"), _expr.const(list(strides), dtype="int64")], axis=0
        )
        total_output_shape = multiplier * shape_of(data, dtype="int64")
        # Add extra dimensions from kernel size and stride mismatch
        total_output_shape += _op.concatenate(
            [_expr.const([0, 0], "int64"), _expr.const(list(kernel_shape), "int64")], axis=0
        ) - _op.concatenate(
            [_expr.const([0, 0], "int64"), _expr.const(list(strides), "int64")], axis=0
        )

        # Compute padding amount if output shape is specified.
        if output_shape is not None:
            # An explicit output_shape input overrides the computed shape.
            total_output_shape = output_shape

        elif pads is not None:
            # Get pads in the proper format for relay.
            pads = _op.concatenate(
                [_expr.const([0, 0, 0, 0], "int64"), _expr.const(list(pads), "int64")], axis=0
            )
            pads = _op.reshape(pads, [-1, 2])
            # Compute the total padding per axis.
            total_pad = _op.sum(pads, axis=-1)
            # Reversing maxpool means that padding actually makes our output smaller.
            total_output_shape = total_output_shape - total_pad

        # Create a tensor of zeros then scatter our data through it.
        zeros_tensor = _op.zeros(total_output_shape, data_type)
        # We need to flatten all our tensors before scattering.
        flat_tensor = _op.scatter(
            _op.reshape(zeros_tensor, [-1]),
            _op.reshape(indices, [-1]),
            _op.reshape(data, [-1]),
            axis=0,
        )
        # Now reshape back to prepadded shape.
        output_tensor = _op.reshape(flat_tensor, total_output_shape)

        return output_tensor
class LpPool(OnnxOpConverter):
    """A helper class for lppool op converters."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Lower LpPool via average pooling.

        Raises the input to p, average-pools with count_include_pad,
        rescales by the window element count, and takes the p-th root.
        """
        dtype = infer_type(inputs[0]).checked_type.dtype
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: LpPool does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(data, attr["strides"], attr["kernel_shape"], [1] * ndim, ndim)
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.'
                raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], "LpPool"))
            attr.pop("auto_pad")

        if "storage_order" in attr:
            attr["layout"] = onnx_storage_order2layout(
                attr["storage_order"], dims=(len(input_shape) - 2), op_name="LpPool"
            )
        else:
            attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name="LpPool")

        p = _expr.const(attr["p"], dtype)
        reci_p = _expr.const(1.0 / attr["p"], dtype)
        data = _op.power(data, p)

        out = AttrCvt(
            op_name=dimension_picker("avg_pool"),
            transforms={"kernel_shape": "pool_size", "pads": ("padding", 0)},
            extras={"count_include_pad": True},
            ignores=["p"],
            custom_check=dimension_constraint(),
        )([data], attr, params)
        # avg_pool produced a window mean; multiply by the window element
        # count to recover the window sum before taking the p-th root.
        kernels = attr["kernel_shape"]
        out = _op.abs(out) * _expr.const(np.prod(kernels).astype(dtype))
        return _op.power(out, reci_p)
class Mul(Elemwise):
    """Operator converter for Multiply."""

    # Relay op name consumed by the shared Elemwise conversion logic.
    name = "multiply"
class Pad(OnnxOpConverter):
    """Operator converter for Pad."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Pad using the opset-1 "paddings" attribute.

        The flat list is laid out [before_0, ..., before_n, after_0, ...,
        after_n] and is regrouped into per-axis (before, after) pairs.
        """
        pad_width = []
        pads = attr.pop("paddings")
        dims = int(len(pads) / 2)
        for i in range(dims):
            pad_width.append((pads[i], pads[i + dims]))
        attr["pad_width"] = pad_width
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if pad_mode in ["constant", "edge", "reflect"]:
            attr["pad_mode"] = pad_mode
            attr.pop("mode", None)
        else:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )

        return AttrCvt(
            _op.nn.pad,
            transforms={
                "value": "pad_value",
            },
        )(inputs, attr, params)

    @classmethod
    def _impl_v2(cls, inputs, attr, params):
        """Same as opset 1 but the attribute is renamed to "pads"."""
        pad_width = []
        pads = attr.pop("pads")
        dims = int(len(pads) / 2)
        for i in range(dims):
            pad_width.append((pads[i], pads[i + dims]))
        attr["pad_width"] = pad_width
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if pad_mode in ["constant", "edge", "reflect"]:
            attr["pad_mode"] = pad_mode
            attr.pop("mode", None)
        else:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )

        return AttrCvt(
            "pad",
            transforms={
                "value": "pad_value",
            },
        )(inputs, attr, params)

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """From opset 11, pads (and the optional constant value) arrive as
        tensor inputs rather than attributes."""
        pads = inputs[1]
        if len(inputs) == 3:
            value = fold_constant(_op.take(inputs[2], _op.const(0)))
        else:
            value = 0

        # Regroup the flat [before..., after...] layout into per-axis pairs.
        pad_width_expr = fold_constant(_op.transpose(_op.reshape(pads, (2, -1))))
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if not pad_mode in ["constant", "edge", "reflect"]:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )

        return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)
class ParametricSoftPlus(OnnxOpConverter):
    """Operator converter for ParametricSoftPlus."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * ln(1 + exp(beta * x))
        alpha = _expr.const(float(attr.get("alpha", 1.0)))
        beta = _expr.const(float(attr.get("beta", 1.0)))
        softplus = _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.0))
        return softplus * alpha
class Prelu(OnnxOpConverter):
    """Operator converter for Prelu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Broadcast the slope to the data shape, flatten both operands,
        # apply prelu along axis 0, then restore the original shape.
        assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs))
        input_shape = shape_of(inputs[0])
        alpha = _op.broadcast_to_like(inputs[1], inputs[0])
        alpha = _op.reshape(alpha, [-1])
        output = _op.nn.prelu(_op.reshape(inputs[0], [-1]), alpha, axis=0)
        return _op.reshape(output, input_shape)
class Reciprocal(OnnxOpConverter):
    """Operator converter for Reciprocal."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Emit 1/x with the numerator constant matching the input dtype so
        # no implicit cast is introduced.
        x = inputs[0]
        one = _expr.const(1.0, dtype=infer_type(x).checked_type.dtype)
        return one / x
class Flatten(OnnxOpConverter):
    """Operator converter for Flatten."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Collapse dims [0, axis) into the first output dim and [axis, ndim)
        # into the second.
        axis = attr.get("axis", 1)
        ishape = _op.shape_of(inputs[0])
        ndim = infer_shape(ishape)[0]
        if axis < 0:
            axis = axis + ndim

        if axis == 1:
            # Fast path: batch_flatten handles exactly the axis == 1 case.
            out = _op.nn.batch_flatten(inputs[0])
        else:
            pre_shape = _op.prod(_op.strided_slice(ishape, [0], [axis], [1]), keepdims=True)
            post_shape = _op.prod(_op.strided_slice(ishape, [axis], [ndim], [1]), keepdims=True)
            newshape = _op.concatenate([pre_shape, post_shape], axis=0)
            out = _op.reshape(inputs[0], newshape)
        return out
class Reshape(OnnxOpConverter):
    """Operator converter for Reshape."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1 carries the target shape as an attribute.
        return _op.reshape(inputs[0], attr["shape"])

    @classmethod
    def _impl_v5(cls, inputs, attr, params):
        # From opset 5 the shape is a second input; when it is a known
        # parameter, use its value as a static tuple instead.
        if get_name(inputs[1]) in params:
            shape = tuple(params[inputs[1].name_hint].asnumpy().astype("int32"))
            out = _op.reshape(inputs[0], shape)
        else:
            out = _op.reshape(*inputs)
        return out
class DepthToSpace(OnnxOpConverter):
    """Operator converter for DepthToSpace."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # "DCR" is the ONNX default element-ordering mode.
        mode = attr.get("mode", b"DCR").decode("utf-8")
        return _op.nn.depth_to_space(inputs[0], int(attr["blocksize"]), mode=mode)
class SpaceToDepth(OnnxOpConverter):
    """Operator converter for SpaceToDepth."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Rearrange spatial blocks of extent `blocksize` into the channel dim.
        return _op.nn.space_to_depth(inputs[0], int(attr["blocksize"]))
class Concat(OnnxOpConverter):
    """Operator converter for Concat."""

    @classmethod
    def _impl_v1(cls, inputs, args, params):
        # concatenate takes the tensors as a single tuple argument, hence
        # the extra (inputs,) wrapping.
        return AttrCvt(op_name="concatenate")((inputs,), args)
class Scale(OnnxOpConverter):
    """Operator converter for Scale."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Multiply by the scalar "scale" attribute (default 1.0).
        factor = float(attr.get("scale", 1.0))
        return inputs[0] * _expr.const(factor)
class Selu(OnnxOpConverter):
    """Operator converter for Selu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # selu(x) = gamma * (alpha * (exp(x) - 1) for x <= 0, x for x > 0),
        # expressed with two relu terms (same trick as Elu).
        alpha = float(attr.get("alpha", 1.67326319217681884765625))
        gamma = float(attr.get("gamma", 1.05070102214813232421875))
        negative_part = _expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0]))
        positive_part = _op.nn.relu(inputs[0])
        return _expr.const(gamma) * (negative_part + positive_part)
class ScaledTanh(OnnxOpConverter):
    """Operator converter for ScaledTanh."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * tanh(beta * x)
        alpha = float(attr.get("alpha", 1.0))
        beta = float(attr.get("beta", 1.0))
        scaled = _op.tanh(_expr.const(beta) * inputs[0])
        return scaled * _expr.const(alpha)
class Shrink(OnnxOpConverter):
    """Operator converter for Shrink."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # shrink(x) = x + bias for x < -lambd, x - bias for x > lambd, else 0.
        x = inputs[0]
        dtype = infer_type(x).checked_type.dtype
        lambd = _op.const(attr.get("lambd", 0.5), dtype=dtype)
        bias = _op.const(attr.get("bias", 0.0), dtype=dtype)
        zeros = _op.zeros_like(x)
        lower = _op.where(x < -lambd, x + bias, zeros)
        upper = _op.where(x > lambd, x - bias, zeros)
        return lower + upper
class Softsign(OnnxOpConverter):
    """Operator converter for Softsign."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # softsign(x) = x / (1 + |x|); |x| is computed by reusing the
        # Absolute converter defined elsewhere in this file.
        return inputs[0] / (_expr.const(1.0) + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
    """Operator converter for Subtract."""

    # Relay op name consumed by the shared Elemwise conversion logic.
    name = "subtract"
class Sum(OnnxOpConverter):
    """Operator converter for Sum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Variadic elementwise sum of all inputs.

        Folds the inputs with repeated adds into a fresh accumulator; the
        previous implementation overwrote entries of the shared ``inputs``
        list while accumulating, mutating the caller's list as a side
        effect.
        """
        total = inputs[0]
        for operand in inputs[1:]:
            total = _op.add(total, operand)
        return total
class Affine(OnnxOpConverter):
    """Operator converter for Affine transformation."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * x + beta with attribute-supplied scalar constants.
        alpha = _expr.const(attr.get("alpha", 1.0))
        beta = _expr.const(attr.get("beta", 0.0))
        scaled = alpha * inputs[0]
        return scaled + beta
class ThresholdedRelu(OnnxOpConverter):
    """Operator converter for ThresholdedRelu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Return x where x > alpha, else 0.

        The boolean mask is cast to the input's dtype instead of a
        hard-coded "float32", so float16/float64 inputs do not produce a
        dtype mismatch in the final multiply.
        """
        alpha = float(attr.get("alpha", 1.0))
        dtype = infer_type(inputs[0]).checked_type.dtype
        alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
        mask = _op.greater(inputs[0], alpha_tensor).astype(dtype)
        return inputs[0] * mask
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get("axis", None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
    """Build a FullyConnected-style converter for the given opset."""

    def _impl(inputs, attr, params):
        # get number of channels
        # NOTE(review): Gemm above passes a transpose flag (`not transB`) as
        # the second infer_channels argument; passing `params` here looks
        # suspicious — confirm against infer_channels' signature.
        channels = infer_channels(inputs[1], params)
        attr["units"] = channels
        return AttrCvt("dense", ignores=["axis", "axis_w"])(inputs, attr)

    return _impl
class Upsample(OnnxOpConverter):
    """Operator converter for Upsample (nearest mode)."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        """Convert Upsample, reading scales from the attribute or input 1.

        Scales are laid out one per input dim (NCHW / NCDHW); the batch and
        channel scales must be 1 and only the spatial entries are used.
        """
        scales = attr.get("scales")

        input_shape = infer_shape(inputs[0])
        dims = len(input_shape)

        if not scales:
            # Here we are going to higher OPSET version.
            assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))

            if get_name(inputs[1]) in params:
                scales = params[inputs[1].name_hint].asnumpy()
            else:
                scales = inputs[1]
        if isinstance(scales, _expr.Constant):
            scales = list(scales.data.asnumpy())
        if not isinstance(scales, _expr.Expr):
            assert scales[0] == 1.0 and scales[1] == 1.0

        mode = attr.get("mode")
        if mode == b"nearest":
            method = "nearest_neighbor"
        elif mode == b"linear":
            method = "trilinear" if dims == 5 else "bilinear"
        else:
            raise tvm.error.OpAttributeInvalid(
                'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode)
            )

        # in 3d case, we use the purely static op
        if dims == 5:
            if isinstance(scales, _expr.Expr):
                # NCDHW: depth/height/width scales live at indices 2/3/4.
                # BUG FIX: depth previously read index 1 (the channel scale),
                # inconsistent with the static branch's scales[-3] below.
                scale_d = _op.take(scales, _op.const(2))
                scale_h = _op.take(scales, _op.const(3))
                scale_w = _op.take(scales, _op.const(4))
            else:
                assert len(scales) == 5
                scale_h = scales[-2]
                scale_w = scales[-1]
                scale_d = scales[-3]

            layout = "NCDHW"
            out = _op.nn.upsampling3d(
                inputs[0], scale_d, scale_h, scale_w, layout=layout, method=method
            )
        # in 2d case, use dynamic op
        else:
            if isinstance(scales, _expr.Expr):
                # NCHW: height/width scales live at indices 2/3.
                # BUG FIX: previously read indices 3/4 — index 4 is out of
                # bounds for a 4-element scales tensor, and index 3 is the
                # width scale, not the height; the static branch below reads
                # scales[-2]/scales[-1] (indices 2/3).
                scale_h = _op.take(scales, _op.const(2))
                scale_w = _op.take(scales, _op.const(3))
            else:
                assert len(scales) == 4
                scale_h = scales[-2]
                scale_w = scales[-1]
            layout = "NCHW"
            out = _op.nn.upsampling(
                inputs[0],
                scale_h,
                scale_w,
                layout=layout,
                method=method,
                align_corners=False,
            )
        return out
def shape_of(x, dtype="int64"):
    """Return x's shape as a constant when fully static, else a shape_of op."""
    ttype = infer_type(x).checked_type
    if _ty.is_dynamic(ttype):
        return _op.shape_of(x, dtype)
    return _expr.const(list(ttype.shape), dtype)
class Shape(OnnxOpConverter):
    """Operator converter for Shape."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # shape_of (defined above) folds static shapes to a constant and
        # falls back to a runtime shape_of op for dynamic ones.
        return shape_of(inputs[0], "int64")
class CumSum(OnnxOpConverter):
    """Operator converter for CumSum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, dim = inputs[0], inputs[1]
        if dim is not None:
            # The axis arrives as a tensor input; resolve it to a constant.
            dim = int(infer_value(dim, params).asnumpy())
        exclusive = attr.get("exclusive", 0)
        if attr.get("reverse", 0) != 0:
            # cumsum has no reverse flag: flip, accumulate, flip back.
            flipped = _op.reverse(data, axis=dim)
            summed = _op.cumsum(flipped, axis=dim, exclusive=exclusive)
            return _op.reverse(summed, axis=dim)
        return _op.cumsum(data, axis=dim, exclusive=exclusive)
class Cast(OnnxOpConverter):
    """Operator converter for Cast."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)

    @classmethod
    def _impl_v5(cls, inputs, attr, params):
        # From opset 5, "to" is an ONNX TensorProto enum value; translate it
        # to a numpy dtype name before converting.
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

            attr["to"] = str(TENSOR_TYPE_TO_NP_TYPE[attr["to"]])
        except ImportError as e:
            raise ImportError("Unable to import onnx.mapping which is required {}".format(e))
        return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)
class Unsqueeze(OnnxOpConverter):
    """Operator converter for Unsqueeze."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Insert the new axes in ascending order so each axis index refers
        # to the progressively expanded tensor.
        out = inputs[0]
        for axis in sorted(attr["axes"]):
            out = _op.expand_dims(out, axis=axis, num_newaxis=1)
        return out
class Split(OnnxOpConverter):
    """Operator converter for Split."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Split a tensor along an axis.

        An explicit ``split`` attribute lists section sizes, which are
        converted to cumulative split indices; otherwise the axis is
        divided evenly across the expected number of outputs.

        Removed a dead write to ``attr["indices_or_sections"]`` that was
        assigned an empty list and never read.
        """
        splits = attr.get("split", None)
        if splits is not None:
            # Convert section lengths into the cumulative indices that
            # _op.split expects (the last section is implied).
            indices = []
            index = 0
            for i in splits[:-1]:
                index += i
                indices.append(index)
        # When splits isnt specified divide evenly over axis.
        else:
            indices = attr["tvm_custom"]["num_outputs"]
        output = _op.split(inputs[0], indices, attr.get("axis", 0))
        # If the output of split is a single value, unpack if from the TupleWrapper
        if len(output) == 1:
            output = output[0]
        return output
class Slice(OnnxOpConverter):
    """Operator converter for Slice."""

    @classmethod
    def _common(cls, starts, ends, axes):
        """Expand starts/ends so every axis up to max(axes) is explicit.

        Axes not listed in `axes` get a full-range slice (0 to INT32_MAX).
        """
        new_axes = []
        new_starts = []
        new_ends = []
        pop_index = 0
        for i in range(max(axes) + 1):
            if i in axes:
                new_axes.append(i)
                new_starts.append(starts[pop_index])
                new_ends.append(ends[pop_index])
                pop_index += 1
            else:
                new_axes.append(i)
                new_starts.append(0)
                new_ends.append(np.iinfo(np.int32).max)
        return new_starts, new_ends, new_axes

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Slice with attribute-specified starts/ends (axes optional)."""
        if isinstance(attr["starts"], int):
            attr["starts"] = (attr["starts"],)
            attr["ends"] = (attr["ends"],)

        try:
            # Update the starts and ends according to axes if required.
            if isinstance(attr["axes"], int):
                attr["axes"] = (attr["axes"],)
            # Sparse axes need to be filled in before strided_slice.
            if (max(attr["axes"]) + 1) != len(attr["axes"]):
                new_starts, new_ends, new_axes = cls._common(
                    attr["starts"], attr["ends"], attr["axes"]
                )
                attr["axes"] = new_axes
                attr["starts"] = new_starts
                attr["ends"] = new_ends
        except KeyError:
            # No "axes" attribute: starts/ends already cover leading axes.
            pass
        begin = list(attr["starts"])
        end = list(attr["ends"])

        return _op.strided_slice(inputs[0], begin=begin, end=end)

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        """Slice with tensor inputs (starts, ends, axes, steps).

        Sparse per-axis values are scattered onto full-rank defaults:
        starts default to 0, ends to the dim extent, steps to 1.
        """
        starts = inputs[1]
        ends = inputs[2]
        axes = inputs[3]
        steps = inputs[4]

        data_rank = len(infer_shape(inputs[0]))

        # Update the starts and ends according to axes if required.
        if axes is not None:
            data_shape = shape_of(inputs[0], dtype=infer_type(ends).checked_type.dtype)
            starts = _op.scatter(
                _op.const([0] * data_rank, dtype=infer_type(starts).checked_type.dtype),
                axes,
                starts,
                axis=0,
            )
            ends = _op.scatter(data_shape, axes, ends, axis=0)
            if steps is not None:
                steps = _op.scatter(
                    _op.const([1] * data_rank, dtype=infer_type(steps).checked_type.dtype),
                    axes,
                    steps,
                    axis=0,
                )

        if steps is None:
            steps = _op.const([1] * data_rank, dtype=infer_type(starts).checked_type.dtype)

        return _op.strided_slice(
            inputs[0], fold_constant(starts), fold_constant(ends), fold_constant(steps)
        )
def normalize_gather_indices(data, indices, axis):
    """Make sure gather indicies aren't negative"""
    ind_dtype = infer_type(indices).checked_type.dtype
    # Normalize the indices to a positive range
    s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis))
    cond = fold_constant(indices < _op.const(0, ind_dtype))
    if isinstance(cond, _expr.Constant):
        val = cond.data.asnumpy()
        if val.size == 1:
            # Scalar constant condition: resolve the branch at compile time
            # instead of emitting a where().
            cond = val.item()
            if cond:
                indices = indices + s
            return indices
    indices = _op.where(cond, indices + s, indices)
    return indices
class Gather(OnnxOpConverter):
    """Operator converter for Gather."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, raw_indices = inputs
        axis = attr.get("axis", 0)
        # ONNX allows negative indices; shift them into the positive range.
        normalized = normalize_gather_indices(data, raw_indices, axis)
        return _op.take(data, normalized, axis)
class GatherElements(OnnxOpConverter):
    """Operator converter for GatherElements."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, raw_indices = inputs
        axis = attr.get("axis", 0)
        # ONNX allows negative indices; shift them into the positive range.
        normalized = normalize_gather_indices(data, raw_indices, axis)
        return _op.gather(data, axis, normalized)
class GatherND(OnnxOpConverter):
    """Operator converter for GatherND."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Rotate the last (coordinate) axis of the ONNX indices to the
        # front before handing them to gather_nd.
        indices_dims = len(infer_shape(inputs[1]))
        indices = _op.transpose(inputs[1], axes=[-1] + list(range(indices_dims - 1)))
        return _op.gather_nd(inputs[0], indices)
class Scatter(OnnxOpConverter):
    """Operator converter for Scatter."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Inputs are (data, indices, updates); axis defaults to 0.
        data, indices, updates = inputs
        return _op.scatter(data, indices, updates, attr.get("axis", 0))
class ScatterND(OnnxOpConverter):
    """Operator converter for ScatterND."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Rotate the last (coordinate) axis of the ONNX indices to the
        # front before scattering, mirroring GatherND above.
        indices_dim = len(infer_shape(inputs[1]))
        axes = list(range(indices_dim))
        return _op.scatter_nd(
            inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], "update"
        )
class Greater(OnnxOpConverter):
    """Operator logical greater."""

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Elementwise x > y.
        return _op.greater(inputs[0], inputs[1])
class Less(OnnxOpConverter):
    """Operator logical less than."""

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Elementwise x < y.
        return _op.less(inputs[0], inputs[1])
class LRN(OnnxOpConverter):
    """Operator converter for Local Response Normalization."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """LRN support only NCHW format
        https://github.com/onnx/onnx/blob/master/docs/Operators.md#LRN
        """
        new_attr = {
            "size": attr.get("size"),
            "axis": 1,  # channel axis under the NCHW assumption above
            "alpha": attr.get("alpha", 0.0001),
            "beta": attr.get("beta", 0.75),
            "bias": attr.get("bias", 1.0),
        }
        return AttrCvt("lrn")(inputs, new_attr)
class Maximum(OnnxOpConverter):
    """Operator converter for Maximum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Variadic max: fold the elementwise maximum over all inputs.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt("maximum")([result, operand], {})
        return result
class Minimum(OnnxOpConverter):
    """Operator converter for Minimum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Variadic min: fold the elementwise minimum over all inputs.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt("minimum")([result, operand], {})
        return result
class Mean(OnnxOpConverter):
    """Operator converter for Mean."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) == 1:
            return inputs[0]
        # avoid overflow
        # Stack all inputs along a new leading axis and reduce it with mean
        # rather than summing first and dividing.
        concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0)
        return _op.mean(concat, axis=0, keepdims=False)
class HardSigmoid(OnnxOpConverter):
    """Operator converter for HardSigmoid."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = clip(alpha * x + beta, 0, 1)
        alpha = attr.get("alpha", 0.2)
        beta = attr.get("beta", 0.5)
        scaled = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
        return AttrCvt("clip")([scaled], {"a_min": 0, "a_max": 1})
class Reduce(OnnxOpConverter):
    """Operator converter for reduce ops."""

    # Relay reduce op name; overridden by each concrete subclass.
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # With no "axes" attribute, ONNX reduces over every axis.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            axis_len = len(infer_shape(inputs[0]))
            axis = list(range(axis_len))

        attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
        return AttrCvt(cls.name)(inputs, attr)
class ReduceMax(Reduce):
    """Operator converter for ReduceMax."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "max"
class ReduceMin(Reduce):
    """Operator converter for ReduceMin."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "min"
class ReduceSum(Reduce):
    """Operator converter for ReduceSum."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "sum"
class ReduceMean(Reduce):
    """Operator converter for ReduceMean."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "mean"
class ReduceProd(Reduce):
    """Operator converter for ReduceProd."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "prod"
class ReduceLogSumExp(Reduce):
    """Operator converter for ReduceLogSumExp."""

    # Relay reduce op used by the shared Reduce._impl_v1.
    name = "logsumexp"
class ReduceSumSquare(OnnxOpConverter):
    """Operator converter for ReduceSumSquare."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Square the input, then reuse an ordinary sum reduction.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            # No "axes" attribute means reduce over every axis.
            axis = list(range(len(infer_shape(inputs[0]))))
        new_attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
        inputs[0] = inputs[0] * inputs[0]

        return AttrCvt("sum")(inputs, new_attr)
class ReduceL1(OnnxOpConverter):
    """Operator converter for ReduceL1."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Take |x| first, then reuse an ordinary sum reduction.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            # No "axes" attribute means reduce over every axis.
            axis = list(range(len(infer_shape(inputs[0]))))
        new_attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
        inputs[0] = _op.abs(inputs[0])

        return AttrCvt("sum")(inputs, new_attr)
class ReduceL2(OnnxOpConverter):
    """Operator converter for ReduceL2."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # sqrt(sum(x^2)) over the requested axes.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            # No "axes" attribute means reduce over every axis.
            axis = list(range(len(infer_shape(inputs[0]))))
        new_attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
        inputs[0] = inputs[0] * inputs[0]
        summed = AttrCvt("sum")(inputs, new_attr)

        return _op.sqrt(summed)
class ReduceLogSum(OnnxOpConverter):
    """Operator converter for ReduceLogSum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # log(sum(x)) over the requested axes.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            # No "axes" attribute means reduce over every axis.
            axis = list(range(len(infer_shape(inputs[0]))))
        new_attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
        summed = AttrCvt("sum")(inputs, new_attr)

        return _op.log(summed)
class ArgMax(OnnxOpConverter):
    """Operator converter for ArgMax."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if "select_last_index" in attr:
            raise NotImplementedError("select_last_index not supported in ArgMax")
        new_attr = {"axis": attr.get("axis", 0), "keepdims": attr.get("keepdims", True)}
        # Cast the result to the int64 dtype ONNX prescribes for indices.
        return _op.cast(AttrCvt("argmax")(inputs, new_attr), "int64")
class ArgMin(OnnxOpConverter):
    """Operator converter for ArgMin."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if "select_last_index" in attr:
            raise NotImplementedError("select_last_index not supported in ArgMin")
        new_attr = {"axis": attr.get("axis", 0), "keepdims": attr.get("keepdims", True)}
        # Cast the result to the int64 dtype ONNX prescribes for indices.
        return _op.cast(AttrCvt("argmin")(inputs, new_attr), "int64")
class Softmax(OnnxOpConverter):
    """Operator converter for Softmax."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        ndim = len(infer_shape(inputs[0]))
        axis = attr.get("axis", 1)
        if axis < 0:
            axis += ndim
        # ONNX softmax coalesces all trailing dims from `axis`, so reduce
        # over every axis from `axis` to the end.
        axes = list(range(axis, ndim))
        x = inputs[0]
        # Subtract the max for numerical stability before exponentiating.
        shifted = _op.exp(x - _op.max(x, axes, keepdims=True))
        return shifted / _op.sum(shifted, axes, keepdims=True)
class LogSoftmax(OnnxOpConverter):
    """Operator converter for LogSoftmax."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        ndim = len(infer_shape(inputs[0]))
        axis = attr.get("axis", 1)
        if axis < 0:
            axis += ndim
        # Reduce over the coalesced trailing axes, as in Softmax above.
        axes = list(range(axis, ndim))
        x = inputs[0]
        # log(softmax(x)) = x - max - log(sum(exp(x - max))), computed in the
        # max-shifted form for numerical stability.
        m = _op.max(x, axes, keepdims=True)
        s = _op.sum(_op.exp(x - m), axes, keepdims=True)
        return x - m - _op.log(s)
class Hardmax(OnnxOpConverter):
    """Operator converter for Hardmax."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """One-hot of the argmax over the dims coalesced at `axis`.

        The input is flattened to 2-D around `axis`, argmax + one_hot
        produce the hard selection, and the result is reshaped back.
        """
        axis = attr.get("axis", 1)
        ndim = len(infer_shape(inputs[0]))
        if axis < 0:
            axis += ndim
        dtype = infer_type(inputs[0]).checked_type.dtype

        if axis == 0:
            pre = _op.const([1], "int64")
        else:
            pre = _op.prod(
                _op.strided_slice(shape_of(inputs[0]), [0], [axis], [1]), axis=0, keepdims=True
            )
        # 2147483647 (INT32_MAX) serves as an "up to the end" slice bound.
        post = _op.prod(
            _op.strided_slice(shape_of(inputs[0]), [axis], [2147483647], [1]), axis=0, keepdims=True
        )
        newshape = _op.concatenate([pre, post], axis=0)
        x = _op.reshape(inputs[0], fold_constant(newshape))
        argmax = _op.argmax(x, axis=1)
        onehot = _op.one_hot(
            argmax,
            _op.const(1.0, dtype),
            _op.const(0.0, dtype),
            fold_constant(_op.take(shape_of(x), _op.const([1], "int64"))),
            1,
            dtype,
        )
        return _op.reshape(onehot, shape_of(inputs[0]))
class OneHot(OnnxOpConverter):
    """Operator converter for OneHot."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        """Convert OneHot, splitting ONNX's paired off/on values and
        normalizing negative indices before calling relay one_hot."""
        # Extract relay one_hot inputs.
        indices, depth, values = inputs
        ndim = len(infer_shape(indices))
        # Split onnx on off values into two separate expressions.
        off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1))
        # Extract the datatype of the output from on_value.
        dtype = infer_type(on_value).checked_type.dtype
        ind_dtype = infer_type(indices).checked_type.dtype
        # Normalize the indices to a positive range
        indices = _op.where(
            indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices
        )
        # set default value when axis is not set in the model
        if "axis" not in attr:
            attr["axis"] = -1
        axis = attr["axis"]
        if axis < 0:
            # The new axis may be appended after the last dim, hence ndim + 1.
            axis += ndim + 1

        return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)
class ConstantOfShape(OnnxOpConverter):
    """Operator converter for ConstantOfShape."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # The fill value (and its dtype) comes from the "value" attribute;
        # without it the ONNX default is a zero-filled float32 tensor.
        if "value" in attr:
            np_value = get_numpy(attr.pop("value"))[0]
            value = _expr.const(np_value)
            dtype = np_value.dtype.name
        else:
            value = _expr.const(0)
            dtype = "float32"
        return _op.full(value, inputs[0], dtype=dtype)
class Constant(OnnxOpConverter):
    """Operator converter for Constant."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        """Materialize the node's ``value`` attribute as a relay constant.

        Raises
        ------
        tvm.error.OpAttributeRequired
            If the node carries no ``value`` attribute.
        """
        if "value" not in attr:
            # BUG FIX: was ``tvm.errors.OpAttributeRequired`` — the module is
            # ``tvm.error`` (as used throughout this file), so the old code
            # raised AttributeError instead of the intended exception.
            raise tvm.error.OpAttributeRequired("no value in Constant")
        value = attr.pop("value")
        # Constants may rarely have string types. These are likely exported
        # from other frameworks and not actually used in TVM. We'll just use
        # a zero valued constant for compatibility.
        if isinstance(value, bytes):
            np_value = np.asarray([0]).astype("int64")
        else:
            np_value = get_numpy(value)
        dtype = np_value.dtype.name
        value = _expr.const(np_value, dtype)
        return value
class Sign(OnnxOpConverter):
    """Operator converter for Sign."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Elementwise sign (-1, 0, or 1).
        return _op.sign(inputs[0])
class Equal(Elemwise):
    """Operator converter for Equal."""

    # Relay op name consumed by the shared Elemwise conversion logic.
    name = "equal"
class Not(Elemwise):
    """Operator converter for Not."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Overrides the Elemwise dispatch: logical not is unary.
        return _op.logical_not(inputs[0])
class And(Elemwise):
    """Operator converter for And."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Overrides the Elemwise dispatch with an explicit logical and.
        return _op.logical_and(inputs[0], inputs[1])
class Tile(Elemwise):
    """Operator converter for Tile"""

    @classmethod
    def _impl_v6(cls, inputs, attr, params):
        # Repeats per axis arrive as the second input tensor.
        return _op.tile(inputs[0], inputs[1])
class Erf(OnnxOpConverter):
    """Operator converter for Erf"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Elementwise Gauss error function.
        return _op.erf(inputs[0])
class Where(OnnxOpConverter):
    """Operator converter for Where"""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        """Select x or y by condition, after broadcasting all three operands
        to one common shape derived from their ranks (and, on rank ties,
        the per-dimension maxima)."""
        condition_rank = len(infer_shape(inputs[0]))
        x_rank = len(infer_shape(inputs[1]))
        y_rank = len(infer_shape(inputs[2]))
        ranks = [condition_rank, x_rank, y_rank]

        # If one rank is longer than others, then we can broadcast
        # to that shape.
        max_rank = max(ranks)
        max_rank_idxs = [i for i, x in enumerate(ranks) if x == max_rank]
        broadcast_shape = shape_of(inputs[max_rank_idxs[0]])
        # If two or more inputs have the same rank, compute the broadcast
        # shape by taking the maximum value of each dimensions.
        if len(max_rank_idxs) > 1:
            for idx in max_rank_idxs:
                broadcast_shape = _op.maximum(broadcast_shape, shape_of(inputs[idx]))

        broadcast_shape = fold_constant(broadcast_shape)

        condition = _op.broadcast_to(inputs[0], broadcast_shape)
        x = _op.broadcast_to(inputs[1], broadcast_shape)
        y = _op.broadcast_to(inputs[2], broadcast_shape)
        return _op.where(condition, x, y)
class Or(Elemwise):
    """Operator converter for Or."""

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Overrides the Elemwise dispatch with an explicit logical or.
        return _op.logical_or(inputs[0], inputs[1])
class Expand(OnnxOpConverter):
    """Operator converter for Expand."""

    @classmethod
    def _impl_v8(cls, inputs, attr, params):
        dtype = infer_type(inputs[1]).checked_type.dtype
        in_shape = shape_of(inputs[0], dtype=dtype)
        shape = inputs[1]

        # Currently 'op.broadcast_to' expect the rank of the given 'shape'
        # (the 2nd input) is always higher than that of the given 'input' (the 1st input)
        # However, ONNX Expand supports multi-directional broadcasting, which allows
        # above pattern and also some extent of 'shape' can be smaller than the corresponding
        # extent of 'input'. In this case, the extent of 'shape' must be 1.
        # https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
        # In above cases, we cannot directorly apply 'op.broadcast_to' instead of 'expand'
        # so, here we solved this problem by expanding the given 'shape' itself.
        def expand_shape(in_shape, shape):
            """A function expands the shape when the rank is lower than that of the given
            intput. Also it replaces the extent of the shape with the corresponding extent
            of the intput when it is 1.
            """
            in_dims = infer_shape(in_shape)[0]
            new_dims = infer_shape(shape)[0]

            if in_dims < new_dims:
                # Left-pad the input shape with 1s up to the target rank.
                in_shape = _op.concatenate(
                    [
                        _expr.const(
                            [
                                1,
                            ]
                            * (new_dims - in_dims),
                            dtype=dtype,
                        ),
                        in_shape,
                    ],
                    axis=0,
                )
            elif new_dims < in_dims:
                # BUG FIX: this branch was guarded by `new_dims > in_dims`,
                # which can never hold once the first condition fails, so the
                # target shape was never padded. When the TARGET rank is the
                # smaller one, it must be left-padded with 1s instead.
                shape = _op.concatenate(
                    [
                        _expr.const(
                            [
                                1,
                            ]
                            * (in_dims - new_dims),
                            dtype=dtype,
                        ),
                        shape,
                    ],
                    axis=0,
                )
            # Elementwise max implements the "extent 1 defers to the other
            # operand" multidirectional broadcasting rule.
            new_shape = _op.maximum(in_shape, shape)
            return new_shape

        shape = fold_constant(expand_shape(in_shape, shape))
        return _op.broadcast_to(inputs[0], shape=shape)
class RNN(OnnxOpConverter):
    """Operator converter for RNNs such as LSTM and GRU."""

    @classmethod
    def _activation_helper(cls, activation, alpha, beta):
        """Build a unary callable that applies the named ONNX activation,
        forwarding the alpha/beta attributes when they are provided."""
        convert_map = _get_convert_map(1)
        attrs = {}
        for key, value in (("alpha", alpha), ("beta", beta)):
            if value is not None:
                attrs[key] = value
        name = activation.decode("utf-8")
        return lambda x: convert_map[name]([x], attrs, {})

    @classmethod
    def _activation_needs_alpha(cls, activation):
        """Whether the named activation consumes an alpha attribute."""
        return activation.decode("utf-8") in (
            "Affine",
            "LeakyRelu",
            "ThresholdedRelu",
            "ScaledTanh",
            "HardSigmoid",
            "Elu",
        )

    @classmethod
    def _activation_needs_beta(cls, activation):
        """Whether the named activation consumes a beta attribute."""
        return activation.decode("utf-8") in (
            "Affine",
            "ScaledTanh",
            "HardSigmoid",
        )
class LSTM(RNN):
    """Operator converter for LSTM.

    Converts a single-direction ONNX LSTM into an unrolled sequence of
    Relay ops, one set of gate computations per time step.
    """

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Unpack inputs, note that if optional and not provided then value will be None.
        X = inputs[0]
        W = inputs[1]
        R = inputs[2]
        B = inputs[3]
        # Sequence length currently unused as it can be inferred from shapes.
        # sequence_lens = inputs['sequence_lens']
        h_0 = inputs[5]
        c_0 = inputs[6]
        P = inputs[7]  # optional peephole weight tensor
        num_directions = infer_shape(W)[0]
        W_dtype = infer_type(W).checked_type.dtype
        if num_directions != 1:
            raise NotImplementedError("Bidirectional LSTMs not yet supported.")
        # Remove num_directions axis from weights.
        W = _op.squeeze(W, axis=[0])
        R = _op.squeeze(R, axis=[0])
        if B is not None:
            B = _op.squeeze(B, axis=[0])
        X_shape = infer_shape(X)
        # Hidden size comes from the recurrence weights; batch is X's dim 1.
        hidden_size = infer_shape(R)[-1]
        batch_size = X_shape[1]
        # Initialize state if not provided.
        # Otherwise remove bidirectional axis.
        if h_0 is None:
            h_0 = _op.zeros((batch_size, hidden_size), W_dtype)
        else:
            h_0 = _op.squeeze(h_0, axis=[0])
        if c_0 is None:
            c_0 = _op.zeros((batch_size, hidden_size), W_dtype)
        else:
            c_0 = _op.squeeze(c_0, axis=[0])
        if P is not None:
            P = _op.squeeze(P, axis=[0])
            # Peephole weights for the input, output and forget gates.
            p_i, p_o, p_f = _op.split(P, 3)
        H_t = h_0
        C_t = c_0
        h_list = []
        if "activations" in attr:
            activations = attr["activations"]
            if len(activations) != 3:
                raise NotImplementedError("LSTM assumes 3 activation functions are provided")
            # alpha/beta attributes are consumed positionally, one per
            # activation that declares a need for them.
            alpha_loc = 0
            alphas = attr.get("activation_alpha", [])
            if isinstance(alphas, float):
                alphas = [alphas]
            beta_loc = 0
            betas = attr.get("activation_beta", [])
            if isinstance(betas, float):
                betas = [betas]
            acts = []
            for i in range(3):
                alpha = None
                beta = None
                activation = activations[i]
                if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
                    alpha = alphas[alpha_loc]
                    alpha_loc += 1
                if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
                    beta = betas[beta_loc]
                    beta_loc += 1
                acts.append(cls._activation_helper(activation, alpha, beta))
            f_act, g_act, h_act = acts
        else:
            # Default activations: sigmoid for the gates, tanh elsewhere.
            f_act = _op.sigmoid
            g_act = _op.tanh
            h_act = _op.tanh
        # Unroll the sequence: one slice per time step along axis 0.
        X_steps = _op.split(X, indices_or_sections=X_shape[0], axis=0)
        for step in X_steps:
            step = _op.squeeze(step, axis=[0])
            gates = _op.nn.dense(step, W) + _op.nn.dense(H_t, R)
            if B is not None:
                WB, RB = _op.split(B, 2)
                gates += WB + RB
            # ONNX packs the gates in (input, output, forget, cell) order.
            i, o, f, c = _op.split(gates, 4, axis=-1)
            if P is not None:
                # Peephole connections feed the previous cell state into
                # the input and forget gates.
                i = f_act(i + p_i * C_t)
                f = f_act(f + p_f * C_t)
            else:
                i = f_act(i)
                f = f_act(f)
            c = g_act(c)
            C = f * C_t + i * c
            if P is not None:
                # The output gate peeks at the freshly updated cell state.
                o = f_act(o + p_o * C)
            else:
                o = f_act(o)
            H = o * h_act(C)
            H_t = H
            C_t = C
            h_list.append(_op.expand_dims(H, axis=0))
        # Concatenate outputs and add back in direction axis.
        concatenated = _op.concatenate(h_list, 0)
        output = _op.expand_dims(concatenated, axis=1)
        H_t = _op.expand_dims(H_t, axis=0)
        C_t = _op.expand_dims(C_t, axis=0)
        return _expr.TupleWrapper(_expr.Tuple((output, H_t, C_t)), 3)
class GRU(RNN):
    """Operator converter for GRU.

    Converts a single-direction ONNX GRU into an unrolled sequence of Relay
    ops, one set of gate computations per time step.
    """

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Unpack inputs, note that if optional and not provided then value will be None.
        X = inputs[0]
        W = inputs[1]
        R = inputs[2]
        B = inputs[3]
        # Sequence length currently unused as it can be inferred from shapes.
        # sequence_lens = inputs['sequence_lens']
        h_0 = inputs[5]
        # ONNX flag choosing where the reset gate applies in the candidate
        # state computation (see the two branches in the loop below).
        linear_before_reset = attr.get("linear_before_reset", 0)
        num_directions = infer_shape(W)[0]
        W_dtype = infer_type(W).checked_type.dtype
        if num_directions != 1:
            raise NotImplementedError("Bidirectional GRUs not yet supported.")
        # Remove num_directions axis from weights.
        W = _op.squeeze(W, axis=[0])
        R = _op.squeeze(R, axis=[0])
        if B is not None:
            B = _op.squeeze(B, axis=[0])
        X_shape = infer_shape(X)
        # Hidden size comes from the recurrence weights; batch is X's dim 1.
        hidden_size = infer_shape(R)[-1]
        batch_size = X_shape[1]
        # Initialize state if not provided.
        # Otherwise remove bidirectional axis.
        if h_0 is None:
            h_0 = _op.zeros((batch_size, hidden_size), W_dtype)
        else:
            h_0 = _op.squeeze(h_0, axis=[0])
        H_t = h_0
        h_list = []
        if "activations" in attr:
            activations = attr["activations"]
            if len(activations) != 2:
                raise NotImplementedError("GRU assumes 2 activation functions are provided")
            # alpha/beta attributes are consumed positionally, one per
            # activation that declares a need for them.
            alpha_loc = 0
            alphas = attr.get("activation_alpha", [])
            if isinstance(alphas, float):
                alphas = [alphas]
            beta_loc = 0
            betas = attr.get("activation_beta", [])
            if isinstance(betas, float):
                betas = [betas]
            acts = []
            for i in range(2):
                alpha = None
                beta = None
                activation = activations[i]
                if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
                    alpha = alphas[alpha_loc]
                    alpha_loc += 1
                if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
                    beta = betas[beta_loc]
                    beta_loc += 1
                acts.append(cls._activation_helper(activation, alpha, beta))
            f_act, g_act = acts
        else:
            # Default activations: sigmoid for the gates, tanh for candidate.
            f_act = _op.sigmoid
            g_act = _op.tanh
        # Unroll the sequence: one slice per time step along axis 0.
        X_steps = _op.split(X, indices_or_sections=X_shape[0], axis=0)
        for step in X_steps:
            step = _op.squeeze(step, axis=[0])
            current = _op.nn.dense(step, W)
            # Gates arrive packed in (update z, reset r, candidate h) order.
            cz, cr, ch = _op.split(current, 3, axis=1)
            rz, rr, rh = _op.split(R, 3, axis=0)
            z = cz + _op.nn.dense(H_t, rz)
            r = cr + _op.nn.dense(H_t, rr)
            if B is not None:
                WB, RB = _op.split(B, 2)
                wbz, wbr, wbh = _op.split(WB, 3, axis=-1)
                rbz, rbr, rbh = _op.split(RB, 3, axis=-1)
                z += wbz + rbz
                r += wbr + rbr
            # BUGFIX: the gates must be activated BEFORE the candidate state
            # uses r. Previously h was computed from the pre-activation r,
            # which deviates from the ONNX GRU definition
            # (rt = f(...), then ht = g(... rt (.) ...)).
            z = f_act(z)
            r = f_act(r)
            if linear_before_reset:
                if B is not None:
                    h = ch + (r * (_op.nn.dense(H_t, rh) + rbh)) + wbh
                else:
                    h = ch + (r * (_op.nn.dense(H_t, rh)))
            else:
                if B is not None:
                    h = ch + _op.nn.dense((r * H_t), rh) + wbh + rbh
                else:
                    h = ch + _op.nn.dense((r * H_t), rh)
            h = g_act(h)
            H_t = ((_expr.const(1, dtype=W_dtype) - z) * h) + (z * H_t)
            h_list.append(_op.expand_dims(H_t, axis=0))
        # Concatenate outputs and add back in direction axis.
        concatenated = _op.concatenate(h_list, 0)
        output = _op.expand_dims(concatenated, axis=1)
        H_t = _op.expand_dims(H_t, axis=0)
        return _expr.TupleWrapper(_expr.Tuple((output, H_t)), 2)
class Resize(OnnxOpConverter):
    """Operator converter for Resize"""

    @classmethod
    def _convert_mode(cls, mode):
        """Translate the ONNX interpolation mode into Relay's method name."""
        mapping = {"nearest": "nearest_neighbor", "linear": "bilinear", "cubic": "bicubic"}
        if mode not in mapping:
            raise tvm.error.OpAttributeInvalid(
                'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode)
            )
        return mapping[mode]

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        method = cls._convert_mode(attr.get("mode").decode("ascii"))
        # Resize-10 always receives scales: output size = input size * scale.
        scale = inputs[1]
        size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
        layout = "NCHW"  # ONNX assumes NCHW layout
        out_size = fold_constant(_op.strided_slice(size, [2], [4]))
        return _op.image.resize(inputs[0], out_size, layout, method, "asymmetric")

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        layout = "NCHW"  # ONNX assumes NCHW layout
        method = cls._convert_mode(attr.get("mode").decode("ascii"))
        coord_trans = attr.get("coordinate_transformation_mode", b"half_pixel").decode("ascii")
        nearest_mode = attr.get("nearest_mode", b"round_prefer_floor").decode("ascii")
        alpha = attr.get("cubic_coeff_a", -0.75)
        exclude = attr.get("exclude_outside", 0)
        scale = inputs[2]
        scale_shape = infer_shape(scale)
        # Resize-11 takes either an explicit size (4th input) or scales.
        if len(inputs) == 4:
            assert (
                len(scale_shape) == 0 or scale_shape[0] == 0
            ), "One of scale or size should be passed, not both."
            size = inputs[3]
        else:
            assert len(scale_shape) != 0, "One of scale or size should be passed."
            size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
        out_size = fold_constant(_op.strided_slice(size, [2], [4]))
        return _op.image.resize(
            inputs[0], out_size, layout, method, coord_trans, nearest_mode, alpha, exclude
        )
class NonZero(OnnxOpConverter):
    """Operator converter for NonZero"""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        if len(inputs) > 1:
            raise ValueError("Expect 1 input only")
        # argwhere yields one row per match; ONNX NonZero expects one
        # column per match and always outputs int64.
        found = AttrCvt(op_name="argwhere")(inputs, attr, params)
        found = _op.cast(found, "int64")
        return _op.transpose(found, axes=(1, 0))
class TopK(OnnxOpConverter):
    """Operator converter for TopK"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Guard clauses: exactly two inputs, and only largest-k is supported.
        if len(inputs) != 2:
            raise ValueError("Expect 2 input only")
        if attr.get("largest", 1) == 0:
            raise NotImplementedError("TVM only supports finding TopK largest elements")
        return _op.topk(inputs[0], inputs[1], axis=attr.get("axis", -1), dtype="int64")
class Range(OnnxOpConverter):
    """Operator converter for Range"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 3:
            raise ValueError("Expect 3 input only")
        start, limit, delta = inputs
        # The result dtype follows the dtype of `start`.
        return _op.arange(start, limit, delta, dtype=infer_type(start).checked_type.dtype)
class MaxRoiPool(OnnxOpConverter):
    """Operator converter for MaxRoiPool."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Fix: error message previously read "MMaxRoiPool".
        assert len(inputs) == 2, "MaxRoiPool op take 2 inputs, {} given".format(len(inputs))

        data = inputs[0]
        rois = inputs[1]
        pooled_shape = attr.get("pooled_shape")
        spatial_scale = attr.get("spatial_scale", 1.0)
        return _vision.roi_pool(data, rois, pooled_shape, spatial_scale)
class RoiAlign(OnnxOpConverter):
    """Operator converter for RoiAlign."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 3:
            raise ValueError("Expect 3 inputs only")
        data, rois, batch_indices = inputs
        mode = attr.get("mode", b"avg")
        if mode not in (b"avg", b"max"):
            raise NotImplementedError("RoiAlign in Relay only uses avg and max modes")
        out_h = attr.get("output_height", 1)
        out_w = attr.get("output_width", 1)
        sampling_ratio = attr.get("sampling_ratio", 0)
        spatial_scale = attr.get("spatial_scale", 1.0)
        # Relay's roi_align expects each roi row to start with its batch
        # index, so prepend the (cast) batch indices as an extra column.
        batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)
        batch_indices = _op.cast(batch_indices, infer_type(rois).checked_type.dtype)
        rois = _op.concatenate([batch_indices, rois], 1)
        return _vision.roi_align(
            data, rois, [out_h, out_w], spatial_scale, sampling_ratio, mode=mode
        )
class Clip(OnnxOpConverter):
    """Operator converter for Clip."""

    @staticmethod
    def convert_attributes(inputs, attr, params):
        """Lower attribute-style min/max onto Relay's clip op."""
        convert = AttrCvt("clip", transforms={"min": "a_min", "max": "a_max"})
        return convert(inputs, attr, params)

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Clip-1 carries the bounds as attributes; missing bounds default
        # to +/- infinity (i.e. unbounded on that side).
        for key, default in (("min", -np.inf), ("max", np.inf)):
            if key not in attr:
                attr[key] = default
        return Clip.convert_attributes(inputs, attr, params)

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        if "min" in attr and "max" in attr:
            return Clip.convert_attributes(inputs, attr, params)
        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs, input, min, max"
        result = inputs[0]
        # Clip-11 passes the bounds as optional tensor inputs: inputs[1]
        # clamps from below (maximum), inputs[2] from above (minimum).
        for bound_idx, clamp in ((1, _op.tensor.maximum), (2, _op.tensor.minimum)):
            if bound_idx < len(inputs) and inputs[bound_idx] is not None:
                result = clamp(result, inputs[bound_idx])
        return result
class Softplus(OnnxOpConverter):
    """Operator converter for Softplus."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data = inputs[0]
        data_dtype = infer_type(data).checked_type.dtype
        # Numerically stable softplus: the naive log(exp(x) + 1) overflows
        # to inf for large x (x > ~88 in float32). Use the identity
        #   softplus(x) = log(1 + exp(-|x|)) + max(x, 0)
        # which keeps the exponent argument non-positive.
        zero = _expr.const(0, dtype=data_dtype)
        one = _expr.const(1, dtype=data_dtype)
        return _op.log(_op.exp(_op.negative(_op.abs(data))) + one) + _op.maximum(data, zero)
class Loop(OnnxOpConverter):
    """Operator converter for Loop.

    Translates an ONNX Loop node into a Relay `while_loop`, supporting pure
    for-loops (trip count only), while-loops (condition only) and the
    combined form, plus scan outputs accumulated across iterations.
    """

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        max_loop_count = inputs[0]
        cond = inputs[1]
        loop_deps = inputs[2:]
        num_deps = len(loop_deps)
        # Create a copy of the body function to prevent the original
        # from being modified.
        body = copy.copy(attr["body"])
        iter_dtype = infer_type(max_loop_count).checked_type.dtype
        # Determine what condition mode we're in.
        assert cond is not None or max_loop_count is not None
        is_for_loop = max_loop_count is not None and cond is None
        is_condition_for_loop = cond is not None and max_loop_count is not None
        # Loop inputs will be packed as
        # [iter_count, max_count, condition, loop_deps, scan_outputs]
        def cond_fn(*loop_inputs):
            # Continuation test: combine the while-condition and/or the
            # trip-count bound depending on the loop mode detected above.
            i = loop_inputs[0]
            max_count = loop_inputs[1]
            w = loop_inputs[2]
            if cond is not None:
                out_while = _op.equal(w, _expr.const(True, "bool"))
            if max_loop_count is not None:
                out_loop = _op.less(i, max_count)
            if is_condition_for_loop:
                return _op.logical_and(out_while, out_loop)
            if is_for_loop:
                return out_loop
            return out_while
        # Get the current graph proto and create a clone for the subgraph
        graph_scope = GraphProto.current
        subgraph_scope = GraphProto(
            graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params
        )
        # Load nodes from outer graph into inner graph.
        subgraph_scope._nodes = graph_scope._nodes.copy()
        # Create a list of variables for each value updated in the loop.
        def get_var(name, val, scan=False):
            # Build a relay var matching val's inferred type; zero-sized
            # dims become dynamic (Any) so they can grow across iterations.
            checked_type = infer_type(val)
            if hasattr(checked_type, "type_annotation"):
                checked_type = checked_type.type_annotation
            if hasattr(checked_type, "checked_type"):
                checked_type = checked_type.checked_type
            shape = get_const_tuple(checked_type.shape)
            actual_shape = []
            for dim in shape:
                if isinstance(dim, int) and dim == 0:
                    actual_shape.append(_ty.Any())
                else:
                    actual_shape.append(dim)
            if scan:
                # Scan outputs carry a leading dynamic iteration axis.
                return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)
            return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
        loop_vars = [
            _expr.var(body.input[0].name, shape=(), dtype=iter_dtype),  # iteration count
            _expr.var("max_count", shape=(), dtype=iter_dtype),  # iteration count
            get_var(body.input[1].name, cond),  # exit condition
        ]
        loop_vars += [get_var(body.input[i + 2].name, v) for i, v in enumerate(loop_deps)]
        loop_var_names = [v.name_hint for v in loop_vars]
        num_scan_outputs = len(body.output) - (1 + num_deps)
        # TODO (jwfromm) Test with strided slice once type unifier for this case is fixed.
        if num_scan_outputs != 0 and "Slice" in [n.op_type for n in body.node]:
            warnings.warn(
                """
                Using scan outputs in a loop with strided slice
                currently may cause errors during compilation.
                """
            )
        # Construct variables and intial empty tensors for any scan outputs.
        scan_output_vars = []
        scan_output_init = []
        for i in range(num_scan_outputs):
            name, shape, dtype, _ = get_info(body.output[i + 1 + num_deps])
            if dtype == "float":
                dtype = "float32"
            scan_output_vars.append(
                _expr.var(name, shape=([_ty.Any()] * (len(shape) + 1)), dtype=dtype)
            )
            scan_output_init.append(
                _op.reshape(_expr.const(np.array([]).astype(dtype)), [0] + [1] * len(shape))
            )
        # Now we can remove loop iter variables from our inner loop's inputs.
        # This is kind of a hack since we have graph inputs that we don't
        # want to treat as actual inputs.
        while len(body.input) != 0:
            body.input.pop(0)
        # Define the loop body, in this function we need to unpack loop inputs,
        # convert the loop subgraph, and pack outputs for the next iteration.
        def body_fn(*loop_inputs):
            # Unpack inputs
            loop_count = loop_inputs[0]
            max_count = loop_inputs[1]
            cond = loop_inputs[2]
            current_vars = list(loop_inputs[3 : (3 + num_deps)])
            scan_outputs = loop_inputs[(3 + num_deps) :]
            # Prepare body inputs by adding them to node dictionary.
            new_inputs = [loop_count, max_count, cond] + current_vars
            for i, inp in enumerate(new_inputs):
                subgraph_scope._nodes[loop_var_names[i]] = inp
            # Get the output of the current loop using the updated inputs.
            with subgraph_scope:
                loop_outputs = subgraph_scope.from_onnx(
                    body, graph_scope.opset, get_output_expr=True
                )
            # Unpack the body outputs and prepare variables for next iteration.
            new_cond = loop_outputs[0]
            new_loop_vars = [loop_outputs[i] for i in range(1, 1 + num_deps)]
            new_scan_outputs = [loop_outputs[i] for i in range(1 + num_deps, len(loop_outputs))]
            # Add new scan outputs to tracking
            combined_scan_outputs = []
            for i, scan in enumerate(scan_outputs):
                rank = len(infer_shape(scan)) - 1
                new_scan = new_scan_outputs[i]
                expand_scan = _op.expand_dims(new_scan, axis=0)
                # For non scalar outputs we need to broadcast the initial value.
                if rank > 0:
                    new_scan_shape = shape_of(new_scan, dtype=iter_dtype)
                    scan_broadcast = _op.concatenate(
                        [_op.reshape(loop_count, [1]), new_scan_shape], axis=0
                    )
                    scan = _op.broadcast_to(scan, scan_broadcast)
                # Append this iteration's value along the iteration axis.
                combined_scan = _op.concatenate([scan, expand_scan], axis=0)
                combined_scan_outputs.append(combined_scan)
            # Increment counter.
            if max_loop_count is not None:
                incr = _expr.const(1, dtype=iter_dtype)
                loop_count = loop_count + incr
            # Pack loop outputs for next iteration
            # [iter_count, cond, loop_deps, loop_scans]
            return [loop_count, max_count, new_cond] + new_loop_vars + combined_scan_outputs
        # Create the loop function.
        loop = fold_constant(_loops.while_loop(cond_fn, loop_vars + scan_output_vars, body_fn))
        # Now need to run initial values through the graph.
        init_count = _expr.const(0, dtype=iter_dtype)
        loop_vals = loop(init_count, max_loop_count, cond, *loop_deps, *scan_output_init)
        # Extract final iteration outputs.
        if num_deps + num_scan_outputs == 1:
            outputs = _expr.TupleGetItem(loop_vals, 3)
        else:
            outputs = _expr.TupleWrapper(
                _expr.Tuple(
                    [
                        _expr.TupleGetItem(loop_vals, i + 3)
                        for i in range(num_deps + num_scan_outputs)
                    ]
                ),
                num_deps + num_scan_outputs,
            )
        # Update outer graph with constants found in the subgraph.
        free_vars = analysis.free_vars(loop)
        graph_scope._params.update(subgraph_scope._params)
        graph_scope._nodes.update(subgraph_scope._nodes)
        for var in free_vars:
            graph_scope._nodes.update({var.name_hint: var})
        return outputs
class If(OnnxOpConverter):
    """Operator converter for If"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        cond = inputs[0]
        # A tensor condition is reduced to its first element.
        if len(infer_shape(cond)) > 0:
            cond = _op.take(cond, _expr.const(0, dtype="int64"))
        then_branch = attr.get("then_branch", None)
        else_branch = attr.get("else_branch", None)
        assert then_branch is not None and else_branch is not None
        graph_scope = GraphProto.current

        def make_subgraph():
            """Child converter seeded with the outer graph's nodes."""
            sub = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params)
            sub._nodes = graph_scope._nodes.copy()
            return sub

        then_graph = make_subgraph()
        else_graph = make_subgraph()
        # Convert each branch to a relay expression.
        with then_graph:
            then_expr = then_graph.from_onnx(then_branch, graph_scope.opset, get_output_expr=True)
        with else_graph:
            else_expr = else_graph.from_onnx(else_branch, graph_scope.opset, get_output_expr=True)
        # Propagate constants and free variables discovered in either branch
        # back into the parent graph (then-branch first, then else-branch).
        for sub, expr in ((then_graph, then_expr), (else_graph, else_expr)):
            graph_scope._params.update(sub._params)
            graph_scope._nodes.update(sub._nodes)
            for var in analysis.free_vars(expr):
                graph_scope._nodes.update({var.name_hint: var})
        # Now we can construct the relay if statement and return.
        return _expr.If(cond, then_expr, else_expr)
class NonMaxSuppression(OnnxOpConverter):
    """Operator converter for NonMaxSuppression."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        def ensure_scalar(value):
            """Squeeze a rank-1 threshold tensor down to a scalar."""
            rank = len(infer_shape(value))
            assert rank <= 1, "nms thresholds must be scalars"
            if rank == 1:
                return _op.squeeze(value, [0])
            return value

        boxes, scores = inputs[0], inputs[1]
        max_output_boxes_per_class = inputs[2]
        iou_threshold = inputs[3]
        score_threshold = inputs[4]
        boxes_dtype = infer_type(boxes).checked_type.dtype
        # center_point_box == 1 means boxes arrive as (xc, yc, w, h);
        # convert to the (y1, x1, y2, x2) corner layout used below.
        if attr.get("center_point_box", 0) != 0:
            xc, yc, w, h = _op.split(boxes, 4, axis=2)
            half_w = w / _expr.const(2.0, boxes_dtype)
            half_h = h / _expr.const(2.0, boxes_dtype)
            boxes = _op.concatenate(
                [yc - half_h, xc - half_w, yc + half_h, xc + half_w], axis=2
            )
        # Optional thresholds default to 0.
        if iou_threshold is None:
            iou_threshold = _expr.const(0.0, dtype="float32")
        if score_threshold is None:
            score_threshold = _expr.const(0.0, dtype="float32")
        max_output_boxes_per_class = ensure_scalar(max_output_boxes_per_class)
        iou_threshold = ensure_scalar(iou_threshold)
        score_threshold = ensure_scalar(score_threshold)
        nms_out = _op.vision.all_class_non_max_suppression(
            boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold
        )
        # Trim the padded output down to the valid (num_detections, 3) slice.
        three = _op.const(np.array([3]), dtype="int64")
        begin = _op.const(np.array([0, 0]), dtype="int64")
        end = _op.concatenate([nms_out[1], three], axis=0)
        strides = _op.const(np.array([1, 1]), dtype="int64")
        return _op.strided_slice(nms_out[0], begin, end, strides)
class ATen(OnnxOpConverter):
    """Operator converter for Pytorch ATen ops."""

    @classmethod
    def _op_dispatch(cls, operator, inputs, attr, params):
        """Route a decoded ATen operator name to its dedicated converter."""
        op_map = {
            "size": cls._size,
            "arange": cls._arange,
            "reshape": cls._reshape,
            "embedding_bag": cls._embedding_bag,
        }
        assert operator in op_map, "Operator %s is not supported." % operator
        return op_map[operator](inputs, attr, params)

    @classmethod
    def _size(cls, inputs, attr, params):
        """aten::size -> the last dimension of the input's shape (int64)."""
        return _op.take(
            _op.shape_of(inputs[0], dtype="int64"),
            _expr.const(-1, dtype="int64"),
            axis=0,
            mode="wrap",
        )

    @classmethod
    def _arange(cls, inputs, attr, params):
        """aten::arange(start, stop, step) as an int64 range."""
        return _op.arange(inputs[0], inputs[1], inputs[2], dtype="int64")

    @classmethod
    def _reshape(cls, inputs, attr, params):
        """aten::reshape with a dynamic target shape."""
        return _op.reshape(inputs[0], inputs[1])

    @classmethod
    def _embedding_bag(cls, inputs, attr, params):
        """aten::embedding_bag; mode 0/1/2 selects sum/mean/max reduction."""
        mode_map = {0: _op.sum, 1: _op.mean, 2: _op.max}
        mode = attr.get("mode", 1)
        reduction_fn = mode_map[mode]
        weights, indices, offsets = inputs[0], inputs[1], inputs[2]
        # Reshape flat indices into (num_bags, -1) using the offsets count.
        # NOTE(review): this assumes all bags have equal length - confirm.
        offsets_shape = _op.shape_of(offsets, dtype="int64")
        indices_shape = _op.stack(
            [
                _op.take(offsets_shape, _expr.const(0, dtype="int64")),
                _expr.const(-1, dtype="int64"),
            ],
            axis=0,
        )
        indices = _op.reshape(indices, indices_shape)
        embedding = _op.take(weights, indices.astype("int64"), axis=0)
        rembedding = reduction_fn(embedding, axis=1)
        # EmbeddingBag has 4 outputs for some reason despite only one ever being used.
        # Fill the rest with 0s.
        unused_output = _expr.const(0, dtype="float32")
        return _expr.TupleWrapper(
            _expr.Tuple((rembedding, unused_output, unused_output, unused_output)), 4
        )

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Fix: decoding before the presence check previously raised
        # AttributeError on a missing "operator" attribute instead of
        # the intended assertion message.
        raw_operator = attr.get("operator", None)
        assert raw_operator, "ATen Operator not found"
        return cls._op_dispatch(raw_operator.decode("utf-8"), inputs, attr, params)
class QuantizeLinear(OnnxOpConverter):
    """Operator converter for QuantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10 only supports per-tensor quantization (axis fixed at 0).
        data, scale, zero_point = inputs
        out_dtype = infer_type(zero_point).checked_type.dtype
        return _qnn.op.quantize(data, scale, _op.cast(zero_point, "int32"), 0, out_dtype)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13 adds per-axis quantization; ONNX defaults the axis to 1.
        data, scale, zero_point = inputs
        out_dtype = infer_type(zero_point).checked_type.dtype
        axis = attr.get("axis", 1)
        return _qnn.op.quantize(data, scale, _op.cast(zero_point, "int32"), axis, out_dtype)
class DequantizeLinear(OnnxOpConverter):
    """Operator converter for DequantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10 only supports per-tensor dequantization (axis fixed at 0).
        data, scale, zero_point = inputs
        return _qnn.op.dequantize(data, scale, _op.cast(zero_point, "int32"), 0)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13 adds per-axis dequantization; ONNX defaults the axis to 1.
        data, scale, zero_point = inputs
        axis = attr.get("axis", 1)
        return _qnn.op.dequantize(data, scale, _op.cast(zero_point, "int32"), axis)
class DynamicQuantizeLinear(OnnxOpConverter):
    """Operator converter for DynamicQuantizeLinear."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """This op is deprecated and only supports uint8."""
        data = inputs[0]
        data_dtype = infer_type(data).checked_type.dtype
        zero = _op.const(0, dtype=data_dtype)
        # The quantization range is clamped to include 0 so that zero is
        # exactly representable.
        hi = _op.maximum(zero, _op.max(data))
        lo = _op.minimum(zero, _op.min(data))
        scale = (hi - lo) / _op.const(255, dtype=data_dtype)
        # NOTE(review): the zero point is derived from the raw data minimum,
        # not the zero-clamped one above - preserved exactly as written.
        zp = zero - _op.min(data) / scale
        zp = _op.cast(_op.round(_op.clip(zp, 0, 255)), "uint8")
        return _expr.TupleWrapper(
            _expr.Tuple(
                [_qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, "uint8"), scale, zp]
            ),
            size=3,
        )
class BitShift(OnnxOpConverter):
    """Operator converter for BitShift."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        if len(inputs) != 2:
            raise ValueError("Bitshift expects 2 inputs")
        # Fix: the default must be bytes - ONNX attribute strings arrive as
        # bytes, and the previous str default "LEFT" would crash on
        # .decode("ascii") whenever the attribute was omitted.
        direction = attr.get("direction", b"LEFT").decode("ascii")
        if direction == "LEFT":
            out = _op.left_shift(*inputs)
        elif direction == "RIGHT":
            out = _op.right_shift(*inputs)
        else:
            raise ValueError("Unsupported Shift Direction: " + direction)
        return out
# Compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
def _get_convert_map(opset):
    """Return the dict mapping ONNX operator names to converter callables
    specialized for the given opset version."""
    return {
        # defs/experimental
        "Identity": Renamer("copy"),
        "Affine": Affine.get_converter(opset),
        "BitShift": BitShift.get_converter(opset),
        "ThresholdedRelu": ThresholdedRelu.get_converter(opset),
        "ScaledTanh": ScaledTanh.get_converter(opset),
        "ParametricSoftplus": ParametricSoftPlus.get_converter(opset),
        "Constant": Constant.get_converter(opset),
        "ConstantOfShape": ConstantOfShape.get_converter(opset),
        # 'GivenTensorFill'
        "FC": AttrCvt("dense", ignores=["axis", "axis_w"]),
        "Scale": Scale.get_converter(opset),
        # 'GRUUnit'
        # 'ATen'
        # 'ImageScaler'
        # 'MeanVarianceNormalization'
        # 'Crop'
        # 'Embedding'
        "Upsample": Upsample.get_converter(opset),
        "SpatialBN": BatchNorm.get_converter(opset),
        # defs/generator
        # 'Constant' # Implemented
        # 'RandomUniform'
        # 'RandomNormal'
        # 'RandomUniformLike'
        # 'RandomNormalLike'
        # defs/logical
        # defs/math
        "Add": Add.get_converter(opset),
        "Sub": Sub.get_converter(opset),
        "Mul": Mul.get_converter(opset),
        "Div": Div.get_converter(opset),
        "Neg": Renamer("negative"),
        "Abs": Absolute.get_converter(opset),
        "Reciprocal": Reciprocal.get_converter(opset),
        "Floor": Renamer("floor"),
        "Ceil": Renamer("ceil"),
        "Round": Renamer("round"),
        "IsInf": Renamer("isinf"),
        "IsNaN": Renamer("isnan"),
        "Sqrt": Renamer("sqrt"),
        "Relu": Renamer("relu"),
        "LeakyRelu": Renamer("leaky_relu"),
        "Selu": Selu.get_converter(opset),
        "Elu": Elu.get_converter(opset),
        "Exp": Renamer("exp"),
        "Greater": Greater.get_converter(opset),
        "Less": Less.get_converter(opset),
        "Log": Renamer("log"),
        "Acos": Renamer("acos"),
        "Acosh": Renamer("acosh"),
        "Asin": Renamer("asin"),
        "Asinh": Renamer("asinh"),
        "Atan": Renamer("atan"),
        "Atanh": Renamer("atanh"),
        "Cos": Renamer("cos"),
        "Cosh": Renamer("cosh"),
        "Sin": Renamer("sin"),
        "Sinh": Renamer("sinh"),
        "Tan": Renamer("tan"),
        "Tanh": Renamer("tanh"),
        "Pow": Renamer("power"),
        "PRelu": Prelu.get_converter(opset),
        "Sigmoid": Renamer("sigmoid"),
        "HardSigmoid": HardSigmoid.get_converter(opset),
        "Max": Maximum.get_converter(opset),
        "Min": Minimum.get_converter(opset),
        "Sum": Sum.get_converter(opset),
        "Mean": Mean.get_converter(opset),
        "Clip": Clip.get_converter(opset),
        "Softplus": Softplus.get_converter(opset),
        # softmax default axis is different in onnx
        "Softmax": Softmax.get_converter(opset),
        "LogSoftmax": LogSoftmax.get_converter(opset),
        "OneHot": OneHot.get_converter(opset),
        "Hardmax": Hardmax.get_converter(opset),
        "Shrink": Shrink.get_converter(opset),
        "Softsign": Softsign.get_converter(opset),
        "Gemm": Gemm.get_converter(opset),
        "MatMul": MatMul.get_converter(opset),
        "Mod": Mod.get_converter(opset),
        "Xor": Renamer("logical_xor"),
        # defs/nn
        "AveragePool": AveragePool.get_converter(opset),
        "LpPool": LpPool.get_converter(opset),
        "MaxPool": MaxPool.get_converter(opset),
        "MaxUnpool": MaxUnpool.get_converter(opset),
        "Conv": Conv.get_converter(opset),
        "ConvTranspose": ConvTranspose.get_converter(opset),
        "GlobalAveragePool": GlobalAveragePool.get_converter(opset),
        "GlobalMaxPool": GlobalMaxPool.get_converter(opset),
        "BatchNormalization": BatchNorm.get_converter(opset),
        "InstanceNormalization": InstanceNorm.get_converter(opset),
        # 'LpNormalization'
        "Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]),
        "Flatten": Flatten.get_converter(opset),
        "LRN": LRN.get_converter(opset),
        # Recurrent Layers
        "LSTM": LSTM.get_converter(opset),
        "GRU": GRU.get_converter(opset),
        # defs/vision
        "MaxRoiPool": MaxRoiPool.get_converter(opset),
        "RoiAlign": RoiAlign.get_converter(opset),
        "NonMaxSuppression": NonMaxSuppression.get_converter(opset),
        # defs/reduction
        "ReduceMax": ReduceMax.get_converter(opset),
        "ReduceMin": ReduceMin.get_converter(opset),
        "ReduceSum": ReduceSum.get_converter(opset),
        "ReduceMean": ReduceMean.get_converter(opset),
        "ReduceProd": ReduceProd.get_converter(opset),
        "ReduceLogSumExp": ReduceLogSumExp.get_converter(opset),
        "ReduceLogSum": ReduceLogSum.get_converter(opset),
        "ReduceSumSquare": ReduceSumSquare.get_converter(opset),
        "ReduceL1": ReduceL1.get_converter(opset),
        "ReduceL2": ReduceL2.get_converter(opset),
        # defs/sorting
        "ArgMax": ArgMax.get_converter(opset),
        "ArgMin": ArgMin.get_converter(opset),
        "TopK": TopK.get_converter(opset),
        # defs/tensor
        "Cast": Cast.get_converter(opset),
        "Reshape": Reshape.get_converter(opset),
        "Expand": Expand.get_converter(opset),
        "Concat": Concat.get_converter(opset),
        "Split": Split.get_converter(opset),
        "Slice": Slice.get_converter(opset),
        "Transpose": AttrCvt("transpose", {"perm": "axes"}),
        "DepthToSpace": DepthToSpace.get_converter(opset),
        "SpaceToDepth": SpaceToDepth.get_converter(opset),
        "Gather": Gather.get_converter(opset),
        "GatherElements": GatherElements.get_converter(opset),
        "GatherND": GatherND.get_converter(opset),
        "Size": AttrCvt("ndarray_size", extras={"dtype": "int64"}),
        "Scatter": Scatter.get_converter(opset),
        "ScatterElements": Scatter.get_converter(opset),
        "ScatterND": ScatterND.get_converter(opset),
        "Squeeze": AttrCvt("squeeze", {"axes": "axis"}),
        "Unsqueeze": Unsqueeze.get_converter(opset),
        "Pad": Pad.get_converter(opset),
        "Shape": Shape.get_converter(opset),
        "Sign": Sign.get_converter(opset),
        "Equal": Equal.get_converter(opset),
        "Not": Not.get_converter(opset),
        "And": And.get_converter(opset),
        "Tile": Tile.get_converter(opset),
        "Erf": Erf.get_converter(opset),
        "Where": Where.get_converter(opset),
        "Or": Or.get_converter(opset),
        "Resize": Resize.get_converter(opset),
        "NonZero": NonZero.get_converter(opset),
        "Range": Range.get_converter(opset),
        "CumSum": CumSum.get_converter(opset),
        # defs/control_flow
        "Loop": Loop.get_converter(opset),
        "If": If.get_converter(opset),
        # Torch ATen Dispatcher.
        "ATen": ATen.get_converter(opset),
        # Quantization
        "QuantizeLinear": QuantizeLinear.get_converter(opset),
        "DequantizeLinear": DequantizeLinear.get_converter(opset),
        "DynamicQuantizeLinear": DynamicQuantizeLinear.get_converter(opset),
    }
class GraphProto:
    """A helper class for handling Relay expression copying from pb2.GraphProto.
    Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
    Parameters
    ----------
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    freeze_params: bool
        If this parameter is true, the importer will take any provided
        onnx input values (weights, shapes, etc) and embed them into the relay model
        as Constants instead of variables. This allows more aggressive optimizations
        at compile time and helps in making models static if certain inputs represent
        attributes relay would traditionally consider compile-time constants.
    """

    # The GraphProto currently active as a context manager; subgraph
    # converters (e.g. Loop and If) read this to locate their parent scope.
    current = None
def __init__(self, shape, dtype, freeze_params=False):
self._nodes = {}
self._params = {}
self._inputs = {}
self._renames = {}
self._num_input = 0
self._num_param = 0
self._shape = shape.copy() if shape else {}
self._input_names = []
self._dtype = dtype
self.opset = None
self._freeze_params = freeze_params
def __enter__(self):
self._old_manager = GraphProto.current
GraphProto.current = self
return self
def __exit__(self, ptype, value, trace):
GraphProto.current = self._old_manager
def freeze(self, func, params):
bind_map = {}
for name in params.keys():
if name in self._nodes.keys():
bind_map[self._nodes[name]] = _expr.const(params[name])
body = _expr.bind(func.body, bind_map)
fn = _function.Function(analysis.free_vars(body), body)
return fn, {}
def from_onnx(self, graph, opset, get_output_expr=False):
"""Construct Relay expression from ONNX graph.
Onnx graph is a python protobuf object.
The companion parameters will be handled automatically.
However, the input names from onnx graph is vague, mixing inputs and
network weights/bias such as "1", "2"...
For convenience, we rename the `real` input names to "input_0",
"input_1"... And renaming parameters to "param_0", "param_1"...
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
opset : opset version
get_output_expr: bool
If set to true, this conversion will return each output expression rather
than a packaged module. This can be useful when converting subgraphs to
relay.
Returns
-------
mod : tvm.IRModule
The returned relay module
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
self.opset = opset
# parse network inputs to relay, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
array = self._parse_array(init_tensor)
if self._freeze_params:
self._nodes[init_tensor.name] = _expr.const(array)
else:
self._params[init_tensor.name] = array
self._nodes[init_tensor.name] = new_var(
init_tensor.name,
shape=self._params[init_tensor.name].shape,
dtype=self._params[init_tensor.name].dtype,
)
for i in graph.input:
# from onnx v0.2, GraphProto.input has type ValueInfoProto,
# and the name is 'i.name'
i_name, i_shape, d_type, i_shape_name = get_info(i)
if i_name in self._params:
# i is a param instead of input
self._num_param += 1
self._params[i_name] = self._params.pop(i_name)
self._nodes[i_name] = new_var(
i_name, shape=self._params[i_name].shape, dtype=self._params[i_name].dtype
)
elif i_name in self._nodes:
continue
else:
self._num_input += 1
self._input_names.append(i_name)
if i_name in self._shape:
i_shape = self._shape[i_name]
else:
if "?" in str(i_shape):
warning_msg = (
"Input %s has unknown dimension shapes: %s. "
"Specifying static values may improve performance"
% (i_name, str(i_shape_name))
)
warnings.warn(warning_msg)
if isinstance(self._dtype, dict):
dtype = self._dtype[i_name] if i_name in self._dtype else d_type
else:
dtype = d_type
self._nodes[i_name] = new_var(i_name, shape=i_shape, dtype=dtype)
self._inputs[i_name] = self._nodes[i_name]
# Only check user inputs in the outer-most graph scope.
if self._old_manager is None:
assert all(
[name in self._input_names for name in self._shape.keys()]
), "User specified the shape for inputs that weren't found in the graph: " + str(
self._shape
)
# get list of unsupported ops
convert_map = _get_convert_map(opset)
unsupported_ops = set()
for node in graph.node:
op_name = node.op_type
if (
op_name not in convert_map
and op_name != "Constant"
and op_name not in _identity_list
):
unsupported_ops.add(op_name)
if unsupported_ops:
msg = "The following operators are not supported for frontend ONNX: "
msg += ", ".join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
# construct nodes, nodes are stored as directed acyclic graph
for node in graph.node:
op_name = node.op_type
attr = self._parse_attr(node.attribute)
# Create and populate onnx input object.
inputs = onnx_input()
for i in node.input:
if i != "":
inputs[i] = self._nodes[self._renames.get(i, i)]
else:
inputs[i] = None
i_name = self._parse_value_proto(node)
node_output = self._fix_outputs(op_name, node.output)
attr["tvm_custom"] = {}
attr["tvm_custom"]["name"] = i_name
attr["tvm_custom"]["num_outputs"] = len(node_output)
op = self._convert_operator(op_name, inputs, attr, opset)
if not isinstance(op, _expr.TupleWrapper):
outputs_num = 1
else:
outputs_num = len(op)
if outputs_num > 1:
# ONNX supports optional outputs for some nodes.
# This block searches for missing outputs in the ONNX graph
# and removes any unneeded ops
valid_outputs = [False] * outputs_num
for i, output in enumerate(node_output):
if output != "":
valid_outputs[i] = True
# If we have outputs ONNX isn't expecting, we need to drop them
if not all(valid_outputs):
tup = op.astuple()
# TupleWrapper can also wrap ops with TupleType outputs
if isinstance(tup, _expr.Tuple):
# For tuples, we extract the fields instead of using GetTupleItem
outputs = [tup.fields[i] for i, valid in enumerate(valid_outputs) if valid]
else:
# For call nodes, we need to GetTupleItem
outputs = [op[i] for i, valid in enumerate(valid_outputs) if valid]
# Create the new op with valid outputs
if len(outputs) == 1:
op = outputs[0]
else:
op = _expr.TupleWrapper(outputs, len(outputs))
# Drop invalid outputs for the onnx node
outputs_num = len(outputs)
node_output = [output for output in node_output if output != ""]
assert (
len(node_output) == outputs_num
), "Number of output mismatch {} vs {} in {}.".format(
len(node_output), outputs_num, op_name
)
if outputs_num == 1:
self._nodes[node_output[0]] = fold_constant(op)
else:
op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))
for k, i in zip(list(node_output), range(len(node_output))):
self._nodes[k] = op[i]
# now return the outputs
outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
# If requested, directly return the converted expressions.
if get_output_expr:
return outputs
## Maintain the order of inputs and parameters from the ONNX graph, but only include
## those parameters that are needed to execute the relay graph
free_vars = analysis.free_vars(outputs)
nodes = {v: k for k, v in self._nodes.items()}
free_vars = [nodes[var] for var in free_vars]
for i_name in self._params:
if i_name in free_vars and i_name not in self._inputs:
self._inputs[i_name] = self._nodes[i_name]
# Create a function from our output expression and all input variables.
func = _function.Function([v for k, v in self._inputs.items()], outputs)
return IRModule.from_expr(func), self._params
def _parse_value_proto(self, value_proto):
"""Parse ValueProto or raw str."""
try:
name = value_proto.name
except AttributeError:
name = value_proto
return name
def _parse_array(self, tensor_proto):
np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims))
return _nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ["f", "i", "s", "g"]:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ["floats", "ints", "strings"]:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ["t"]:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ["tensors"]:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ["graphs"]:
if list(getattr(a, f)):
raise NotImplementedError("Field {} is not supported in relay.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
def _convert_operator(self, op_name, inputs, attrs, opset):
"""Convert ONNX operator into a Relay operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of tvm.relay.function.Function
List of inputs.
attrs : dict
Dict of operator attributes
opset : int
Opset version
Returns
-------
sym : tvm.relay.function.Function
Converted relay function
"""
convert_map = _get_convert_map(opset)
if op_name in _identity_list:
sym = get_relay_op(op_name)(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs, self._params)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
return sym
def _fix_outputs(self, op_name, outputs):
"""A hack to handle dropout or similar operator that have more than one out
in ONNX.
"""
if op_name == "Dropout":
if len(outputs) == 1:
return outputs
# TODO(zhreshold): support dropout mask?
outputs = outputs[:-1]
return outputs
def from_onnx(model, shape=None, dtype="float32", opset=None, freeze_params=False):
    """Convert a ONNX model into an equivalent Relay Function.
    ONNX graphs are represented as Python Protobuf objects.
    The companion parameters will be handled automatically.
    However, the input names from onnx graph is vague, mixing inputs and
    network weights/bias such as "1", "2"...
    For convenience, we rename the `real` input names to "input_0",
    "input_1"... And renaming parameters to "param_0", "param_1"...
    By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
    retains that dynamism upon import, and the compiler attempts to convert the
    model into a static shapes at compile time. If this fails, there may still
    be dynamic operations in the model. Not all TVM kernels currently support
    dynamic shapes, please file an issue on discuss.tvm.apache.org
    if you hit an error with dynamic kernels.
    Parameters
    ----------
    model : protobuf object
        ONNX ModelProto after ONNX v1.1.0
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    opset : int, optional
        Override to autodetected opset.
        This can be helpful for some testing.
    freeze_params: bool
        If this parameter is true, the importer will take any provided
        onnx input values (weights, shapes, etc) and embed them into the relay model
        as Constants instead of variables. This allows more aggressive optimizations
        at compile time and helps in making models static if certain inputs represent
        attributes relay would traditionally consider compile-time constants.
    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation
    params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by relay
    """
    try:
        import onnx
        if hasattr(onnx.checker, "check_model"):
            # try use onnx's own model checker before converting any model
            try:
                onnx.checker.check_model(model)
            except Exception as e:  # pylint: disable=c-extension-no-member, broad-except
                # the checker is a bit violent about errors, so simply print warnings here
                warnings.warn(str(e))
    except ImportError:
        # onnx itself is optional: conversion works on the raw protobuf.
        pass
    g = GraphProto(shape, dtype, freeze_params)
    graph = model.graph
    if opset is None:
        # Autodetect the opset from the model; fall back to 1 when absent.
        try:
            opset = model.opset_import[0].version if model.opset_import else 1
        except AttributeError:
            opset = 1
    # Use the graph proto as a scope so that ops can access other nodes if needed.
    with g:
        mod, params = g.from_onnx(graph, opset)
    return mod, params
|
from aws_cdk import aws_ecr as ecr, core
class ECRStack(core.Stack):
    """CDK stack that provisions the ECR repositories for the Airflow services
    and dbt. All repositories are deleted with the stack (DESTROY policy)."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # (attribute name, construct id, ECR repository name) for each repo,
        # in the same order the repositories were originally declared.
        repo_specs = [
            ("airflow_webserver_repo", "airflow_webserver_repo", "airflow_webserver_cdk"),
            ("airflow_scheduler_repo", "airflow_scheduler_repo", "airflow_scheduler_cdk"),
            ("airflow_worker_repo", "airflow_worker_repo", "airflow_worker_cdk"),
            ("dbt_repo", "dbt_ecr_repository", "dbt_cdk"),
        ]
        for attr_name, construct_id, repo_name in repo_specs:
            repository = ecr.Repository(
                self,
                construct_id,
                repository_name=repo_name,
                removal_policy=core.RemovalPolicy.DESTROY,
            )
            setattr(self, attr_name, repository)
|
import { EventEmitter } from "events";
import { _makeEmitter } from "./emitter";
import { SubscriptionState } from "./subscription-state";
/**
* A subscription provides {@link Notification} of events.
*
* @remarks
* See {@link Subscriber} for details on establishing a subscription.
*
* @public
*/
export class Subscription {
    /**
     * Constructor.
     * @param userAgent - User agent. See {@link UserAgent} for details.
     * @internal
     */
    constructor(userAgent, options = {}) {
        this._disposed = false;
        // Subscriptions start in Initial and move through the state machine
        // validated in stateTransition().
        this._state = SubscriptionState.Initial;
        this._stateEventEmitter = new EventEmitter();
        this._logger = userAgent.getLogger("sip.Subscription");
        this._userAgent = userAgent;
        this.delegate = options.delegate;
    }
    /**
     * Destructor.
     */
    dispose() {
        // Idempotent: repeated calls resolve immediately.
        if (this._disposed) {
            return Promise.resolve();
        }
        this._disposed = true;
        this._stateEventEmitter.removeAllListeners();
        return Promise.resolve();
    }
    /**
     * The subscribed subscription dialog.
     */
    get dialog() {
        // NOTE(review): _dialog is not set in this class — presumably assigned
        // by a subclass; may be undefined until then.
        return this._dialog;
    }
    /**
     * True if disposed.
     * @internal
     */
    get disposed() {
        return this._disposed;
    }
    /**
     * Subscription state. See {@link SubscriptionState} for details.
     */
    get state() {
        return this._state;
    }
    /**
     * Emits when the subscription `state` property changes.
     */
    get stateChange() {
        return _makeEmitter(this._stateEventEmitter);
    }
    /** @internal */
    stateTransition(newState) {
        const invalidTransition = () => {
            throw new Error(`Invalid state transition from ${this._state} to ${newState}`);
        };
        // Validate transition
        // Allowed: Initial -> NotifyWait|Terminated; NotifyWait ->
        // Subscribed|Terminated; Subscribed -> Terminated; Terminated is final.
        switch (this._state) {
            case SubscriptionState.Initial:
                if (newState !== SubscriptionState.NotifyWait && newState !== SubscriptionState.Terminated) {
                    invalidTransition();
                }
                break;
            case SubscriptionState.NotifyWait:
                if (newState !== SubscriptionState.Subscribed && newState !== SubscriptionState.Terminated) {
                    invalidTransition();
                }
                break;
            case SubscriptionState.Subscribed:
                if (newState !== SubscriptionState.Terminated) {
                    invalidTransition();
                }
                break;
            case SubscriptionState.Terminated:
                invalidTransition();
                break;
            default:
                throw new Error("Unrecognized state.");
        }
        // Guard against duplicate transition
        if (this._state === newState) {
            return;
        }
        // Transition
        this._state = newState;
        this._logger.log(`Subscription ${this._dialog ? this._dialog.id : undefined} transitioned to ${this._state}`);
        this._stateEventEmitter.emit("event", this._state);
        // Dispose
        if (newState === SubscriptionState.Terminated) {
            this.dispose();
        }
    }
}
|
// Leif Peterson and Eric Rissler 2016
#pragma once
#include "InteractInterface.generated.h"
/**
*
*/
// UObject boilerplate wrapper for the interface; required by Unreal's
// reflection system and never subclassed directly. Blueprintable allows
// Blueprint classes to implement the interface.
UINTERFACE(Blueprintable)
class UInteractInterface : public UInterface
{
    GENERATED_UINTERFACE_BODY()
};
// Native interface class actually implemented by interactable actors.
class IInteractInterface
{ GENERATED_IINTERFACE_BODY()
public:
    // BlueprintNativeEvent: has a C++ default implementation (Interact_Implementation)
    // that Blueprints may override; callable from Blueprint graphs.
    UFUNCTION(BlueprintNativeEvent, BlueprintCallable, Category = "Interact")
    bool Interact();
};
|
define(['util', 'jquery', 'jquery.highlight'], function(util, $) {
    $(document).ready(function () {
        /*
         * Codeblock copy to clipboard action
         */
        $('.codeblock').mouseover(function(){
            // WH-1806
            var item = $('<span class="copyTooltip wh-tooltip-container" data-tooltip-position="left"/>');
            // Only add one copy tooltip per code block.
            if ( $(this).find('.copyTooltip').length == 0 ){
                $(this).prepend(item);
                $('.codeblock .copyTooltip').click(function(){
                    var txt = $(this).closest(".codeblock").text();
                    if(!txt || txt == ''){
                        return;
                    }
                    copyTextToClipboard(txt, $(this));
                });
            }
        });
        $('.codeblock').mouseleave(function(){
            $(this).find('.copyTooltip').remove();
        });

        /**
         * @description Show a transient tooltip on the copy button.
         * Consolidates the previously duplicated success/failure tooltip code.
         * The tooltip is removed on mouseleave or after 3 seconds.
         * @param copyTooltipSpan jQuery wrapper of the copy button span
         * @param message text displayed inside the tooltip
         */
        function showTooltip(copyTooltipSpan, message) {
            // WH-1806
            if (copyTooltipSpan.find('.wh-tooltip').length == 0) {
                var tooltipContainer = $(
                    '<span>' +
                    '    <span class="wh-tooltip"><p class="wh-tooltip-content">' + message + '</p></span>' +
                    '</span>'
                );
                copyTooltipSpan.prepend(tooltipContainer);
                copyTooltipSpan.mouseleave(function() {
                    tooltipContainer.remove();
                });
                setTimeout(function(){ tooltipContainer.remove(); }, 3000);
            }
        }

        /**
         * @description Copy the text to the clipboard
         * Uses a temporary off-screen textarea + document.execCommand('copy').
         * @param text the text to place on the clipboard
         * @param copyTooltipSpan jQuery wrapper of the copy button (tooltip anchor)
         */
        function copyTextToClipboard(text, copyTooltipSpan) {
            var textArea = document.createElement("textarea");
            // 'fixed' keeps the page from scrolling to the hidden textarea.
            textArea.style.position = 'fixed';
            textArea.value = text;
            document.body.appendChild(textArea);
            textArea.select();
            try {
                document.execCommand('copy');
                showTooltip(copyTooltipSpan, 'Copied to clipboard');
            } catch (err) {
                // Unable to copy
                showTooltip(copyTooltipSpan, 'Oops, unable to copy');
                util.debug('Oops, unable to copy codeblock content!', err);
            }
            document.body.removeChild(textArea);
        }
    });
});
|
import argparse
import random
import time
from tqdm import trange
import hivemind
from hivemind.utils.threading import increase_file_limit
logger = hivemind.get_logger(__name__)
def random_endpoint() -> "hivemind.Endpoint":
    """Return a random IPv4 endpoint string of the form "a.b.c.d:port".

    BUG FIX: ``random.randint`` is inclusive on both ends, so the previous
    bounds could produce the invalid octet 256 and the reserved port 0.
    Octets are now drawn from [0, 255] and the port from [1, 65535].
    """
    octets = [random.randint(0, 255) for _ in range(4)]
    port = random.randint(1, 65535)
    return f"{octets[0]}.{octets[1]}.{octets[2]}.{octets[3]}:{port}"
def benchmark_dht(num_peers: int, initial_peers: int, num_experts: int, expert_batch_size: int, random_seed: int,
                  wait_after_request: float, wait_before_read: float, wait_timeout: float, expiration: float):
    """Spin up a swarm of DHT peers, then measure store and get throughput.

    Stores ``num_experts`` expert uids in batches from one peer, reads them back
    from another, and prints success rates, timings and node survival.
    Requires ``num_peers >= 2`` (distinct store/get peers are taken from the
    end of the peer list).
    """
    random.seed(random_seed)
    print("Creating peers...")
    peers = []
    for _ in trange(num_peers):
        # Each new peer bootstraps from up to `initial_peers` randomly chosen existing peers.
        neighbors = [f'0.0.0.0:{node.port}' for node in random.sample(peers, min(initial_peers, len(peers)))]
        peer = hivemind.DHT(initial_peers=neighbors, start=True, wait_timeout=wait_timeout,
                            expiration=expiration, listen_on=f'0.0.0.0:*')
        peers.append(peer)
    store_peer, get_peer = peers[-2:]
    # set() deduplicates uids: collisions are possible with random digits.
    expert_uids = list(set(f"expert.{random.randint(0, 999)}.{random.randint(0, 999)}.{random.randint(0, 999)}"
                           for _ in range(num_experts)))
    print(f"Sampled {len(expert_uids)} unique ids (after deduplication)")
    random.shuffle(expert_uids)
    print(f"Storing experts to dht in batches of {expert_batch_size}...")
    successful_stores = total_stores = total_store_time = 0
    benchmark_started = time.perf_counter()
    endpoints = []
    for start in trange(0, num_experts, expert_batch_size):
        store_start = time.perf_counter()
        endpoints.append(random_endpoint())
        successes = store_peer.declare_experts(expert_uids[start: start + expert_batch_size], endpoints[-1]).values()
        total_store_time += time.perf_counter() - store_start
        total_stores += len(successes)
        successful_stores += sum(successes)
        time.sleep(wait_after_request)
    print(f"Store success rate: {successful_stores / total_stores * 100:.1f}% ({successful_stores} / {total_stores})")
    print(f"Mean store time: {total_store_time / total_stores:.5}, Total: {total_store_time:.5}")
    time.sleep(wait_before_read)
    if time.perf_counter() - benchmark_started > expiration:
        logger.warning("All keys expired before benchmark started getting them. Consider increasing expiration_time")
    successful_gets = total_get_time = 0
    for start in trange(0, len(expert_uids), expert_batch_size):
        get_start = time.perf_counter()
        get_result = get_peer.get_experts(expert_uids[start: start + expert_batch_size])
        total_get_time += time.perf_counter() - get_start
        for i, expert in enumerate(get_result):
            # A get counts as successful only if it returns the right uid AND
            # the endpoint that batch was stored under.
            if expert is not None and expert.uid == expert_uids[start + i] \
                    and expert.endpoint == endpoints[start // expert_batch_size]:
                successful_gets += 1
    if time.perf_counter() - benchmark_started > expiration:
        logger.warning("keys expired midway during get requests. If that isn't desired, increase expiration_time param")
    print(f"Get success rate: {successful_gets / len(expert_uids) * 100:.1f} ({successful_gets} / {len(expert_uids)})")
    print(f"Mean get time: {total_get_time / len(expert_uids):.5f}, Total: {total_get_time:.5f}")
    alive_peers = [peer.is_alive() for peer in peers]
    # BUG FIX: previously used len(alive_peers), which always equals len(peers)
    # and therefore reported 100% survival; count the peers actually alive.
    print(f"Node survival rate: {sum(alive_peers) / len(peers) * 100:.3f}%")
if __name__ == "__main__":
    # Command-line entry point: every benchmark knob is exposed as a flag
    # and forwarded to benchmark_dht as keyword arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_peers', type=int, default=32, required=False)
    parser.add_argument('--initial_peers', type=int, default=1, required=False)
    parser.add_argument('--num_experts', type=int, default=256, required=False)
    parser.add_argument('--expert_batch_size', type=int, default=32, required=False)
    parser.add_argument('--expiration', type=float, default=300, required=False)
    parser.add_argument('--wait_after_request', type=float, default=0, required=False)
    parser.add_argument('--wait_before_read', type=float, default=0, required=False)
    parser.add_argument('--wait_timeout', type=float, default=5, required=False)
    parser.add_argument('--random_seed', type=int, default=random.randint(1, 1000))
    parser.add_argument('--increase_file_limit', action="store_true")
    args = vars(parser.parse_args())
    # --increase_file_limit raises the open-file-descriptor limit (many DHT
    # peers each hold sockets); pop it so it is not passed to benchmark_dht.
    if args.pop('increase_file_limit', False):
        increase_file_limit()
    benchmark_dht(**args)
|
from . import datasets
from . import esperimenti
from . import grafici
from . import hello_world
from . import indici
from . import matematica
from . import metriche
from . import modello_lineare
from . import preprocessamento
from . import scraping
from . import selezione_variabili
from . import tests
from . import varieta
|
import React from 'react';
import { connect } from 'dva';
import { Layout } from 'antd';
import { Switch, routerRedux } from 'dva/router';
import NavBar from 'components/NavBar';
import { LeftSideBar, RightSideBar } from 'components/SideBar';
import TopBar from 'components/TopBar';
// import SkinToolbox from 'components/SkinToolbox';
import pathToRegexp from 'path-to-regexp';
import { enquireIsMobile } from '@/utils/enquireScreen';
import TabsLayout from './TabsLayout';
import './styles/basic.less';
import $$ from 'cmn-utils';
import cx from 'classnames';
const { Content, Header } = Layout;
/**
 * Basic layout
 * Supports multiple skins — theme: [light, grey, primary, info, warning, danger, alert, system, success, dark]
 * Supports multiple layout options: [header (fixed header), sidebar (fixed sidebar), breadcrumb (fixed breadcrumb), tabLayout (tab layout)]
 * @author weiq
 */
@connect(({ global }) => ({ global }))
export default class BasicLayout extends React.PureComponent {
  constructor(props) {
    super(props);
    const user = $$.getStore('user', []);
    const theme = $$.getStore('theme', {
      leftSide: 'darkgrey', // left sidebar
      navbar: 'grey' // top navbar
    });
    if (!theme.layout) {
      // Default layout flags when none were persisted.
      theme.layout = [
        'fixedHeader',
        'fixedSidebar',
        'fixeds'
        // 'hideds',
        // 'tabLayout',
      ];
    }
    this.state = {
      collapsedLeftSide: false, // left sidebar collapse toggle
      leftCollapsedWidth: 60, // left sidebar width
      // expandTopBar: false, // header multi-function area toggle
      showSidebarHeader: true, // left sidebar header toggle
      collapsedRightSide: true, // right sidebar collapse toggle
      theme, // skin settings
      user,
      currentMenu: {},
      isMobile: false
    };
    props.dispatch({
      type: 'global/getMenu'
    });
  }
  componentDidMount() {
    // Watch screen size; re-render when crossing the mobile breakpoint.
    this.unregisterEnquire = enquireIsMobile(ismobile => {
      const { isMobile } = this.state;
      if (isMobile !== ismobile) {
        this.setState({
          isMobile: ismobile
        });
      }
    });
  }
  componentWillMount() {
    // Check whether the user is logged in; redirect to login if not.
    // NOTE(review): componentWillMount is deprecated in React 16.3+ —
    // consider moving this check to the constructor or componentDidMount.
    const user = $$.getStore('user');
    if (!user) {
      this.props.dispatch(routerRedux.replace('/sign/login'));
    } else {
    }
  }
  componentWillReceiveProps(nextProps) {
    // Recompute the active menu entry when the route or the flat menu changes.
    if (
      nextProps.location.pathname !== this.props.location.pathname ||
      nextProps.global.flatMenu !== this.props.global.flatMenu
    ) {
      this.setState({
        currentMenu: this.getCurrentMenu(nextProps) || {}
      });
    }
  }
  componentWillUnmount() {
    // Remove the screen-size listener.
    this.unregisterEnquire();
  }
  getCurrentMenu(props) {
    const {
      location: { pathname },
      global
    } = props || this.props;
    const menu = this.getMeunMatchKeys(global.flatMenu, pathname)[0];
    return menu;
  }
  // Return all flat-menu items whose path pattern matches the given path.
  getMeunMatchKeys = (flatMenu, path) => {
    return flatMenu.filter(item => {
      return pathToRegexp(item.path).test(path);
    });
  };
  /**
   * Collapse control for the menu icon at the top left.
   */
  onCollapseLeftSide = _ => {
    const collapsedLeftSide =
      this.state.leftCollapsedWidth === 0
        ? true
        : !this.state.collapsedLeftSide;
    const collapsedRightSide =
      this.state.collapsedRightSide || !collapsedLeftSide;
    this.setState({
      collapsedLeftSide,
      collapsedRightSide,
      leftCollapsedWidth: 60
    });
  };
  /**
   * Fully close the left sidebar, i.e. width 0.
   */
  onCollapseLeftSideAll = _ => {
    this.setState({
      collapsedLeftSide: true,
      leftCollapsedWidth: 0
    });
  };
  /**
   * Expand the multi-function area in the breadcrumb bar.
   */
  // onExpandTopBar = _ => {
  //   this.setState({
  //     expandTopBar: true
  //   });
  // };
  /**
   * Opposite of the above.
   */
  // onCollapseTopBar = _ => {
  //   this.setState({
  //     expandTopBar: false
  //   });
  // };
  /**
   * Toggle the header inside the left sidebar.
   */
  toggleSidebarHeader = _ => {
    this.setState({
      showSidebarHeader: !this.state.showSidebarHeader
    });
  };
  /**
   * Toggle the right sidebar.
   */
  toggleRightSide = _ => {
    this.setState({
      collapsedLeftSide: this.state.collapsedRightSide,
      collapsedRightSide: !this.state.collapsedRightSide
    });
  };
  onChangeTheme = theme => {
    // Persist the chosen theme so it survives reloads.
    $$.setStore('theme', theme);
    this.setState({
      theme
    });
  };
  render() {
    const {
      collapsedLeftSide,
      leftCollapsedWidth,
      // expandTopBar,
      showSidebarHeader,
      collapsedRightSide,
      theme,
      user,
      currentMenu,
      isMobile
    } = this.state;
    const { routerData, location, global } = this.props;
    const { menu, flatMenu } = global;
    const { childRoutes } = routerData;
    // Map the persisted layout flags onto CSS modifier classes.
    const classnames = cx('basic-layout', 'full-layout', {
      fixed: theme.layout && theme.layout.indexOf('fixedSidebar') !== -1,
      'fixed-header':
        theme.layout && theme.layout.indexOf('fixedHeader') !== -1,
      'fixed-s':
        theme.layout && theme.layout.indexOf('fixeds') !== -1,
      'hided-s':
        theme.layout && theme.layout.indexOf('hideds') !== -1
    });
    return (
      <Layout className={classnames}>
        <Header>
          <NavBar
            collapsed={collapsedLeftSide}
            onCollapseLeftSide={this.onCollapseLeftSide}
            onExpandTopBar={this.onExpandTopBar}
            // toggleSidebarHeader={this.toggleSidebarHeader}
            theme={theme.navbar}
            user={user}
            isMobile={isMobile}
          />
        </Header>
        <Layout>
          <LeftSideBar
            collapsed={collapsedLeftSide}
            leftCollapsedWidth={leftCollapsedWidth}
            showHeader={showSidebarHeader}
            onCollapse={this.onCollapseLeftSide}
            onCollapseAll={this.onCollapseLeftSideAll}
            location={location}
            theme={theme.leftSide}
            flatMenu={flatMenu}
            currentMenu={currentMenu}
            menu={menu}
            user={user}
            isMobile={isMobile}
          />
          <Content>
            {theme.layout.indexOf('tabLayout') >= 0 ? (
              <TabsLayout childRoutes={childRoutes} location={location} />
            ) : (
              <Layout className="full-layout">
                <Header>
                  <TopBar
                    // expand={expandTopBar}
                    // toggleRightSide={this.toggleRightSide}
                    // collapsedRightSide={collapsedRightSide}
                    // onCollapse={this.onCollapseTopBar}
                    currentMenu={currentMenu}
                    location={location}
                    theme={theme}
                  />
                </Header>
                <Content className="router-page">
                  <Switch>{childRoutes}</Switch>
                </Content>
              </Layout>
            )}
          </Content>
          <RightSideBar collapsed={collapsedRightSide} />
        </Layout>
        {/* <SkinToolbox onChangeTheme={this.onChangeTheme} theme={theme} /> */}
      </Layout>
    );
  }
}
|
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from pyats.topology import loader
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.junos.show_lacp import ShowLacpInterfacesInterface
""" TestCase for:
* show lacp interfaces {interface}
"""
class TestShowLacpInterfacesInterface(unittest.TestCase):
    """Unit tests for the ShowLacpInterfacesInterface parser
    (command: 'show lacp interfaces {interface}')."""
    device = Device(name="aDevice")
    maxDiff = None
    # Device output stubs: Mock(**...) makes device.execute() return the value.
    empty_output = {"execute.return_value": ""}
    golden_output = {
        "execute.return_value": """
        show lacp interfaces ae4
        Aggregated interface: ae4
            LACP state:       Role   Exp   Def  Dist  Col  Syn  Aggr  Timeout  Activity
              xe-3/0/1       Actor    No    No   Yes  Yes  Yes   Yes   Fast    Active
              xe-3/0/1     Partner    No    No   Yes  Yes  Yes   Yes   Fast    Active
            LACP protocol:        Receive State  Transmit State          Mux State
              xe-3/0/1                  Current   Fast periodic Collecting distributing
    """
    }
    # Expected parsed structure for the golden output above.
    golden_parsed_output = {
        "lacp-interface-information-list": {
            "lacp-interface-information": {
                "lag-lacp-header": {"aggregate-name": "ae4"},
                "lag-lacp-protocol": [
                    {
                        "lacp-mux-state": "Collecting distributing",
                        "lacp-receive-state": "Current",
                        "lacp-transmit-state": "Fast periodic",
                        "name": "xe-3/0/1",
                    }
                ],
                "lag-lacp-state": [
                    {
                        "lacp-activity": "Active",
                        "lacp-aggregation": "Yes",
                        "lacp-collecting": "Yes",
                        "lacp-defaulted": "No",
                        "lacp-distributing": "Yes",
                        "lacp-expired": "No",
                        "lacp-role": "Actor",
                        "lacp-synchronization": "Yes",
                        "lacp-timeout": "Fast",
                        "name": "xe-3/0/1",
                    },
                    {
                        "lacp-activity": "Active",
                        "lacp-aggregation": "Yes",
                        "lacp-collecting": "Yes",
                        "lacp-defaulted": "No",
                        "lacp-distributing": "Yes",
                        "lacp-expired": "No",
                        "lacp-role": "Partner",
                        "lacp-synchronization": "Yes",
                        "lacp-timeout": "Fast",
                        "name": "xe-3/0/1",
                    },
                ],
            }
        }
    }
    def test_empty(self):
        # Empty device output must raise SchemaEmptyParserError.
        self.device = Mock(**self.empty_output)
        obj = ShowLacpInterfacesInterface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()
    def test_golden_instance(self):
        # Golden output must parse into the expected dictionary.
        self.device = Mock(**self.golden_output)
        obj = ShowLacpInterfacesInterface(device=self.device)
        parsed_output = obj.parse(interface="ae4")
        self.assertEqual(parsed_output, self.golden_parsed_output)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
// ESLint configuration: TypeScript parsing with standard + prettier presets.
module.exports = {
  root: true, // stop ESLint from searching parent directories for configs
  parser: '@typescript-eslint/parser',
  // Prettier entries come last so they can disable conflicting style rules.
  extends: [
    'plugin:@typescript-eslint/recommended',
    'prettier/@typescript-eslint',
    'standard',
    'prettier',
    'prettier/standard',
  ],
  parserOptions: {
    ecmaFeatures: {
      legacyDecorators: true, // allow stage-1 (legacy) decorator syntax
    },
  },
  plugins: ['@typescript-eslint', 'promise', 'prettier', 'standard'],
  // Code may run in browser, Node, and Jest contexts.
  env: {
    browser: true,
    es6: true,
    node: true,
    jest: true,
  },
  rules: {
    indent: ['error', 2],
    'linebreak-style': ['error', 'unix'],
    quotes: ['error', 'single'],
    semi: ['error', 'always'],
  },
};
|
import { main } from './example001';
import { readFileSync } from 'fs';
import { test } from 'ava';
// Golden-file test: run the example, then compare its generated HTML
// byte-for-byte against the checked-in reference output.
test('Expected stl', async (t) => {
    await main();
    const produced = readFileSync('tmp/example001.html', { encoding: 'utf8' });
    const expected = readFileSync('example001.html', { encoding: 'utf8' });
    t.is(produced, expected);
});
|
import test from 'tape'
import React from 'react'
import { shallow } from 'enzyme'
import HomeBtn from '../../../src/client/components/home_button.js'
// Shallow-render HomeBtn and assert it renders exactly the expected button
// markup (classes and label included).
test('<HomeBtn />', t => {
  const wrapper = shallow(<HomeBtn />)
  t.equal(wrapper.contains(
    <button className='w-75 h3 bg-white f_lato br-pill center db main_black_font'>BACK TO HOME</button>
  ), true)
  t.end()
})
|
// @flow
import extend from 'lodash/extend';
import Binary from './Binary';
// Binary node representing the SQL window-function OVER clause.
export default class Over extends Binary {
  constructor(left, right = null) {
    super(left, right);
    // Lazy require — presumably to avoid a circular dependency with
    // AliasPredication (TODO confirm); mixes aliasing methods onto this node.
    const { default: AliasPredication } = require('../AliasPredication');
    extend(this, AliasPredication);
  }
  get operator() {
    return 'OVER';
  }
}
|
/**
 * Site home page
 */
import * as React from "react"
import { Link } from "gatsby"
import {
Text,
} from "@chakra-ui/react"
import Layout from "../components/layout"
// Page metadata consumed by the shared Layout component
// (title/description rendered into the page chrome and <head>).
const frontMatter = {
  title: 'ホーム',
  description: 'ウマ娘のツール'
}
// Landing page: a decorative banner inside the shared layout.
const IndexPage = () => (
  <Layout frontMatter={frontMatter}>
    <Text fontSize="3xl" mt="6" textAlign="center">🥕🐴🥕🐴🥕</Text>
  </Layout>
)
export default IndexPage
|
import Link from "next/link"
import { signIn, signOut, useSession } from "next-auth/react"
import styles from "./header.module.css"
import { text } from '../lib/data'
// The approach used in this component shows how to build a sign in and sign out
// component that works on pages which support both client and server side
// rendering, and avoids any flash incorrect content on initial page load.
export default function Header() {
  // status is "loading" until next-auth resolves the session client-side.
  const { data: session, status } = useSession()
  const loading = status === "loading"
  return (
    <header>
      <div className={styles.signedInStatus}>
        <p
          className={`nojs-show ${!session && loading ? styles.loading : styles.loaded
            }`}
        >
          {/* Signed-out view: prompt with a sign-in button. */}
          {!session && (
            <>
              <span className={styles.notSignedInText}>
                {text.header.signInAs}
              </span>
              <a
                href={`/api/auth/signin`}
                className={styles.buttonPrimary}
                onClick={(e) => {
                  // Intercept the link so signIn() can run client-side.
                  e.preventDefault()
                  signIn()
                }}
              >
                {text.header.signIn}
              </a>
            </>
          )}
          {/* Signed-in view: show identity and a sign-out button. */}
          {session?.user && (
            <>
              <span className={styles.signedInText}>
                <small>{text.header.signedInAs} <strong>{text.header.editor}</strong></small>
              </span>
              <a
                href={`/api/auth/signout`}
                className={styles.button}
                onClick={(e) => {
                  e.preventDefault()
                  signOut()
                }}
              >
                {text.header.signOut}
              </a>
            </>
          )}
        </p>
      </div>
    </header>
  )
}
|
# Generated by Django 4.0.2 on 2022-02-08 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Address table."""

    # First migration for this app.
    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # max_length=42 — presumably '0x' + 40 hex chars of an
                # Ethereum-style address; confirm against the model source.
                ('address', models.CharField(max_length=42)),
                ('balance', models.IntegerField()),
            ],
        ),
    ]
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: otp.avatar.LocalAvatar
from pandac.PandaModules import *
from libotp import Nametag, WhisperPopup
from direct.gui.DirectGui import *
from direct.showbase.PythonUtil import *
from direct.interval.IntervalGlobal import *
from direct.showbase.InputStateGlobal import inputState
from pandac.PandaModules import *
import Avatar
from direct.controls import ControlManager
import DistributedAvatar
from direct.task import Task
import PositionExaminer
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPRender
import math, string, random
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from direct.gui import DirectGuiGlobals
from otp.otpbase import OTPLocalizer
from direct.controls.GhostWalker import GhostWalker
from direct.controls.GravityWalker import GravityWalker
from direct.controls.ObserverWalker import ObserverWalker
from direct.controls.PhysicsWalker import PhysicsWalker
from direct.controls.SwimWalker import SwimWalker
from direct.controls.TwoDWalker import TwoDWalker
class LocalAvatar(DistributedAvatar.DistributedAvatar, DistributedSmoothNode.DistributedSmoothNode):
    """The avatar controlled by this client: input, camera, chat, and
    sleep/AFK handling (decompiled from otp.avatar.LocalAvatar)."""
    __module__ = __name__
    notify = DirectNotifyGlobal.directNotify.newCategory('LocalAvatar')
    # Config-driven tunables, read once at class-definition time.
    wantDevCameraPositions = base.config.GetBool('want-dev-camera-positions', 0)
    wantMouse = base.config.GetBool('want-mouse', 0)
    sleepTimeout = base.config.GetInt('sleep-timeout', 120)
    # NOTE(review): named swimTimeout but reads 'afk-timeout' — confirm intent.
    swimTimeout = base.config.GetInt('afk-timeout', 600)
    __enableMarkerPlacement = base.config.GetBool('place-markers', 0)
    acceptingNewFriends = base.config.GetBool('accepting-new-friends', 1)
    acceptingNonFriendWhispers = base.config.GetBool('accepting-non-friend-whispers', 0)
def __init__(self, cr, chatMgr, talkAssistant=None, passMessagesThrough=False):
    """Set up the client-controlled avatar.

    cr -- client repository; chatMgr -- chat manager (stored on self);
    talkAssistant -- installed as base.talkAssistant;
    passMessagesThrough -- forwarded to ControlManager.
    """
    # Run-once guard: this class sits in a diamond inheritance hierarchy so
    # __init__ can be re-entered. The original used a bare `except:` which
    # would also hide genuine errors; AttributeError is the only exception
    # the attribute probe can legitimately raise.
    try:
        self.LocalAvatar_initialized
        return
    except AttributeError:
        pass
    self.LocalAvatar_initialized = 1
    DistributedAvatar.DistributedAvatar.__init__(self, cr)
    DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
    # Collision traverser driving the avatar's collisions; previous-frame
    # transforms are respected so colliders are swept between frames.
    self.cTrav = CollisionTraverser('base.cTrav')
    base.pushCTrav(self.cTrav)
    self.cTrav.setRespectPrevTransform(1)
    self.avatarControlsEnabled = 0
    self.controlManager = ControlManager.ControlManager(True, passMessagesThrough)
    self.initializeCollisions()
    self.initializeSmartCamera()
    self.cameraPositions = []
    self.animMultiplier = 1.0
    self.runTimeout = 2.5
    self.customMessages = []
    self.chatMgr = chatMgr
    base.talkAssistant = talkAssistant
    # State flags are 0/1 ints, matching the original Python 2 style.
    self.commonChatFlags = 0
    self.garbleChat = 1
    self.teleportAllowed = 1
    self.lockedDown = 0
    self.isPageUp = 0
    self.isPageDown = 0
    self.soundRun = None
    self.soundWalk = None
    self.sleepFlag = 0
    self.isDisguised = 0
    self.movingFlag = 0
    self.swimmingFlag = 0
    self.lastNeedH = None
    # Presence / whisper-click event hooks.
    self.accept('friendOnline', self.__friendOnline)
    self.accept('friendOffline', self.__friendOffline)
    self.accept('clickedWhisper', self.clickedWhisper)
    self.accept('playerOnline', self.__playerOnline)
    self.accept('playerOffline', self.__playerOffline)
    self.sleepCallback = None
    self.accept('wakeup', self.wakeUp)
    self.jumpLandAnimFixTask = None
    self.fov = OTPGlobals.DefaultCameraFov
    self.accept('avatarMoving', self.clearPageUpDown)
    self.nametag2dNormalContents = Nametag.CSpeech
    self.showNametag2d()
    self.setPickable(0)
    return
def useSwimControls(self):
    """Switch the control manager to the swimming walker."""
    self.controlManager.use('swim', self)

def useGhostControls(self):
    """Switch the control manager to the ghost (no-clip) walker."""
    self.controlManager.use('ghost', self)

def useWalkControls(self):
    """Switch the control manager to the normal gravity walker."""
    self.controlManager.use('walk', self)

def useTwoDControls(self):
    """Switch the control manager to the 2D (side-scroller) walker."""
    self.controlManager.use('twoD', self)

def isLockedDown(self):
    """Return nonzero if avatar movement is currently locked."""
    return self.lockedDown

def lock(self):
    """Lock avatar movement; redundant calls are logged but tolerated."""
    if self.lockedDown == 1:
        self.notify.debug('lock() - already locked!')
    self.lockedDown = 1

def unlock(self):
    """Unlock avatar movement; redundant calls are logged but tolerated."""
    if self.lockedDown == 0:
        self.notify.debug('unlock() - already unlocked!')
    self.lockedDown = 0

def isInWater(self):
    """True when the avatar is at or below z=0 in render space
    (assumes the water surface sits at z=0 — confirm per-environment)."""
    return self.getZ(render) <= 0.0

def isTeleportAllowed(self):
    """Teleporting is allowed unless disabled or the avatar is disguised."""
    return self.teleportAllowed and not self.isDisguised

def setTeleportAllowed(self, flag):
    """Set the teleport permission flag and refresh dependent UI buttons."""
    self.teleportAllowed = flag
    self.refreshOnscreenButtons()

def sendFriendsListEvent(self):
    """Wake the avatar and ask the UI to open the friends list."""
    self.wakeUp()
    messenger.send('openFriendsList')
def delete(self):
    """Tear down the local avatar exactly once.

    NOTE(review): the decompiled original placed the cleanup in a
    try/except/else arrangement whose else clause could never run (the
    try either returned or raised, and the except swallowed the raise),
    so the cleanup below was dead code. This restores the evidently
    intended run-once teardown.
    """
    if getattr(self, 'LocalAvatar_deleted', False):
        return
    self.LocalAvatar_deleted = 1
    self.ignoreAll()
    self.stopJumpLandTask()
    taskMgr.remove('shadowReach')
    base.popCTrav()
    taskMgr.remove('posCamera')
    self.disableAvatarControls()
    self.stopTrackAnimToSpeed()
    self.stopUpdateSmartCamera()
    self.shutdownSmartCamera()
    self.deleteCollisions()
    self.controlManager.delete()
    self.physControls = None
    del self.controlManager
    self.positionExaminer.delete()
    del self.positionExaminer
    taskMgr.remove(self.uniqueName('walkReturnTask'))
    self.chatMgr.delete()
    del self.chatMgr
    del self.soundRun
    del self.soundWalk
    # soundWhisper is only set by some subclasses.
    if hasattr(self, 'soundWhisper'):
        del self.soundWhisper
    DistributedAvatar.DistributedAvatar.delete(self)
def shadowReach(self, state):
    """Task: extend the drop-shadow lifter's reach while airborne so the
    shadow still finds the ground below the avatar."""
    if base.localAvatar.shadowPlacer:
        base.localAvatar.shadowPlacer.lifter.setReach(base.localAvatar.getAirborneHeight() + 4.0)
    return Task.cont

def wantLegacyLifter(self):
    """Hook for subclasses: return True to use the legacy collision lifter."""
    return False
def setupControls(self, avatarRadius=1.4, floorOffset=OTPGlobals.FloorOffset, reach=4.0, wallBitmask=OTPGlobals.WallBitmask, floorBitmask=OTPGlobals.FloorBitmask, ghostBitmask=OTPGlobals.GhostBitmask):
    """Create and register every movement walker with the control manager.

    The original wired five walkers with five identical copies of the
    same five-call sequence; that wiring is factored into a local helper.
    Registration order and behavior are unchanged; ghost/observer walkers
    use ghostBitmask for walls, the rest use wallBitmask.
    """
    def register(walker, name, wallMask):
        # Shared wiring applied to every walker type.
        walker.setWallBitMask(wallMask)
        walker.setFloorBitMask(floorBitmask)
        walker.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
        walker.setAirborneHeightFunc(self.getAirborneHeight)
        self.controlManager.add(walker, name)
        return walker

    # The gravity walker doubles as self.physControls for direct access.
    self.physControls = register(GravityWalker(legacyLifter=self.wantLegacyLifter()), 'walk', wallBitmask)
    register(TwoDWalker(), 'twoD', wallBitmask)
    register(SwimWalker(), 'swim', wallBitmask)
    register(GhostWalker(), 'ghost', ghostBitmask)
    register(ObserverWalker(), 'observer', ghostBitmask)
    # Default to walking, but leave all controls disabled until enabled.
    self.controlManager.use('walk', self)
    self.controlManager.disable()
def initializeCollisions(self):
    """Create and register all walker collision controls."""
    self.setupControls()

def deleteCollisions(self):
    """Tear down walker collisions and the base collision traverser."""
    self.controlManager.deleteCollisions()
    # NOTE(review): 'entero157' looks like a decompiler artifact of an
    # 'enter<something>' collision event name — confirm against originals.
    self.ignore('entero157')
    del self.cTrav

def initializeSmartCameraCollisions(self):
    """Build the collision machinery the smart camera uses:

    - ccLine: a segment from the avatar to the ideal camera spot, used to
      detect geometry obstructing the view (fed to camCollisionQueue).
    - ccSphere / ccSphere2: spheres around the camera handed to pusher
      handlers so the camera is shoved out of walls.
    - ccRay / ccRay2: downward rays used to keep the camera above floors.
    """
    self.ccTrav = CollisionTraverser('LocalAvatar.ccTrav')
    self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
    self.ccLineNode = CollisionNode('ccLineNode')
    self.ccLineNode.addSolid(self.ccLine)
    self.ccLineNodePath = self.attachNewNode(self.ccLineNode)
    self.ccLineBitMask = OTPGlobals.CameraBitmask
    self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
    self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
    self.camCollisionQueue = CollisionHandlerQueue()
    self.ccTrav.addCollider(self.ccLineNodePath, self.camCollisionQueue)
    self.ccSphere = CollisionSphere(0, 0, 0, 1)
    self.ccSphereNode = CollisionNode('ccSphereNode')
    self.ccSphereNode.addSolid(self.ccSphere)
    self.ccSphereNodePath = base.camera.attachNewNode(self.ccSphereNode)
    self.ccSphereNode.setFromCollideMask(OTPGlobals.CameraBitmask)
    self.ccSphereNode.setIntoCollideMask(BitMask32.allOff())
    self.camPusher = CollisionHandlerPusher()
    self.camPusher.addCollider(self.ccSphereNodePath, base.camera)
    self.camPusher.setCenter(self)
    self.ccPusherTrav = CollisionTraverser('LocalAvatar.ccPusherTrav')
    # The second sphere node shares the same solid but has its own pusher,
    # traversed separately via ccPusherTrav.
    self.ccSphere2 = self.ccSphere
    self.ccSphereNode2 = CollisionNode('ccSphereNode2')
    self.ccSphereNode2.addSolid(self.ccSphere2)
    self.ccSphereNodePath2 = base.camera.attachNewNode(self.ccSphereNode2)
    self.ccSphereNode2.setFromCollideMask(OTPGlobals.CameraBitmask)
    self.ccSphereNode2.setIntoCollideMask(BitMask32.allOff())
    self.camPusher2 = CollisionHandlerPusher()
    self.ccPusherTrav.addCollider(self.ccSphereNodePath2, self.camPusher2)
    self.camPusher2.addCollider(self.ccSphereNodePath2, base.camera)
    self.camPusher2.setCenter(self)
    self.camFloorRayNode = self.attachNewNode('camFloorRayNode')
    self.ccRay = CollisionRay(0.0, 0.0, 0.0, 0.0, 0.0, -1.0)
    self.ccRayNode = CollisionNode('ccRayNode')
    self.ccRayNode.addSolid(self.ccRay)
    self.ccRayNodePath = self.camFloorRayNode.attachNewNode(self.ccRayNode)
    self.ccRayBitMask = OTPGlobals.FloorBitmask
    self.ccRayNode.setFromCollideMask(self.ccRayBitMask)
    self.ccRayNode.setIntoCollideMask(BitMask32.allOff())
    self.ccTravFloor = CollisionTraverser('LocalAvatar.ccTravFloor')
    self.camFloorCollisionQueue = CollisionHandlerQueue()
    self.ccTravFloor.addCollider(self.ccRayNodePath, self.camFloorCollisionQueue)
    self.ccTravOnFloor = CollisionTraverser('LocalAvatar.ccTravOnFloor')
    self.ccRay2 = CollisionRay(0.0, 0.0, 0.0, 0.0, 0.0, -1.0)
    self.ccRay2Node = CollisionNode('ccRay2Node')
    self.ccRay2Node.addSolid(self.ccRay2)
    self.ccRay2NodePath = self.camFloorRayNode.attachNewNode(self.ccRay2Node)
    self.ccRay2BitMask = OTPGlobals.FloorBitmask
    self.ccRay2Node.setFromCollideMask(self.ccRay2BitMask)
    self.ccRay2Node.setIntoCollideMask(BitMask32.allOff())
    self.ccRay2MoveNodePath = hidden.attachNewNode('ccRay2MoveNode')
    # Broadcasts 'on-floor' / 'off-floor' messenger events.
    self.camFloorCollisionBroadcaster = CollisionHandlerFloor()
    self.camFloorCollisionBroadcaster.setInPattern('on-floor')
    self.camFloorCollisionBroadcaster.setOutPattern('off-floor')
    self.camFloorCollisionBroadcaster.addCollider(self.ccRay2NodePath, self.ccRay2MoveNodePath)

def deleteSmartCameraCollisions(self):
    """Delete every node and handler built by initializeSmartCameraCollisions."""
    del self.ccTrav
    del self.ccLine
    del self.ccLineNode
    self.ccLineNodePath.removeNode()
    del self.ccLineNodePath
    del self.camCollisionQueue
    del self.ccRay
    del self.ccRayNode
    self.ccRayNodePath.removeNode()
    del self.ccRayNodePath
    del self.ccRay2
    del self.ccRay2Node
    self.ccRay2NodePath.removeNode()
    del self.ccRay2NodePath
    self.ccRay2MoveNodePath.removeNode()
    del self.ccRay2MoveNodePath
    del self.ccTravOnFloor
    del self.ccTravFloor
    del self.camFloorCollisionQueue
    del self.camFloorCollisionBroadcaster
    del self.ccSphere
    del self.ccSphereNode
    self.ccSphereNodePath.removeNode()
    del self.ccSphereNodePath
    del self.camPusher
    del self.ccPusherTrav
    del self.ccSphere2
    del self.ccSphereNode2
    self.ccSphereNodePath2.removeNode()
    del self.ccSphereNodePath2
    del self.camPusher2

def collisionsOff(self):
    """Disable walker collisions via the control manager."""
    self.controlManager.collisionsOff()

def collisionsOn(self):
    """Enable walker collisions via the control manager."""
    self.controlManager.collisionsOn()
def recalcCameraSphere(self):
    """Recompute the camera collision sphere so that it encloses the four
    corners of the near plane (plus the camera origin)."""
    nearPlaneDist = base.camLens.getNear()
    hFov = base.camLens.getHfov()
    vFov = base.camLens.getVfov()
    # Half-extents of the near plane in camera space.
    hOff = nearPlaneDist * math.tan(deg2Rad(hFov / 2.0))
    vOff = nearPlaneDist * math.tan(deg2Rad(vFov / 2.0))
    camPnts = [Point3(hOff, nearPlaneDist, vOff),
               Point3(-hOff, nearPlaneDist, vOff),
               Point3(hOff, nearPlaneDist, -vOff),
               Point3(-hOff, nearPlaneDist, -vOff),
               Point3(0.0, 0.0, 0.0)]
    # Centroid of the sample points becomes the sphere center.
    avgPnt = Point3(0.0, 0.0, 0.0)
    for pnt in camPnts:
        avgPnt = avgPnt + pnt
    avgPnt = avgPnt / len(camPnts)
    # Radius is the distance to the farthest sample point.
    sphereRadius = max(Vec3(pnt - avgPnt).length() for pnt in camPnts)
    avgPnt = Point3(avgPnt)
    self.ccSphereNodePath.setPos(avgPnt)
    self.ccSphereNodePath2.setPos(avgPnt)
    self.ccSphere.setRadius(sphereRadius)
def putCameraFloorRayOnAvatar(self):
    """Park the floor-detect ray 5 units above the avatar."""
    self.camFloorRayNode.setPos(self, 0, 0, 5)

def putCameraFloorRayOnCamera(self):
    """Move the floor-detect ray to the camera collision sphere."""
    self.camFloorRayNode.setPos(self.ccSphereNodePath, 0, 0, 0)

def attachCamera(self):
    """Parent the camera to the avatar and enable mouse-driven control."""
    camera.reparentTo(self)
    base.enableMouse()
    base.setMouseOnNode(self.node())
    self.ignoreMouse = not self.wantMouse
    self.setWalkSpeedNormal()

def detachCamera(self):
    """Release the camera from mouse control."""
    base.disableMouse()

def stopJumpLandTask(self):
    """Cancel the pending jump-landing animation fix task, if any."""
    if self.jumpLandAnimFixTask:
        self.jumpLandAnimFixTask.remove()
        self.jumpLandAnimFixTask = None
    return

def jumpStart(self):
    """Play the airborne animation when a jump begins (unless asleep or sad)."""
    if not self.sleepFlag and self.hp > 0:
        self.b_setAnimState('jumpAirborne', 1.0)
    self.stopJumpLandTask()

def returnToWalk(self, task):
    """Task callback: restore the appropriate idle anim state after landing."""
    if self.sleepFlag:
        state = 'Sleep'
    else:
        if self.hp > 0:
            state = 'Happy'
        else:
            state = 'Sad'
    self.b_setAnimState(state, 1.0)
    return Task.done

def jumpLandAnimFix(self, jumpTime):
    """Schedule returnToWalk after jumpTime, unless already running/walking.
    Returns the scheduled task, or None when no fix is needed."""
    if self.playingAnim != 'run' and self.playingAnim != 'walk':
        return taskMgr.doMethodLater(jumpTime, self.returnToWalk, self.uniqueName('walkReturnTask'))

def jumpHardLand(self):
    """Play the hard-landing animation (if allowed) and rebroadcast position."""
    if self.allowHardLand():
        self.b_setAnimState('jumpLand', 1.0)
        self.stopJumpLandTask()
        self.jumpLandAnimFixTask = self.jumpLandAnimFix(1.0)
    if self.d_broadcastPosHpr:
        self.d_broadcastPosHpr()

def jumpLand(self):
    """Handle a soft landing: quick anim fix and position rebroadcast."""
    self.jumpLandAnimFixTask = self.jumpLandAnimFix(0.01)
    if self.d_broadcastPosHpr:
        self.d_broadcastPosHpr()
def setupAnimationEvents(self):
    """Listen for jump messages emitted by the physics walkers."""
    self.accept('jumpStart', self.jumpStart, [])
    self.accept('jumpHardLand', self.jumpHardLand, [])
    self.accept('jumpLand', self.jumpLand, [])

def ignoreAnimationEvents(self):
    """Stop listening for jump messages."""
    self.ignore('jumpStart')
    self.ignore('jumpHardLand')
    self.ignore('jumpLand')

def allowHardLand(self):
    """Hard-landing animation is allowed while awake and alive."""
    return not self.sleepFlag and self.hp > 0

def enableSmartCameraViews(self):
    """Bind camera cycling (tab) and tilt (page up/down) keys."""
    self.accept('tab', self.nextCameraPos, [1])
    self.accept('shift-tab', self.nextCameraPos, [0])
    self.accept('page_up', self.pageUp)
    self.accept('page_down', self.pageDown)

def disableSmartCameraViews(self):
    """Unbind the camera control keys."""
    self.ignore('tab')
    self.ignore('shift-tab')
    self.ignore('page_up')
    self.ignore('page_down')
    self.ignore('page_down-up')

def enableAvatarControls(self):
    """Turn on keyboard control of the avatar (idempotent)."""
    if self.avatarControlsEnabled:
        return
    self.avatarControlsEnabled = 1
    self.setupAnimationEvents()
    self.controlManager.enable()

def disableAvatarControls(self):
    """Turn off keyboard control of the avatar (idempotent)."""
    if not self.avatarControlsEnabled:
        return
    self.avatarControlsEnabled = 0
    self.ignoreAnimationEvents()
    self.controlManager.disable()
    self.clearPageUpDown()

def setWalkSpeedNormal(self):
    """Apply the standard movement speed constants."""
    self.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed, OTPGlobals.ToonRotateSpeed)

def setWalkSpeedSlow(self):
    """Apply the slow movement speed constants (used in the 'Sad' state)."""
    self.controlManager.setSpeeds(OTPGlobals.ToonForwardSlowSpeed, OTPGlobals.ToonJumpSlowForce, OTPGlobals.ToonReverseSlowSpeed, OTPGlobals.ToonRotateSlowSpeed)
def pageUp(self):
    """Tilt the camera up (page_up key); pressing again restores it."""
    if not self.avatarControlsEnabled:
        return
    self.wakeUp()
    if not self.isPageUp:
        self.isPageDown = 0
        self.isPageUp = 1
        self.lerpCameraFov(70, 0.6)
        self.setCameraPositionByIndex(self.cameraIndex)
    else:
        self.clearPageUpDown()

def pageDown(self):
    """Tilt the camera down (page_down key); pressing again restores it."""
    if not self.avatarControlsEnabled:
        return
    self.wakeUp()
    if not self.isPageDown:
        self.isPageUp = 0
        self.isPageDown = 1
        self.lerpCameraFov(70, 0.6)
        self.setCameraPositionByIndex(self.cameraIndex)
    else:
        self.clearPageUpDown()

def clearPageUpDown(self):
    """Cancel any page up/down tilt and restore the normal FOV."""
    if self.isPageDown or self.isPageUp:
        self.lerpCameraFov(self.fov, 0.6)
        self.isPageDown = 0
        self.isPageUp = 0
        self.setCameraPositionByIndex(self.cameraIndex)

def nextCameraPos(self, forward):
    """Cycle to the next (forward=1) or previous camera preset, wrapping."""
    if not self.avatarControlsEnabled:
        return
    self.wakeUp()
    self.__cameraHasBeenMoved = 1
    if forward:
        self.cameraIndex += 1
        if self.cameraIndex > len(self.cameraPositions) - 1:
            self.cameraIndex = 0
    else:
        self.cameraIndex -= 1
        if self.cameraIndex < 0:
            self.cameraIndex = len(self.cameraPositions) - 1
    self.setCameraPositionByIndex(self.cameraIndex)

def initCameraPositions(self):
    """Build the list of camera presets, scaled to the avatar's height.

    Each preset is a 5-tuple: (camera pos, look-at, page-up look-at,
    page-down look-at, disable-smart-cam flag) — see setCameraSettings.
    """
    camHeight = self.getClampedAvatarHeight()
    heightScaleFactor = camHeight * 0.3333333333
    defLookAt = Point3(0.0, 1.5, camHeight)
    scXoffset = 3.0
    # NOTE(review): scPosition is computed but never used — likely a
    # decompile leftover; confirm before removing.
    scPosition = (
        Point3(scXoffset - 1, -10.0, camHeight + 5.0), Point3(scXoffset, 2.0, camHeight))
    self.cameraPositions = [
        (
            Point3(0.0, -9.0 * heightScaleFactor, camHeight), defLookAt, Point3(0.0, camHeight, camHeight * 4.0), Point3(0.0, camHeight, camHeight * -1.0), 0), (Point3(0.0, 0.5, camHeight), defLookAt, Point3(0.0, camHeight, camHeight * 1.33), Point3(0.0, camHeight, camHeight * 0.66), 1),
        (
            Point3(5.7 * heightScaleFactor, 7.65 * heightScaleFactor, camHeight + 2.0), Point3(0.0, 1.0, camHeight), Point3(0.0, 1.0, camHeight * 4.0), Point3(0.0, 1.0, camHeight * -1.0), 0), (Point3(0.0, -24.0 * heightScaleFactor, camHeight + 4.0), defLookAt, Point3(0.0, 1.5, camHeight * 4.0), Point3(0.0, 1.5, camHeight * -1.0), 0), (Point3(0.0, -12.0 * heightScaleFactor, camHeight + 4.0), defLookAt, Point3(0.0, 1.5, camHeight * 4.0), Point3(0.0, 1.5, camHeight * -1.0), 0)] + self.auxCameraPositions
    # Extra free-floating debug viewpoints when dev camera positions are on.
    if self.wantDevCameraPositions:
        self.cameraPositions += [(Point3(0.0, 0.0, camHeight * 3), Point3(0.0, 0.0, 0.0), Point3(0.0, camHeight * 2, 0.0), Point3(0.0, -camHeight * 2, 0.0), 1), (Point3(camHeight * 3, 0.0, camHeight), Point3(0.0, 0.0, camHeight), Point3(0.0, camHeight, camHeight * 1.1), Point3(0.0, camHeight, camHeight * 0.9), 1), (Point3(camHeight * 3, 0.0, 0.0), Point3(0.0, 0.0, camHeight), Point3(0.0, camHeight, camHeight * 1.1), Point3(0.0, camHeight, camHeight * 0.9), 1), (Point3(-camHeight * 3, 0.0, camHeight), Point3(0.0, 0.0, camHeight), Point3(0.0, camHeight, camHeight * 1.1), Point3(0.0, camHeight, camHeight * 0.9), 1), (Point3(0.0, -60, 60), defLookAt + Point3(0, 15, 0), defLookAt + Point3(0, 15, 0), defLookAt + Point3(0, 15, 0), 1), (Point3(0.0, -20, 20), defLookAt + Point3(0, 5, 0), defLookAt + Point3(0, 5, 0), defLookAt + Point3(0, 5, 0), 1)]
def addCameraPosition(self, camPos=None):
    """Append a camera preset to both preset lists.

    camPos -- a 5-tuple (pos, look-at, page-up look-at, page-down
    look-at, disable-smart-cam flag); when None, a preset is captured
    from the camera's current transform relative to the avatar.
    (Idiom fix: `is None` instead of `== None`.)
    """
    if camPos is None:
        # Derive the look-at point one unit in front of the current camera.
        lookAtNP = self.attachNewNode('lookAt')
        lookAtNP.setPos(base.cam, 0, 1, 0)
        lookAtPos = lookAtNP.getPos()
        camHeight = self.getClampedAvatarHeight()
        camPos = (base.cam.getPos(self), lookAtPos, Point3(0.0, 1.5, camHeight * 4.0), Point3(0.0, 1.5, camHeight * -1.0), 1)
        lookAtNP.removeNode()
    self.auxCameraPositions.append(camPos)
    self.cameraPositions.append(camPos)
    return
def resetCameraPosition(self):
    """Jump back to the first camera preset."""
    self.cameraIndex = 0
    self.setCameraPositionByIndex(self.cameraIndex)

def removeCameraPosition(self):
    """Delete the current preset (keeping at least one) and advance."""
    if len(self.cameraPositions) > 1:
        camPos = self.cameraPositions[self.cameraIndex]
        if camPos in self.auxCameraPositions:
            self.auxCameraPositions.remove(camPos)
        if camPos in self.cameraPositions:
            self.cameraPositions.remove(camPos)
        self.nextCameraPos(1)

def printCameraPositions(self):
    # Debug helper: dump all presets as paste-able Python source
    # (Python 2 print statements, matching the rest of the file).
    print '['
    for i in range(len(self.cameraPositions)):
        self.printCameraPosition(i)
        print ','
    print ']'

def printCameraPosition(self, index):
    # Debug helper: dump a single preset tuple.
    cp = self.cameraPositions[index]
    print '(Point3(%0.2f, %0.2f, %0.2f),' % (cp[0][0], cp[0][1], cp[0][2])
    print 'Point3(%0.2f, %0.2f, %0.2f),' % (cp[1][0], cp[1][1], cp[1][2])
    print 'Point3(%0.2f, %0.2f, %0.2f),' % (cp[2][0], cp[2][1], cp[2][2])
    print 'Point3(%0.2f, %0.2f, %0.2f),' % (cp[3][0], cp[3][1], cp[3][2])
    print '%d,' % cp[4]
    print ')',
def posCamera(self, lerp, time):
    """Place the camera at the compromise position, lerping over `time`
    seconds when `lerp` is true."""
    if not lerp:
        self.positionCameraWithPusher(self.getCompromiseCameraPos(), self.getLookAtPoint())
    else:
        # Compute the destination transform, restore the current one, then
        # lerp toward the destination under the 'posCamera' task name.
        camPos = self.getCompromiseCameraPos()
        savePos = camera.getPos()
        saveHpr = camera.getHpr()
        self.positionCameraWithPusher(camPos, self.getLookAtPoint())
        x = camPos[0]
        y = camPos[1]
        z = camPos[2]
        destHpr = camera.getHpr()
        h = destHpr[0]
        p = destHpr[1]
        r = destHpr[2]
        camera.setPos(savePos)
        camera.setHpr(saveHpr)
        taskMgr.remove('posCamera')
        camera.lerpPosHpr(x, y, z, h, p, r, time, task='posCamera')

def getClampedAvatarHeight(self):
    """Avatar height, floored at 3.0 units for camera math."""
    return max(self.getHeight(), 3.0)

def getVisibilityPoint(self):
    """The avatar-relative point (top of the avatar) that must stay
    visible to the camera."""
    return Point3(0.0, 0.0, self.getHeight())

def setLookAtPoint(self, la):
    """Store a copy of the camera's look-at point."""
    self.__curLookAt = Point3(la)

def getLookAtPoint(self):
    """Return a copy of the stored look-at point."""
    return Point3(self.__curLookAt)

def setIdealCameraPos(self, pos):
    """Store the unobstructed camera position and re-aim the test segment."""
    self.__idealCameraPos = Point3(pos)
    self.updateSmartCameraCollisionLineSegment()

def getIdealCameraPos(self):
    """Return a copy of the ideal (unobstructed) camera position."""
    return Point3(self.__idealCameraPos)

def setCameraPositionByIndex(self, index):
    """Apply the preset stored at `index` in cameraPositions."""
    self.notify.debug('switching to camera position %s' % index)
    self.setCameraSettings(self.cameraPositions[index])

def setCameraPosForPetInteraction(self):
    """Temporarily move the camera to a close-up for pet interactions,
    saving the previous ideal position for restoration."""
    height = self.getClampedAvatarHeight()
    point = Point3(height * (7 / 3.0), height * (-7 / 3.0), height)
    self.prevIdealPos = self.getIdealCameraPos()
    self.setIdealCameraPos(point)
    self.posCamera(1, 0.7)

def unsetCameraPosForPetInteraction(self):
    """Restore the camera saved by setCameraPosForPetInteraction."""
    self.setIdealCameraPos(self.prevIdealPos)
    del self.prevIdealPos
    self.posCamera(1, 0.7)
def setCameraSettings(self, camSettings):
    """Apply one camera preset 5-tuple: (pos, look-at, page-up look-at,
    page-down look-at, disable-smart-cam flag)."""
    self.setIdealCameraPos(camSettings[0])
    if self.isPageUp and self.isPageDown or not self.isPageUp and not self.isPageDown:
        # Neither (or, defensively, both) tilt flags set: normal look-at.
        self.__cameraHasBeenMoved = 1
        self.setLookAtPoint(camSettings[1])
    else:
        if self.isPageUp:
            self.__cameraHasBeenMoved = 1
            self.setLookAtPoint(camSettings[2])
        else:
            if self.isPageDown:
                self.__cameraHasBeenMoved = 1
                self.setLookAtPoint(camSettings[3])
            else:
                self.notify.error('This case should be impossible.')
    self.__disableSmartCam = camSettings[4]
    if self.__disableSmartCam:
        self.putCameraFloorRayOnAvatar()
        self.cameraZOffset = 0.0

def getCompromiseCameraPos(self):
    """Camera position to actually use: the ideal position, or — when
    obstructed — a point pulled toward the visibility point at the
    obstruction distance and lifted slightly for a better view."""
    if self.__idealCameraObstructed == 0:
        compromisePos = self.getIdealCameraPos()
    else:
        visPnt = self.getVisibilityPoint()
        idealPos = self.getIdealCameraPos()
        distance = Vec3(idealPos - visPnt).length()
        ratio = self.closestObstructionDistance / distance
        compromisePos = idealPos * ratio + visPnt * (1 - ratio)
        # Lift more the closer the camera is pulled in.
        liftMult = 1.0 - ratio * ratio
        compromisePos = Point3(compromisePos[0], compromisePos[1], compromisePos[2] + self.getHeight() * 0.4 * liftMult)
    compromisePos.setZ(compromisePos[2] + self.cameraZOffset)
    return compromisePos

def updateSmartCameraCollisionLineSegment(self):
    """Re-aim the obstruction-test segment from the visibility point to
    the ideal camera position (skipped for near-zero-length segments)."""
    pointB = self.getIdealCameraPos()
    pointA = self.getVisibilityPoint()
    vectorAB = Vec3(pointB - pointA)
    lengthAB = vectorAB.length()
    if lengthAB > 0.001:
        self.ccLine.setPointA(pointA)
        self.ccLine.setPointB(pointB)
def initializeSmartCamera(self):
    """Initialize smart-camera state and its collision machinery."""
    self.__idealCameraObstructed = 0
    self.closestObstructionDistance = 0.0
    self.cameraIndex = 0
    self.auxCameraPositions = []
    self.cameraZOffset = 0.0
    self.__onLevelGround = 0
    self.__camCollCanMove = 0
    self.__geom = render
    self.__disableSmartCam = 0
    self.initializeSmartCameraCollisions()
    self._smartCamEnabled = False

def shutdownSmartCamera(self):
    """Tear down the smart-camera collision machinery."""
    self.deleteSmartCameraCollisions()

def setOnLevelGround(self, flag):
    """Tell the camera the avatar is on flat ground; updateSmartCamera
    then skips the floor-interaction pass."""
    self.__onLevelGround = flag

def setCameraCollisionsCanMove(self, flag):
    """When set, the camera task runs even if the camera hasn't moved."""
    self.__camCollCanMove = flag

def setGeom(self, geom):
    """Set the scene root the camera collisions are traversed against."""
    self.__geom = geom

def startUpdateSmartCamera(self, push=1):
    """Begin per-frame smart-camera updates (idempotent with a warning).

    push -- when true, register the pusher/floor colliders so the camera
    is actively shoved out of geometry; when false the smart cam stays
    disabled and only the update task runs.
    """
    if self._smartCamEnabled:
        LocalAvatar.notify.warning('redundant call to startUpdateSmartCamera')
        return
    self._smartCamEnabled = True
    self.__floorDetected = 0
    self.__cameraHasBeenMoved = 0
    self.recalcCameraSphere()
    self.initCameraPositions()
    self.setCameraPositionByIndex(self.cameraIndex)
    self.posCamera(0, 0.0)
    self.__instantaneousCamPos = camera.getPos()
    if push:
        self.cTrav.addCollider(self.ccSphereNodePath, self.camPusher)
        self.ccTravOnFloor.addCollider(self.ccRay2NodePath, self.camFloorCollisionBroadcaster)
        self.__disableSmartCam = 0
    else:
        self.__disableSmartCam = 1
    # Remember where the camera is so updateSmartCamera can early-out
    # when nothing has moved.
    self.__lastPosWrtRender = camera.getPos(render)
    self.__lastHprWrtRender = camera.getHpr(render)
    taskName = self.taskName('updateSmartCamera')
    taskMgr.remove(taskName)
    taskMgr.add(self.updateSmartCamera, taskName, priority=47)
    self.enableSmartCameraViews()

def stopUpdateSmartCamera(self):
    """Stop per-frame smart-camera updates and unregister the colliders
    (idempotent with a warning)."""
    if not self._smartCamEnabled:
        LocalAvatar.notify.warning('redundant call to stopUpdateSmartCamera')
        return
    self.disableSmartCameraViews()
    self.cTrav.removeCollider(self.ccSphereNodePath)
    self.ccTravOnFloor.removeCollider(self.ccRay2NodePath)
    if not base.localAvatar.isEmpty():
        self.putCameraFloorRayOnAvatar()
    taskName = self.taskName('updateSmartCamera')
    taskMgr.remove(taskName)
    self._smartCamEnabled = False
def updateSmartCamera(self, task):
    """Per-frame camera task: find geometry obstructing the avatar->camera
    segment, adjust for floors, and nudge the camera toward its target.

    NOTE(review): the decompiled original collapsed the early-out into
    `return (cond and Task).cont`, which raises AttributeError whenever
    the HPR comparison is false (`False.cont`). The expanded conditional
    below restores the intended "camera unmoved -> nothing to do" return.
    """
    if not self.__camCollCanMove and not self.__cameraHasBeenMoved:
        if self.__lastPosWrtRender == camera.getPos(render) and \
           self.__lastHprWrtRender == camera.getHpr(render):
            # Camera is exactly where we left it; skip all collision work.
            return Task.cont
    self.__cameraHasBeenMoved = 0
    self.__lastPosWrtRender = camera.getPos(render)
    self.__lastHprWrtRender = camera.getHpr(render)
    self.__idealCameraObstructed = 0
    if not self.__disableSmartCam:
        self.ccTrav.traverse(self.__geom)
        if self.camCollisionQueue.getNumEntries() > 0:
            # Handle only the nearest obstruction.
            self.camCollisionQueue.sortEntries()
            self.handleCameraObstruction(self.camCollisionQueue.getEntry(0))
        if not self.__onLevelGround:
            self.handleCameraFloorInteraction()
    if not self.__idealCameraObstructed:
        self.nudgeCamera()
    if not self.__disableSmartCam:
        self.ccPusherTrav.traverse(self.__geom)
        self.putCameraFloorRayOnCamera()
        self.ccTravOnFloor.traverse(self.__geom)
    return Task.cont
def positionCameraWithPusher(self, pos, lookAt):
    """Set the camera position, let the pusher traversal resolve any wall
    penetration, then aim the camera at lookAt."""
    camera.setPos(pos)
    self.ccPusherTrav.traverse(self.__geom)
    camera.lookAt(lookAt)
def nudgeCamera(self):
    """Ease the camera toward its target position and orientation, snapping
    when within CLOSE_ENOUGH.

    NOTE(review): the decompiled original emitted
    `newHpr = (cond) and <lerp expr>` with a dangling `else:` — a
    decompiler artifact that could bind False to newHpr. The if/else and
    the `posDone and hprDone` early return below restore the evidently
    intended control flow.
    """
    CLOSE_ENOUGH = 0.1
    curCamPos = self.__instantaneousCamPos
    curCamHpr = camera.getHpr()
    targetCamPos = self.getCompromiseCameraPos()
    targetCamLookAt = self.getLookAtPoint()
    posDone = 0
    if Vec3(curCamPos - targetCamPos).length() <= CLOSE_ENOUGH:
        camera.setPos(targetCamPos)
        posDone = 1
    # Temporarily place the camera at the target to measure the target HPR.
    camera.setPos(targetCamPos)
    camera.lookAt(targetCamLookAt)
    targetCamHpr = camera.getHpr()
    hprDone = 0
    if Vec3(curCamHpr - targetCamHpr).length() <= CLOSE_ENOUGH:
        hprDone = 1
    if posDone and hprDone:
        return
    # Frame-rate-independent exponential approach toward the target.
    lerpRatio = 0.15
    lerpRatio = 1 - pow(1 - lerpRatio, globalClock.getDt() * 30.0)
    self.__instantaneousCamPos = targetCamPos * lerpRatio + curCamPos * (1 - lerpRatio)
    if self.__disableSmartCam or not self.__idealCameraObstructed:
        newHpr = targetCamHpr * lerpRatio + curCamHpr * (1 - lerpRatio)
    else:
        newHpr = targetCamHpr
    camera.setPos(self.__instantaneousCamPos)
    camera.setHpr(newHpr)
def popCameraToDest(self):
    """Snap the camera directly to its compromise position (no lerp)."""
    newCamPos = self.getCompromiseCameraPos()
    newCamLookAt = self.getLookAtPoint()
    self.positionCameraWithPusher(newCamPos, newCamLookAt)
    self.__instantaneousCamPos = camera.getPos()

def handleCameraObstruction(self, camObstrCollisionEntry):
    """Record the distance to the nearest obstruction on the
    avatar->camera segment and pop the camera in front of it."""
    collisionPoint = camObstrCollisionEntry.getSurfacePoint(self.ccLineNodePath)
    collisionVec = Vec3(collisionPoint - self.ccLine.getPointA())
    distance = collisionVec.length()
    self.__idealCameraObstructed = 1
    self.closestObstructionDistance = distance
    self.popCameraToDest()

def handleCameraFloorInteraction(self):
    """Raise the camera (via cameraZOffset) when it has sunk below a floor."""
    self.putCameraFloorRayOnCamera()
    self.ccTravFloor.traverse(self.__geom)
    if self.__onLevelGround:
        return
    if self.camFloorCollisionQueue.getNumEntries() == 0:
        return
    self.camFloorCollisionQueue.sortEntries()
    camObstrCollisionEntry = self.camFloorCollisionQueue.getEntry(0)
    camHeightFromFloor = camObstrCollisionEntry.getSurfacePoint(self.ccRayNodePath)[2]
    self.cameraZOffset = camera.getPos()[2] + camHeightFromFloor
    if self.cameraZOffset < 0:
        self.cameraZOffset = 0
    if self.__floorDetected == 0:
        # First floor contact since updates began: snap instead of nudging.
        self.__floorDetected = 1
        self.popCameraToDest()

def lerpCameraFov(self, fov, time):
    """Smoothly change the horizontal FOV over `time` seconds; changes of
    0.1 degrees or less are ignored."""
    taskMgr.remove('cam-fov-lerp-play')
    oldFov = base.camLens.getHfov()
    if abs(fov - oldFov) > 0.1:
        def setCamFov(fov):
            base.camLens.setFov(fov)
        self.camLerpInterval = LerpFunctionInterval(setCamFov, fromData=oldFov, toData=fov, duration=time, name='cam-fov-lerp')
        self.camLerpInterval.start()

def setCameraFov(self, fov):
    """Set the default FOV, applying it now unless a page tilt is active."""
    self.fov = fov
    if not (self.isPageDown or self.isPageUp):
        base.camLens.setFov(self.fov)
def gotoNode(self, node, eyeHeight=3):
    """Teleport the avatar to a clear spot near `node`, facing it.

    Candidate offsets are screened by positionExaminer; falls back to the
    node's own position if none of them are usable.
    """
    possiblePoints = (
        Point3(3, 6, 0), Point3(-3, 6, 0), Point3(6, 6, 0), Point3(-6, 6, 0), Point3(3, 9, 0), Point3(-3, 9, 0), Point3(6, 9, 0), Point3(-6, 9, 0), Point3(9, 9, 0), Point3(-9, 9, 0), Point3(6, 0, 0), Point3(-6, 0, 0), Point3(6, 3, 0), Point3(-6, 3, 0), Point3(9, 9, 0), Point3(-9, 9, 0), Point3(0, 12, 0), Point3(3, 12, 0), Point3(-3, 12, 0), Point3(6, 12, 0), Point3(-6, 12, 0), Point3(9, 12, 0), Point3(-9, 12, 0), Point3(0, -6, 0), Point3(-3, -6, 0), Point3(0, -9, 0), Point3(-6, -9, 0))
    for point in possiblePoints:
        pos = self.positionExaminer.consider(node, point, eyeHeight)
        if pos:
            self.setPos(node, pos)
            self.lookAt(node)
            # Random +/-10 degree twist so avatars don't line up identically.
            self.setHpr(self.getH() + random.choice((-10, 10)), 0, 0)
            return
    self.setPos(node, 0, 0, 0)

def setCustomMessages(self, customMessages):
    """Replace the avatar's custom quick-chat messages and notify the UI."""
    self.customMessages = customMessages
    messenger.send('customMessagesChanged')
def displayWhisper(self, fromId, chatString, whisperType):
    """Show an incoming whisper as a popup and play the whisper sound.

    NOTE(review): `sender` is always None here, so the WTNormal /
    WTQuickTalker branch always returns early and the name-prefix /
    setClickable code is effectively dead — looks like a sender lookup
    was stubbed out or lost in decompilation; confirm against originals.
    """
    sender = None
    sfx = self.soundWhisper
    if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
        if sender == None:
            return
        chatString = sender.getName() + ': ' + chatString
    whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
    if sender != None:
        whisper.setClickable(sender.getName(), fromId)
    whisper.manage(base.marginManager)
    base.playSfx(sfx)
    return

def displayWhisperPlayer(self, fromId, chatString, whisperType):
    """Show a whisper from an account-level player, resolving the name via
    the player friends manager; unknown players are silently dropped."""
    sender = None
    playerInfo = None
    sfx = self.soundWhisper
    playerInfo = base.cr.playerFriendsManager.playerId2Info.get(fromId, None)
    if playerInfo == None:
        return
    senderName = playerInfo.playerName
    if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
        chatString = senderName + ': ' + chatString
    whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
    if sender != None:
        # NOTE(review): sender is always None, so this never runs — confirm.
        whisper.setClickable(senderName, fromId)
    whisper.manage(base.marginManager)
    base.playSfx(sfx)
    return

def setAnimMultiplier(self, value):
    """Set the playback-rate multiplier for avatar animations."""
    self.animMultiplier = value

def getAnimMultiplier(self):
    """Return the current animation playback-rate multiplier."""
    return self.animMultiplier
def enableRun(self):
    """Hook arrow-up (with every modifier combination) to run detection.

    The original repeated the same accept pair eight times; the loop
    preserves the exact events and registration order.
    """
    for prefix in ('', 'control-', 'alt-', 'shift-'):
        self.accept(prefix + 'arrow_up', self.startRunWatch)
        self.accept(prefix + 'arrow_up-up', self.stopRunWatch)

def disableRun(self):
    """Remove all run-detection key hooks installed by enableRun()."""
    for prefix in ('', 'control-', 'alt-', 'shift-'):
        self.ignore(prefix + 'arrow_up')
        self.ignore(prefix + 'arrow_up-up')
def startRunWatch(self):
    """Key-down handler: after runTimeout seconds of held arrow-up,
    broadcast 'running-on'."""
    def setRun(ignored):
        messenger.send('running-on')
    taskMgr.doMethodLater(self.runTimeout, setRun, self.uniqueName('runWatch'))
    return Task.cont

def stopRunWatch(self):
    """Key-up handler: cancel the run watch and broadcast 'running-off'."""
    taskMgr.remove(self.uniqueName('runWatch'))
    messenger.send('running-off')
    return Task.cont

def runSound(self):
    """Switch footstep audio from walking to running."""
    self.soundWalk.stop()
    base.playSfx(self.soundRun, looping=1)

def walkSound(self):
    """Switch footstep audio from running to walking."""
    self.soundRun.stop()
    base.playSfx(self.soundWalk, looping=1)

def stopSound(self):
    """Stop all footstep audio."""
    self.soundRun.stop()
    self.soundWalk.stop()
def wakeUp(self):
    """Record activity: restart the sleep watchdog (if armed), stamp the
    last-moved time, and clear the sleep state."""
    if self.sleepCallback != None:
        taskMgr.remove(self.uniqueName('sleepwatch'))
        self.startSleepWatch(self.sleepCallback)
    self.lastMoved = globalClock.getFrameTime()
    if self.sleepFlag:
        self.sleepFlag = 0
    return

def gotoSleep(self):
    """Put the avatar into the sleep animation (idempotent)."""
    if not self.sleepFlag:
        self.b_setAnimState('Sleep', self.animMultiplier)
        self.sleepFlag = 1

def forceGotoSleep(self):
    """Force the sleep animation to restart, but only while alive."""
    if self.hp > 0:
        self.sleepFlag = 0
        self.gotoSleep()

def startSleepWatch(self, callback):
    """Arm the watchdog: fire `callback` after sleepTimeout idle seconds."""
    self.sleepCallback = callback
    taskMgr.doMethodLater(self.sleepTimeout, callback, self.uniqueName('sleepwatch'))

def stopSleepWatch(self):
    """Disarm the sleep watchdog."""
    taskMgr.remove(self.uniqueName('sleepwatch'))
    self.sleepCallback = None
    return
def startSleepSwimTest(self):
taskName = self.taskName('sleepSwimTest')
taskMgr.remove(taskName)
task = Task.Task(self.sleepSwimTest)
self.lastMoved = globalClock.getFrameTime()
self.lastState = None
self.lastAction = None
self.sleepSwimTest(task)
taskMgr.add(self.sleepSwimTest, taskName, 35)
return
def stopSleepSwimTest(self):
taskName = self.taskName('sleepSwimTest')
taskMgr.remove(taskName)
self.stopSound()
def sleepSwimTest(self, task):
    """Repeating task: detect swim activity and time out idle swimmers.

    Movement or a jump press counts as activity and wakes the avatar;
    after self.swimTimeout seconds with no activity (and hp > 0),
    swimTimeoutAction() is called and the task ends.
    """
    now = globalClock.getFrameTime()
    # slideSpeed is unpacked but intentionally unused here.
    speed, rotSpeed, slideSpeed = self.controlManager.getSpeeds()
    if speed != 0.0 or rotSpeed != 0.0 or inputState.isSet('jump'):
        if not self.swimmingFlag:
            self.swimmingFlag = 1
    else:
        if self.swimmingFlag:
            self.swimmingFlag = 0
    if self.swimmingFlag or self.hp <= 0:
        self.wakeUp()
    else:
        if not self.sleepFlag:
            # NOTE(review): `now` is recomputed here although it was already
            # read above — harmless, presumably a decompilation artifact.
            now = globalClock.getFrameTime()
            if now - self.lastMoved > self.swimTimeout:
                self.swimTimeoutAction()
                return Task.done
    return Task.cont
def swimTimeoutAction(self):
    """Hook called when a swimmer idles past swimTimeout; subclasses override."""
    pass
def trackAnimToSpeed(self, task):
    """Repeating task: keep animation state and sounds in sync with movement.

    - Toggles look-around idling versus moving.
    - Wakes the avatar on movement; puts it to sleep after sleepTimeout.
    - Drives the Sleep/Happy/Sad anim state from sleepFlag and hp.
    - Applies the 'flat' cheesy-effect heading lerp while turning.
    - Switches walk/run/stop footstep sounds when the movement action changes.
    """
    # slideSpeed is unpacked but intentionally unused here.
    speed, rotSpeed, slideSpeed = self.controlManager.getSpeeds()
    if speed != 0.0 or rotSpeed != 0.0 or inputState.isSet('jump'):
        if not self.movingFlag:
            self.movingFlag = 1
            self.stopLookAround()
    else:
        if self.movingFlag:
            self.movingFlag = 0
            self.startLookAround()
    if self.movingFlag or self.hp <= 0:
        self.wakeUp()
    else:
        if not self.sleepFlag:
            now = globalClock.getFrameTime()
            if now - self.lastMoved > self.sleepTimeout:
                self.gotoSleep()
    state = None
    if self.sleepFlag:
        state = 'Sleep'
    else:
        if self.hp > 0:
            state = 'Happy'
        else:
            state = 'Sad'
    if state != self.lastState:
        self.lastState = state
        self.b_setAnimState(state, self.animMultiplier)
        # Sad avatars shuffle slowly; everyone else walks at normal speed.
        if state == 'Sad':
            self.setWalkSpeedSlow()
        else:
            self.setWalkSpeedNormal()
    if self.cheesyEffect == OTPGlobals.CEFlatProfile or self.cheesyEffect == OTPGlobals.CEFlatPortrait:
        # Lean the flattened avatar into the turn: -10/+10 degrees while
        # rotating, 0 while moving straight, unchanged while idle.
        needH = None
        if rotSpeed > 0.0:
            needH = -10
        else:
            if rotSpeed < 0.0:
                needH = 10
            else:
                if speed != 0.0:
                    needH = 0
        if needH != None:
            # NOTE(review): when lastNeedH == needH this assigns False to
            # `node`, which is then handed to LerpHprInterval — this looks
            # like a decompilation artifact of a guarded block; confirm
            # against the original source before relying on it.
            node = self.lastNeedH != needH and self.getGeomNode().getChild(0)
            lerp = Sequence(LerpHprInterval(node, 0.5, Vec3(needH, 0, 0), blendType='easeInOut'), name='cheesy-lerp-hpr', autoPause=1)
            lerp.start()
            self.lastNeedH = needH
        else:
            self.lastNeedH = None
    action = self.setSpeed(speed, rotSpeed)
    if action != self.lastAction:
        self.lastAction = action
        # A new movement action interrupts any running emote.
        if self.emoteTrack:
            self.emoteTrack.finish()
            self.emoteTrack = None
        if action == OTPGlobals.WALK_INDEX or action == OTPGlobals.REVERSE_INDEX:
            self.walkSound()
        elif action == OTPGlobals.RUN_INDEX:
            self.runSound()
        else:
            self.stopSound()
    return Task.cont
def hasTrackAnimToSpeed(self):
    """Return whether the anim/speed tracking task is currently scheduled."""
    return taskMgr.hasTaskNamed(self.taskName('trackAnimToSpeed'))
def startTrackAnimToSpeed(self):
    """Begin the per-frame anim/speed tracking task (priority 35).

    Runs trackAnimToSpeed once immediately so state is current, then
    schedules it to repeat.
    """
    taskName = self.taskName('trackAnimToSpeed')
    taskMgr.remove(taskName)
    task = Task.Task(self.trackAnimToSpeed)
    self.lastMoved = globalClock.getFrameTime()
    self.lastState = None
    self.lastAction = None
    self.trackAnimToSpeed(task)
    taskMgr.add(self.trackAnimToSpeed, taskName, 35)
    return
def stopTrackAnimToSpeed(self):
    """Stop the anim/speed tracking task and silence footstep audio."""
    taskName = self.taskName('trackAnimToSpeed')
    taskMgr.remove(taskName)
    self.stopSound()
def startChat(self):
    """Enable chat input and hook up whisper plus debug-hotkey listeners."""
    self.chatMgr.start()
    self.accept(OTPGlobals.WhisperIncomingEvent, self.handlePlayerFriendWhisper)
    self.accept(OTPGlobals.ThinkPosHotkey, self.thinkPos)
    self.accept(OTPGlobals.PrintCamPosHotkey, self.printCamPos)
    if self.__enableMarkerPlacement:
        self.accept(OTPGlobals.PlaceMarkerHotkey, self.__placeMarker)
def stopChat(self):
    """Disable chat input and detach the whisper/debug-hotkey listeners."""
    self.chatMgr.stop()
    self.ignore(OTPGlobals.WhisperIncomingEvent)
    self.ignore(OTPGlobals.ThinkPosHotkey)
    self.ignore(OTPGlobals.PrintCamPosHotkey)
    if self.__enableMarkerPlacement:
        self.ignore(OTPGlobals.PlaceMarkerHotkey)
def printCamPos(self):
    """Debug hotkey: print the camera pos/hpr relative to its parent node."""
    node = base.camera.getParent()
    pos = base.cam.getPos(node)
    hpr = base.cam.getHpr(node)
    # Python 2 print statement with backquote repr; left as-is for py2.
    print 'cam pos = ', `pos`, ', cam hpr = ', `hpr`
def d_broadcastPositionNow(self):
    """Send our transform immediately, clearing smoothing so it snaps remotely."""
    self.d_clearSmoothing()
    self.d_broadcastPosHpr()
def travCollisionsLOS(self, n=None):
    """Run the line-of-sight collision traverser over n (defaults to our geometry)."""
    target = self.__geom if n is None else n
    self.ccTrav.traverse(target)
def travCollisionsFloor(self, n=None):
    """Run the floor collision traverser over n (defaults to our geometry)."""
    target = self.__geom if n is None else n
    self.ccTravFloor.traverse(target)
def travCollisionsPusher(self, n=None):
    """Run the pusher collision traverser over n (defaults to our geometry)."""
    target = self.__geom if n is None else n
    self.ccPusherTrav.traverse(target)
def __friendOnline(self, doId, commonChatFlags=0, whitelistChatFlags=0):
    """Handle a friend coming online: update chat flags and maybe whisper.

    Within 10 seconds of a friends-list change the 'coming online' whisper
    is suppressed and the doId re-added to oldFriendsList — presumably to
    avoid a duplicate notice right after befriending; confirm upstream.
    """
    friend = base.cr.identifyFriend(doId)
    if friend != None and hasattr(friend, 'setCommonAndWhitelistChatFlags'):
        friend.setCommonAndWhitelistChatFlags(commonChatFlags, whitelistChatFlags)
    if self.oldFriendsList != None:
        now = globalClock.getFrameTime()
        elapsed = now - self.timeFriendsListChanged
        if elapsed < 10.0 and self.oldFriendsList.count(doId) == 0:
            self.oldFriendsList.append(doId)
            return
    if friend != None:
        self.setSystemMessage(doId, OTPLocalizer.WhisperFriendComingOnline % friend.getName())
    return
def __friendOffline(self, doId):
    """Whisper a logged-out notice when a known friend goes offline."""
    friend = base.cr.identifyFriend(doId)
    if friend != None:
        self.setSystemMessage(0, OTPLocalizer.WhisperFriendLoggedOut % friend.getName())
    return
def __playerOnline(self, playerId):
    """Whisper a 'player came online' notice for the given player id.

    Uses dict.get so an unknown playerId is ignored instead of raising
    KeyError (the info record may not have arrived yet), matching the
    existing truthiness guard below.
    """
    playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
    if playerInfo:
        self.setSystemMessage(playerId, OTPLocalizer.WhisperPlayerOnline % (playerInfo.playerName, playerInfo.location))
def __playerOffline(self, playerId):
    """Whisper a 'player went offline' notice for the given player id.

    Uses dict.get so an unknown playerId is ignored instead of raising
    KeyError, matching the existing truthiness guard below.
    """
    playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
    if playerInfo:
        self.setSystemMessage(playerId, OTPLocalizer.WhisperPlayerOffline % playerInfo.playerName)
def clickedWhisper(self, doId, isPlayer=None):
    """Open a whisper to the avatar (or account-level player) just clicked.

    doId is an avatar id when isPlayer is falsy, otherwise a player id.
    """
    if not isPlayer:
        friend = base.cr.identifyFriend(doId)
        if friend != None:
            messenger.send('clickedNametag', [friend])
            self.chatMgr.whisperTo(friend.getName(), doId)
    else:
        friend = base.cr.playerFriendsManager.getFriendInfo(doId)
        if friend:
            messenger.send('clickedNametagPlayer', [None, doId])
            self.chatMgr.whisperTo(friend.getName(), None, doId)
    return
def d_setParent(self, parentToken):
    """Forward the parenting update to the DistributedSmoothNode base class."""
    DistributedSmoothNode.DistributedSmoothNode.d_setParent(self, parentToken)
def handlePlayerFriendWhisper(self, playerId, charMessage):
    """Display an incoming player-friend whisper as a normal whisper popup.

    Removed a leftover debug print statement that spammed stdout on every
    incoming whisper.
    """
    self.displayWhisperPlayer(playerId, charMessage, WhisperPopup.WTNormal)
def canChat(self):
    """Whether this avatar may chat; this base implementation always denies."""
    return 0
|
import brownie
from util.constants import BROWNIE_PROJECT
accounts = brownie.network.accounts
def test_transfer():
    """transfer() moves tokens from the deployer to a recipient."""
    token = _deployToken()
    # The full supply (1e21 base units) starts with the deployer.
    assert token.totalSupply() == 1e21
    token.transfer(accounts[1], 1e20, {"from": accounts[0]})
    assert token.balanceOf(accounts[1]) == 1e20
    assert token.balanceOf(accounts[0]) == 9e20
def test_approve():
    """approve() sets an allowance, and a later call overwrites it."""
    token = _deployToken()
    token.approve(accounts[1], 1e19, {"from": accounts[0]})
    assert token.allowance(accounts[0], accounts[1]) == 1e19
    # A spender that was never approved must report a zero allowance.
    assert token.allowance(accounts[0], accounts[2]) == 0
    token.approve(accounts[1], 6e18, {"from": accounts[0]})
    assert token.allowance(accounts[0], accounts[1]) == 6e18
def test_transferFrom():
    """transferFrom() spends allowance to move funds between third parties."""
    token = _deployToken()
    token.approve(accounts[1], 6e18, {"from": accounts[0]})
    token.transferFrom(accounts[0], accounts[2], 5e18, {"from": accounts[1]})
    assert token.balanceOf(accounts[2]) == 5e18
    # The spender moved funds but never held any itself.
    assert token.balanceOf(accounts[1]) == 0
    assert token.balanceOf(accounts[0]) == 9.95e20
    # 6e18 approved minus 5e18 spent leaves 1e18.
    assert token.allowance(accounts[0], accounts[1]) == 1e18
def _deployToken():
    """Deploy a fresh Simpletoken (symbol TST, 18 decimals, 1e21 supply)."""
    return BROWNIE_PROJECT.Simpletoken.deploy(
        "TST", "Test Token", 18, 1e21, {"from": accounts[0]}
    )
|
#include "bus.h"
/* Zero every bus byte in [start, size) that lies inside the 16-bit
 * address space.  NOTE: `size` is used as an exclusive end index, not an
 * element count.
 * Fix: the range guard previously read `0xFFFF <= i`, which skipped every
 * address below 0xFFFF instead of clearing it. */
void nesbus_clear(uint8_t * bus, int start, int size) {
    for (int i = start; i < size; i++)
    {
        if (i >= 0x0000 && i <= 0xFFFF)
            * (bus + i) = 0x00;
    }
}
/* Store one byte on the bus.  A uint16_t can only hold 0x0000-0xFFFF, so
 * every address is in range by construction and no bounds check is needed. */
void nesbus_write(uint8_t * bus, uint16_t address, uint8_t data) {
    bus[address] = data;
}
/* Fetch one byte from the bus.  The uint16_t address is inherently within
 * the 64 KiB space, so the read is always valid. */
uint8_t nesbus_read(uint8_t * bus, uint16_t address) {
    return bus[address];
}
|
// Webpack configuration: resolve extensionless imports as .js and run all
// first-party .js sources through babel-loader (node_modules excluded).
const babelRule = {
    test: /\.js?$/,
    use: ['babel-loader'],
    exclude: /(node_modules)/
};

const webpackConfig = {
    resolve: {
        extensions: ['.js']
    },
    module: {
        rules: [babelRule]
    }
};

module.exports = webpackConfig;
|
/* istanbul instrument in package npmtest_morgan */
/*jslint
bitwise: true,
browser: true,
maxerr: 8,
maxlen: 96,
node: true,
nomen: true,
regexp: true,
stupid: true
*/
// Auto-generated utility2 test boilerplate: detect the JS environment,
// bind `local` to the utility2 runtime, define the standard build/test
// cases, and (in node) start the test server.
(function () {
    'use strict';
    var local;
    // run shared js-env code - init-before
    (function () {
        // init local
        local = {};
        // init modeJs - 'browser' when DOM globals exist, else 'node'
        local.modeJs = (function () {
            try {
                return typeof navigator.userAgent === 'string' &&
                    typeof document.querySelector('body') === 'object' &&
                    typeof XMLHttpRequest.prototype.open === 'function' &&
                    'browser';
            } catch (errorCaughtBrowser) {
                return module.exports &&
                    typeof process.versions.node === 'string' &&
                    typeof require('http').createServer === 'function' &&
                    'node';
            }
        }());
        // init global
        local.global = local.modeJs === 'browser'
            ? window
            : global;
        switch (local.modeJs) {
        // re-init local from window.local
        case 'browser':
            local = local.global.utility2.objectSetDefault(
                local.global.utility2_rollup || local.global.local,
                local.global.utility2
            );
            break;
        // re-init local from example.js
        case 'node':
            local = (local.global.utility2_rollup || require('utility2'))
                .requireReadme();
            break;
        }
        // export local
        local.global.local = local;
    }());
    // run shared js-env code - function
    (function () {
        return;
    }());
    switch (local.modeJs) {
    // run browser js-env code - function
    case 'browser':
        break;
    // run node js-env code - function
    case 'node':
        break;
    }
    // run shared js-env code - init-after
    (function () {
        return;
    }());
    switch (local.modeJs) {
    // run browser js-env code - init-after
    case 'browser':
        local.testCase_browser_nullCase = local.testCase_browser_nullCase || function (
            options,
            onError
        ) {
            /*
             * this function will test the browser's null-case handling behavior
             */
            onError(null, options);
        };
        // run tests
        local.nop(local.modeTest &&
            document.querySelector('#testRunButton1') &&
            document.querySelector('#testRunButton1').click());
        break;
    // run node js-env code - init-after
    /* istanbul ignore next */
    case 'node':
        local.testCase_buildApidoc_default = local.testCase_buildApidoc_default || function (
            options,
            onError
        ) {
            /*
             * this function will test buildApidoc's default handling behavior
             */
            options = { modulePathList: module.paths };
            local.buildApidoc(options, onError);
        };
        local.testCase_buildApp_default = local.testCase_buildApp_default || function (
            options,
            onError
        ) {
            /*
             * this function will test buildApp's default handling behavior
             */
            local.testCase_buildReadme_default(options, local.onErrorThrow);
            local.testCase_buildLib_default(options, local.onErrorThrow);
            local.testCase_buildTest_default(options, local.onErrorThrow);
            local.testCase_buildCustomOrg_default(options, local.onErrorThrow);
            options = [];
            local.buildApp(options, onError);
        };
        local.testCase_buildCustomOrg_default = local.testCase_buildCustomOrg_default ||
            function (options, onError) {
                /*
                 * this function will test buildCustomOrg's default handling behavior
                 */
                options = {};
                local.buildCustomOrg(options, onError);
            };
        local.testCase_buildLib_default = local.testCase_buildLib_default || function (
            options,
            onError
        ) {
            /*
             * this function will test buildLib's default handling behavior
             */
            options = {};
            local.buildLib(options, onError);
        };
        local.testCase_buildReadme_default = local.testCase_buildReadme_default || function (
            options,
            onError
        ) {
            /*
             * this function will test buildReadme's default handling behavior
             */
            options = {};
            local.buildReadme(options, onError);
        };
        local.testCase_buildTest_default = local.testCase_buildTest_default || function (
            options,
            onError
        ) {
            /*
             * this function will test buildTest's default handling behavior
             */
            options = {};
            local.buildTest(options, onError);
        };
        local.testCase_webpage_default = local.testCase_webpage_default || function (
            options,
            onError
        ) {
            /*
             * this function will test webpage's default handling behavior
             */
            options = { modeCoverageMerge: true, url: local.serverLocalHost + '?modeTest=1' };
            local.browserTest(options, onError);
        };
        // run test-server
        local.testRunServer(local);
        break;
    }
}());
|
__all__ = [
'main',
]
import argparse
import itertools
import logging
import sys
from collections import OrderedDict
import iga.context
import iga.fargparse
import iga.ninja
import iga.package
from iga.context import load_workspace
from iga.label import Label
from iga.ninja import NinjaRule
def parse_argv(argv):
    """Parse the iga command line (argv includes the program name)."""
    argparser = argparse.ArgumentParser(prog='iga', description='''
    iga meta-build system
    ''')
    argparser.add_argument('-v', '--verbose', action='count', default=0,
                           help='verbose output')
    argparser.add_argument('label', help='build target')
    return argparser.parse_args(argv[1:])
def init(args):
    """Configure logging from the verbosity level and register rule modules.

    Verbosity: 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG (DEBUG adds timestamps
    and logger names).  Renamed the local `format` variable, which shadowed
    the builtin of the same name.
    """
    if args.verbose == 0:
        level = logging.WARNING
        log_format = '%(levelname)s %(message)s'
    elif args.verbose == 1:
        level = logging.INFO
        log_format = '%(levelname)s %(message)s'
    else:
        level = logging.DEBUG
        log_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
    logging.basicConfig(level=level, format=log_format)
    iga.fargparse.Parser.register_parse_func(str, parse_string)
    # Rule modules register their rule types on import + init().
    from iga.rules import cc
    cc.init()
    from iga.rules import genrule
    genrule.init()
def parse_string(string):
    """Identity parse function: accept genuine str values, reject all else."""
    if isinstance(string, str):
        return string
    raise iga.fargparse.ParseError()
def main(argv=None):
    """Entry point: resolve the target label's rule graph and emit build.ninja.

    argv defaults to sys.argv; returns 0 on success.
    """
    args = parse_argv(argv or sys.argv)
    init(args)
    load_workspace()
    label = Label.parse_cmdline(args.label)
    rules = OrderedDict()
    ninja_rules = OrderedDict()
    # Breadth-first walk of the dependency graph from the requested rule,
    # collecting each rule once plus every ninja rule type it needs.
    queue = [iga.package.get_rule(label)]
    while queue:
        rule = queue.pop(0)
        if rule.name in rules:
            continue
        rules[rule.name] = rule
        for ninja_rule in rule.rule_type.ninja_rules:
            ninja_rules[ninja_rule] = NinjaRule.get_object(ninja_rule)
        queue.extend(generate_input_rules(rule))
    iga.context.current().update(
        outputs=iga.package.get_outputs(),
        _parsed=True,
    )
    with open('build.ninja', 'w') as ninja_file:
        iga.ninja.write_header_to(ninja_file)
        # Ninja rule definitions first, then the build statements per rule,
        # each evaluated in a context scoped to its package.
        for ninja_rule in ninja_rules.values():
            ninja_rule.write_to(ninja_file)
        for rule in rules.values():
            with iga.context.create() as cxt:
                cxt['package'] = rule.name.package
                rule.write_to(ninja_file)
    return 0
def generate_input_rules(rule):
    """Yield the rules that produce *rule*'s inputs.

    Labels that do not map to a rule (plain source files) are skipped.
    The loop variable no longer shadows the *rule* parameter — the original
    only worked because chain.from_iterable captured rule.inputs before the
    first rebinding.
    """
    for label in itertools.chain.from_iterable(rule.inputs.values()):
        input_rule = iga.package.get_rule(label, raises=False)
        if input_rule is not None:
            yield input_rule
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @flow
*/
'use strict';
describe('Timeline profiler', () => {
let React;
let ReactDOM;
let Scheduler;
let utils;
let clearedMarks;
let featureDetectionMarkName = null;
let marks;
let setPerformanceMock;
function createUserTimingPolyfill() {
featureDetectionMarkName = null;
clearedMarks = [];
marks = [];
// Remove file-system specific bits or version-specific bits of information from the module range marks.
function filterMarkData(markName) {
if (markName.startsWith('--react-internal-module-start')) {
return `${markName.substr(0, 29)}-<filtered-file-system-path>`;
} else if (markName.startsWith('--react-internal-module-stop')) {
return `${markName.substr(0, 28)}-<filtered-file-system-path>`;
} else if (markName.startsWith('--react-version')) {
return `${markName.substr(0, 15)}-0.0.0`;
} else {
return markName;
}
}
// This is not a true polyfill, but it gives us enough to capture marks.
// Reference: https://developer.mozilla.org/en-US/docs/Web/API/User_Timing_API
return {
clearMarks(markName) {
markName = filterMarkData(markName);
clearedMarks.push(markName);
marks = marks.filter(mark => mark !== markName);
},
mark(markName, markOptions) {
markName = filterMarkData(markName);
if (featureDetectionMarkName === null) {
featureDetectionMarkName = markName;
}
marks.push(markName);
if (markOptions != null) {
// This is triggers the feature detection.
markOptions.startTime++;
}
},
};
}
function clearPendingMarks() {
clearedMarks.splice(0);
}
beforeEach(() => {
utils = require('./utils');
utils.beforeEachProfiling();
React = require('react');
ReactDOM = require('react-dom');
Scheduler = require('scheduler');
setPerformanceMock = require('react-devtools-shared/src/backend/profilingHooks')
.setPerformanceMock_ONLY_FOR_TESTING;
setPerformanceMock(createUserTimingPolyfill());
global.IS_REACT_ACT_ENVIRONMENT = true;
});
afterEach(() => {
// Verify all logged marks also get cleared.
expect(marks).toHaveLength(0);
setPerformanceMock(null);
});
describe('getLanesFromTransportDecimalBitmask', () => {
let getLanesFromTransportDecimalBitmask;
beforeEach(() => {
getLanesFromTransportDecimalBitmask = require('react-devtools-timeline/src/import-worker/preprocessData')
.getLanesFromTransportDecimalBitmask;
});
it('should return array of lane numbers from bitmask string', () => {
expect(getLanesFromTransportDecimalBitmask('1')).toEqual([0]);
expect(getLanesFromTransportDecimalBitmask('512')).toEqual([9]);
expect(getLanesFromTransportDecimalBitmask('3')).toEqual([0, 1]);
expect(getLanesFromTransportDecimalBitmask('1234')).toEqual([
1,
4,
6,
7,
10,
]); // 2 + 16 + 64 + 128 + 1024
expect(
getLanesFromTransportDecimalBitmask('1073741824'), // 0b1000000000000000000000000000000
).toEqual([30]);
expect(
getLanesFromTransportDecimalBitmask('2147483647'), // 0b1111111111111111111111111111111
).toEqual(Array.from(Array(31).keys()));
});
it('should return empty array if laneBitmaskString is not a bitmask', () => {
expect(getLanesFromTransportDecimalBitmask('')).toEqual([]);
expect(getLanesFromTransportDecimalBitmask('hello')).toEqual([]);
expect(getLanesFromTransportDecimalBitmask('-1')).toEqual([]);
expect(getLanesFromTransportDecimalBitmask('-0')).toEqual([]);
});
it('should ignore lanes outside REACT_TOTAL_NUM_LANES', () => {
const REACT_TOTAL_NUM_LANES = require('react-devtools-timeline/src/constants')
.REACT_TOTAL_NUM_LANES;
// Sanity check; this test may need to be updated when the no. of fiber lanes are changed.
expect(REACT_TOTAL_NUM_LANES).toBe(31);
expect(
getLanesFromTransportDecimalBitmask(
'4294967297', // 2^32 + 1
),
).toEqual([0]);
});
});
describe('preprocessData', () => {
let preprocessData;
beforeEach(() => {
preprocessData = require('react-devtools-timeline/src/import-worker/preprocessData')
.default;
});
// These should be dynamic to mimic a real profile,
// but reprooducible between test runs.
let pid = 0;
let tid = 0;
let startTime = 0;
function createUserTimingEntry(data) {
return {
pid: ++pid,
tid: ++tid,
ts: ++startTime,
...data,
};
}
function createProfilerVersionEntry() {
const SCHEDULING_PROFILER_VERSION = require('react-devtools-timeline/src/constants')
.SCHEDULING_PROFILER_VERSION;
return createUserTimingEntry({
cat: 'blink.user_timing',
name: '--profiler-version-' + SCHEDULING_PROFILER_VERSION,
});
}
function createReactVersionEntry() {
return createUserTimingEntry({
cat: 'blink.user_timing',
name: '--react-version-0.0.0',
});
}
function createLaneLabelsEntry() {
return createUserTimingEntry({
cat: 'blink.user_timing',
name:
'--react-lane-labels-Sync,InputContinuousHydration,InputContinuous,DefaultHydration,Default,TransitionHydration,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Transition,Retry,Retry,Retry,Retry,Retry,SelectiveHydration,IdleHydration,Idle,Offscreen',
});
}
function createNativeEventEntry(type, duration) {
return createUserTimingEntry({
cat: 'devtools.timeline',
name: 'EventDispatch',
args: {data: {type}},
dur: duration,
tdur: duration,
});
}
function creactCpuProfilerSample() {
return createUserTimingEntry({
args: {data: {startTime: ++startTime}},
cat: 'disabled-by-default-v8.cpu_profiler',
id: '0x1',
name: 'Profile',
ph: 'P',
});
}
function createBoilerplateEntries() {
return [
createProfilerVersionEntry(),
createReactVersionEntry(),
createLaneLabelsEntry(),
];
}
function createUserTimingData(sampleMarks) {
const cpuProfilerSample = creactCpuProfilerSample();
const randomSample = createUserTimingEntry({
dur: 100,
tdur: 200,
ph: 'X',
cat: 'disabled-by-default-devtools.timeline',
name: 'RunTask',
args: {},
});
const userTimingData = [cpuProfilerSample, randomSample];
sampleMarks.forEach(markName => {
userTimingData.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
return userTimingData;
}
beforeEach(() => {
tid = 0;
pid = 0;
startTime = 0;
});
it('should throw given an empty timeline', async () => {
await expect(async () => preprocessData([])).rejects.toThrow();
});
it('should throw given a timeline with no Profile event', async () => {
const randomSample = createUserTimingEntry({
dur: 100,
tdur: 200,
ph: 'X',
cat: 'disabled-by-default-devtools.timeline',
name: 'RunTask',
args: {},
});
await expect(async () =>
preprocessData([randomSample]),
).rejects.toThrow();
});
it('should throw given a timeline without an explicit profiler version mark nor any other React marks', async () => {
const cpuProfilerSample = creactCpuProfilerSample();
await expect(
async () => await preprocessData([cpuProfilerSample]),
).rejects.toThrow(
'Please provide profiling data from an React application',
);
});
it('should throw given a timeline with React scheduling marks, but without an explicit profiler version mark', async () => {
const cpuProfilerSample = creactCpuProfilerSample();
const scheduleRenderSample = createUserTimingEntry({
cat: 'blink.user_timing',
name: '--schedule-render-512-',
});
const samples = [cpuProfilerSample, scheduleRenderSample];
await expect(async () => await preprocessData(samples)).rejects.toThrow(
'This version of profiling data is not supported',
);
});
it('should return empty data given a timeline with no React scheduling profiling marks', async () => {
const cpuProfilerSample = creactCpuProfilerSample();
const randomSample = createUserTimingEntry({
dur: 100,
tdur: 200,
ph: 'X',
cat: 'disabled-by-default-devtools.timeline',
name: 'RunTask',
args: {},
});
const data = await preprocessData([
...createBoilerplateEntries(),
cpuProfilerSample,
randomSample,
]);
expect(data).toMatchInlineSnapshot(`
Object {
"batchUIDToMeasuresMap": Map {},
"componentMeasures": Array [],
"duration": 0.005,
"flamechart": Array [],
"internalModuleSourceToRanges": Map {},
"laneToLabelMap": Map {
0 => "Sync",
1 => "InputContinuousHydration",
2 => "InputContinuous",
3 => "DefaultHydration",
4 => "Default",
5 => "TransitionHydration",
6 => "Transition",
7 => "Transition",
8 => "Transition",
9 => "Transition",
10 => "Transition",
11 => "Transition",
12 => "Transition",
13 => "Transition",
14 => "Transition",
15 => "Transition",
16 => "Transition",
17 => "Transition",
18 => "Transition",
19 => "Transition",
20 => "Transition",
21 => "Transition",
22 => "Retry",
23 => "Retry",
24 => "Retry",
25 => "Retry",
26 => "Retry",
27 => "SelectiveHydration",
28 => "IdleHydration",
29 => "Idle",
30 => "Offscreen",
},
"laneToReactMeasureMap": Map {
0 => Array [],
1 => Array [],
2 => Array [],
3 => Array [],
4 => Array [],
5 => Array [],
6 => Array [],
7 => Array [],
8 => Array [],
9 => Array [],
10 => Array [],
11 => Array [],
12 => Array [],
13 => Array [],
14 => Array [],
15 => Array [],
16 => Array [],
17 => Array [],
18 => Array [],
19 => Array [],
20 => Array [],
21 => Array [],
22 => Array [],
23 => Array [],
24 => Array [],
25 => Array [],
26 => Array [],
27 => Array [],
28 => Array [],
29 => Array [],
30 => Array [],
},
"nativeEvents": Array [],
"networkMeasures": Array [],
"otherUserTimingMarks": Array [],
"reactVersion": "0.0.0",
"schedulingEvents": Array [],
"snapshotHeight": 0,
"snapshots": Array [],
"startTime": 1,
"suspenseEvents": Array [],
"thrownErrors": Array [],
}
`);
});
it('should process legacy data format (before lane labels were added)', async () => {
const cpuProfilerSample = creactCpuProfilerSample();
// Data below is hard-coded based on an older profile sample.
// Should be fine since this is explicitly a legacy-format test.
const data = await preprocessData([
...createBoilerplateEntries(),
cpuProfilerSample,
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--schedule-render-512-',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--render-start-512',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--render-stop',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--commit-start-512',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--layout-effects-start-512',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--layout-effects-stop',
}),
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--commit-stop',
}),
]);
expect(data).toMatchInlineSnapshot(`
Object {
"batchUIDToMeasuresMap": Map {
0 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.005,
"lanes": Array [
9,
],
"timestamp": 0.006,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.001,
"lanes": Array [
9,
],
"timestamp": 0.006,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.003,
"lanes": Array [
9,
],
"timestamp": 0.008,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
9,
],
"timestamp": 0.009,
"type": "layout-effects",
},
],
},
"componentMeasures": Array [],
"duration": 0.011,
"flamechart": Array [],
"internalModuleSourceToRanges": Map {},
"laneToLabelMap": Map {
0 => "Sync",
1 => "InputContinuousHydration",
2 => "InputContinuous",
3 => "DefaultHydration",
4 => "Default",
5 => "TransitionHydration",
6 => "Transition",
7 => "Transition",
8 => "Transition",
9 => "Transition",
10 => "Transition",
11 => "Transition",
12 => "Transition",
13 => "Transition",
14 => "Transition",
15 => "Transition",
16 => "Transition",
17 => "Transition",
18 => "Transition",
19 => "Transition",
20 => "Transition",
21 => "Transition",
22 => "Retry",
23 => "Retry",
24 => "Retry",
25 => "Retry",
26 => "Retry",
27 => "SelectiveHydration",
28 => "IdleHydration",
29 => "Idle",
30 => "Offscreen",
},
"laneToReactMeasureMap": Map {
0 => Array [],
1 => Array [],
2 => Array [],
3 => Array [],
4 => Array [],
5 => Array [],
6 => Array [],
7 => Array [],
8 => Array [],
9 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.005,
"lanes": Array [
9,
],
"timestamp": 0.006,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.001,
"lanes": Array [
9,
],
"timestamp": 0.006,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.003,
"lanes": Array [
9,
],
"timestamp": 0.008,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
9,
],
"timestamp": 0.009,
"type": "layout-effects",
},
],
10 => Array [],
11 => Array [],
12 => Array [],
13 => Array [],
14 => Array [],
15 => Array [],
16 => Array [],
17 => Array [],
18 => Array [],
19 => Array [],
20 => Array [],
21 => Array [],
22 => Array [],
23 => Array [],
24 => Array [],
25 => Array [],
26 => Array [],
27 => Array [],
28 => Array [],
29 => Array [],
30 => Array [],
},
"nativeEvents": Array [],
"networkMeasures": Array [],
"otherUserTimingMarks": Array [],
"reactVersion": "0.0.0",
"schedulingEvents": Array [
Object {
"lanes": Array [
9,
],
"timestamp": 0.005,
"type": "schedule-render",
"warning": null,
},
],
"snapshotHeight": 0,
"snapshots": Array [],
"startTime": 1,
"suspenseEvents": Array [],
"thrownErrors": Array [],
}
`);
});
it('should process a sample legacy render sequence', async () => {
utils.legacyRender(<div />, document.createElement('div'));
const data = await preprocessData([
...createBoilerplateEntries(),
...createUserTimingData(clearedMarks),
]);
expect(data).toMatchInlineSnapshot(`
Object {
"batchUIDToMeasuresMap": Map {
0 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.01,
"lanes": Array [
0,
],
"timestamp": 0.004,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.001,
"lanes": Array [
0,
],
"timestamp": 0.004,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.008,
"lanes": Array [
0,
],
"timestamp": 0.006,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
0,
],
"timestamp": 0.012,
"type": "layout-effects",
},
],
},
"componentMeasures": Array [],
"duration": 0.014,
"flamechart": Array [],
"internalModuleSourceToRanges": Map {
undefined => Array [
Array [
Object {
"functionName": "<filtered-file-system-path>",
},
Object {
"functionName": "dule-stop-<filtered-file-system-path>",
},
],
],
},
"laneToLabelMap": Map {
0 => "Sync",
1 => "InputContinuousHydration",
2 => "InputContinuous",
3 => "DefaultHydration",
4 => "Default",
5 => "TransitionHydration",
6 => "Transition",
7 => "Transition",
8 => "Transition",
9 => "Transition",
10 => "Transition",
11 => "Transition",
12 => "Transition",
13 => "Transition",
14 => "Transition",
15 => "Transition",
16 => "Transition",
17 => "Transition",
18 => "Transition",
19 => "Transition",
20 => "Transition",
21 => "Transition",
22 => "Retry",
23 => "Retry",
24 => "Retry",
25 => "Retry",
26 => "Retry",
27 => "SelectiveHydration",
28 => "IdleHydration",
29 => "Idle",
30 => "Offscreen",
},
"laneToReactMeasureMap": Map {
0 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.01,
"lanes": Array [
0,
],
"timestamp": 0.004,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.001,
"lanes": Array [
0,
],
"timestamp": 0.004,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.008,
"lanes": Array [
0,
],
"timestamp": 0.006,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
0,
],
"timestamp": 0.012,
"type": "layout-effects",
},
],
1 => Array [],
2 => Array [],
3 => Array [],
4 => Array [],
5 => Array [],
6 => Array [],
7 => Array [],
8 => Array [],
9 => Array [],
10 => Array [],
11 => Array [],
12 => Array [],
13 => Array [],
14 => Array [],
15 => Array [],
16 => Array [],
17 => Array [],
18 => Array [],
19 => Array [],
20 => Array [],
21 => Array [],
22 => Array [],
23 => Array [],
24 => Array [],
25 => Array [],
26 => Array [],
27 => Array [],
28 => Array [],
29 => Array [],
30 => Array [],
},
"nativeEvents": Array [],
"networkMeasures": Array [],
"otherUserTimingMarks": Array [],
"reactVersion": "0.0.0",
"schedulingEvents": Array [
Object {
"lanes": Array [
0,
],
"timestamp": 0.003,
"type": "schedule-render",
"warning": null,
},
],
"snapshotHeight": 0,
"snapshots": Array [],
"startTime": 4,
"suspenseEvents": Array [],
"thrownErrors": Array [],
}
`);
});
it('should process a sample createRoot render sequence', async () => {
function App() {
const [didMount, setDidMount] = React.useState(false);
React.useEffect(() => {
if (!didMount) {
setDidMount(true);
}
});
return true;
}
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => root.render(<App />));
const data = await preprocessData([
...createBoilerplateEntries(),
...createUserTimingData(clearedMarks),
]);
expect(data).toMatchInlineSnapshot(`
Object {
"batchUIDToMeasuresMap": Map {
0 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.012,
"lanes": Array [
4,
],
"timestamp": 0.004,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.004,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.008,
"lanes": Array [
4,
],
"timestamp": 0.008,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
4,
],
"timestamp": 0.014,
"type": "layout-effects",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.004,
"lanes": Array [
4,
],
"timestamp": 0.017,
"type": "passive-effects",
},
],
1 => Array [
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.012,
"lanes": Array [
4,
],
"timestamp": 0.022,
"type": "render-idle",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.022,
"type": "render",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.008,
"lanes": Array [
4,
],
"timestamp": 0.026,
"type": "commit",
},
Object {
"batchUID": 1,
"depth": 1,
"duration": 0.001,
"lanes": Array [
4,
],
"timestamp": 0.032,
"type": "layout-effects",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.035,
"type": "passive-effects",
},
],
},
"componentMeasures": Array [
Object {
"componentName": "App",
"duration": 0.001,
"timestamp": 0.005,
"type": "render",
"warning": null,
},
Object {
"componentName": "App",
"duration": 0.002,
"timestamp": 0.018,
"type": "passive-effect-mount",
"warning": null,
},
Object {
"componentName": "App",
"duration": 0.001,
"timestamp": 0.023,
"type": "render",
"warning": null,
},
Object {
"componentName": "App",
"duration": 0.001,
"timestamp": 0.036,
"type": "passive-effect-mount",
"warning": null,
},
],
"duration": 0.038,
"flamechart": Array [],
"internalModuleSourceToRanges": Map {
undefined => Array [
Array [
Object {
"functionName": "<filtered-file-system-path>",
},
Object {
"functionName": "dule-stop-<filtered-file-system-path>",
},
],
],
},
"laneToLabelMap": Map {
0 => "Sync",
1 => "InputContinuousHydration",
2 => "InputContinuous",
3 => "DefaultHydration",
4 => "Default",
5 => "TransitionHydration",
6 => "Transition",
7 => "Transition",
8 => "Transition",
9 => "Transition",
10 => "Transition",
11 => "Transition",
12 => "Transition",
13 => "Transition",
14 => "Transition",
15 => "Transition",
16 => "Transition",
17 => "Transition",
18 => "Transition",
19 => "Transition",
20 => "Transition",
21 => "Transition",
22 => "Retry",
23 => "Retry",
24 => "Retry",
25 => "Retry",
26 => "Retry",
27 => "SelectiveHydration",
28 => "IdleHydration",
29 => "Idle",
30 => "Offscreen",
},
"laneToReactMeasureMap": Map {
0 => Array [],
1 => Array [],
2 => Array [],
3 => Array [],
4 => Array [
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.012,
"lanes": Array [
4,
],
"timestamp": 0.004,
"type": "render-idle",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.004,
"type": "render",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.008,
"lanes": Array [
4,
],
"timestamp": 0.008,
"type": "commit",
},
Object {
"batchUID": 0,
"depth": 1,
"duration": 0.001,
"lanes": Array [
4,
],
"timestamp": 0.014,
"type": "layout-effects",
},
Object {
"batchUID": 0,
"depth": 0,
"duration": 0.004,
"lanes": Array [
4,
],
"timestamp": 0.017,
"type": "passive-effects",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.012,
"lanes": Array [
4,
],
"timestamp": 0.022,
"type": "render-idle",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.022,
"type": "render",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.008,
"lanes": Array [
4,
],
"timestamp": 0.026,
"type": "commit",
},
Object {
"batchUID": 1,
"depth": 1,
"duration": 0.001,
"lanes": Array [
4,
],
"timestamp": 0.032,
"type": "layout-effects",
},
Object {
"batchUID": 1,
"depth": 0,
"duration": 0.003,
"lanes": Array [
4,
],
"timestamp": 0.035,
"type": "passive-effects",
},
],
5 => Array [],
6 => Array [],
7 => Array [],
8 => Array [],
9 => Array [],
10 => Array [],
11 => Array [],
12 => Array [],
13 => Array [],
14 => Array [],
15 => Array [],
16 => Array [],
17 => Array [],
18 => Array [],
19 => Array [],
20 => Array [],
21 => Array [],
22 => Array [],
23 => Array [],
24 => Array [],
25 => Array [],
26 => Array [],
27 => Array [],
28 => Array [],
29 => Array [],
30 => Array [],
},
"nativeEvents": Array [],
"networkMeasures": Array [],
"otherUserTimingMarks": Array [],
"reactVersion": "0.0.0",
"schedulingEvents": Array [
Object {
"lanes": Array [
4,
],
"timestamp": 0.003,
"type": "schedule-render",
"warning": null,
},
Object {
"componentName": "App",
"lanes": Array [
4,
],
"timestamp": 0.019,
"type": "schedule-state-update",
"warning": null,
},
],
"snapshotHeight": 0,
"snapshots": Array [],
"startTime": 4,
"suspenseEvents": Array [],
"thrownErrors": Array [],
}
`);
});
// Regression guard: preprocessData must report (via console.error) user-timing
// streams in which a measure was started but never stopped.
it('should error if events and measures are incomplete', async () => {
const container = document.createElement('div');
utils.legacyRender(<div />, container);
// Drop every "render-stop" mark so each render-start has no matching end.
const invalidMarks = clearedMarks.filter(
mark => !mark.includes('render-stop'),
);
const invalidUserTimingData = createUserTimingData(invalidMarks);
const error = spyOn(console, 'error');
preprocessData([...createBoilerplateEntries(), ...invalidUserTimingData]);
expect(error).toHaveBeenCalled();
});
// Mirror of the previous test: a "stop" mark with no preceding "start"
// must also be reported via console.error.
it('should error if work is completed without being started', async () => {
const container = document.createElement('div');
utils.legacyRender(<div />, container);
// Drop every "render-start" mark so each render-stop is orphaned.
const invalidMarks = clearedMarks.filter(
mark => !mark.includes('render-start'),
);
const invalidUserTimingData = createUserTimingData(invalidMarks);
const error = spyOn(console, 'error');
preprocessData([...createBoilerplateEntries(), ...invalidUserTimingData]);
expect(error).toHaveBeenCalled();
});
it('should populate other user timing marks', async () => {
const userTimingData = createUserTimingData([]);
userTimingData.push(
createUserTimingEntry({
args: {},
cat: 'blink.user_timing',
id: '0xcdf75f7c',
name: 'VCWithoutImage: root',
ph: 'n',
scope: 'blink.user_timing',
}),
);
userTimingData.push(
createUserTimingEntry({
cat: 'blink.user_timing',
name: '--a-mark-that-looks-like-one-of-ours',
ph: 'R',
}),
);
userTimingData.push(
createUserTimingEntry({
cat: 'blink.user_timing',
name: 'Some other mark',
ph: 'R',
}),
);
const data = await preprocessData([
...createBoilerplateEntries(),
...userTimingData,
]);
expect(data.otherUserTimingMarks).toMatchInlineSnapshot(`
Array [
Object {
"name": "VCWithoutImage: root",
"timestamp": 0.003,
},
Object {
"name": "--a-mark-that-looks-like-one-of-ours",
"timestamp": 0.004,
},
Object {
"name": "Some other mark",
"timestamp": 0.005,
},
]
`);
});
it('should include a suspended resource "displayName" if one is set', async () => {
let promise = null;
let resolvedValue = null;
function readValue(value) {
if (resolvedValue !== null) {
return resolvedValue;
} else if (promise === null) {
promise = Promise.resolve(true).then(() => {
resolvedValue = value;
});
promise.displayName = 'Testing displayName';
}
throw promise;
}
function Component() {
const value = readValue(123);
return value;
}
const testMarks = [creactCpuProfilerSample()];
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() =>
root.render(
<React.Suspense fallback="Loading...">
<Component />
</React.Suspense>,
),
);
testMarks.push(...createUserTimingData(clearedMarks));
let data;
await utils.actAsync(async () => {
data = await preprocessData(testMarks);
});
expect(data.suspenseEvents).toHaveLength(1);
expect(data.suspenseEvents[0].promiseName).toBe('Testing displayName');
});
describe('warnings', () => {
describe('long event handlers', () => {
it('should not warn when React scedules a (sync) update inside of a short event handler', async () => {
function App() {
return null;
}
const testMarks = [
creactCpuProfilerSample(),
...createBoilerplateEntries(),
createNativeEventEntry('click', 5),
];
clearPendingMarks();
utils.legacyRender(<App />, document.createElement('div'));
testMarks.push(...createUserTimingData(clearedMarks));
const data = await preprocessData(testMarks);
const event = data.nativeEvents.find(({type}) => type === 'click');
expect(event.warning).toBe(null);
});
it('should not warn about long events if the cause was non-React JavaScript', async () => {
function App() {
return null;
}
const testMarks = [
creactCpuProfilerSample(),
...createBoilerplateEntries(),
createNativeEventEntry('click', 25000),
];
startTime += 2000;
clearPendingMarks();
utils.legacyRender(<App />, document.createElement('div'));
testMarks.push(...createUserTimingData(clearedMarks));
const data = await preprocessData(testMarks);
const event = data.nativeEvents.find(({type}) => type === 'click');
expect(event.warning).toBe(null);
});
it('should warn when React scedules a long (sync) update inside of an event', async () => {
function App() {
return null;
}
const testMarks = [
creactCpuProfilerSample(),
...createBoilerplateEntries(),
createNativeEventEntry('click', 25000),
];
clearPendingMarks();
utils.legacyRender(<App />, document.createElement('div'));
clearedMarks.forEach(markName => {
if (markName === '--render-stop') {
// Fake a long running render
startTime += 20000;
}
testMarks.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
const data = await preprocessData(testMarks);
const event = data.nativeEvents.find(({type}) => type === 'click');
expect(event.warning).toMatchInlineSnapshot(
`"An event handler scheduled a big update with React. Consider using the Transition API to defer some of this work."`,
);
});
it('should not warn when React finishes a previously long (async) update with a short (sync) update inside of an event', async () => {
function Yield({id, value}) {
Scheduler.unstable_yieldValue(`${id}:${value}`);
return null;
}
const testMarks = [
creactCpuProfilerSample(),
...createBoilerplateEntries(),
];
// Advance the clock by some arbitrary amount.
startTime += 50000;
const root = ReactDOM.createRoot(document.createElement('div'));
// Temporarily turn off the act environment, since we're intentionally using Scheduler instead.
global.IS_REACT_ACT_ENVIRONMENT = false;
React.startTransition(() => {
// Start rendering an async update (but don't finish).
root.render(
<>
<Yield id="A" value={1} />
<Yield id="B" value={1} />
</>,
);
expect(Scheduler).toFlushAndYieldThrough(['A:1']);
testMarks.push(...createUserTimingData(clearedMarks));
clearPendingMarks();
// Advance the clock some more to make the pending React update seem long.
startTime += 20000;
// Fake a long "click" event in the middle
// and schedule a sync update that will also flush the previous work.
testMarks.push(createNativeEventEntry('click', 25000));
ReactDOM.flushSync(() => {
root.render(
<>
<Yield id="A" value={2} />
<Yield id="B" value={2} />
</>,
);
});
});
expect(Scheduler).toHaveYielded(['A:2', 'B:2']);
testMarks.push(...createUserTimingData(clearedMarks));
const data = await preprocessData(testMarks);
const event = data.nativeEvents.find(({type}) => type === 'click');
expect(event.warning).toBe(null);
});
});
describe('nested updates', () => {
it('should not warn about short nested (state) updates during layout effects', async () => {
function Component() {
const [didMount, setDidMount] = React.useState(false);
Scheduler.unstable_yieldValue(
`Component ${didMount ? 'update' : 'mount'}`,
);
React.useLayoutEffect(() => {
setDidMount(true);
}, []);
return didMount;
}
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component mount',
'Component update',
]);
const data = await preprocessData([
...createBoilerplateEntries(),
...createUserTimingData(clearedMarks),
]);
const event = data.schedulingEvents.find(
({type}) => type === 'schedule-state-update',
);
expect(event.warning).toBe(null);
});
it('should not warn about short (forced) updates during layout effects', async () => {
class Component extends React.Component {
_didMount: boolean = false;
componentDidMount() {
this._didMount = true;
this.forceUpdate();
}
render() {
Scheduler.unstable_yieldValue(
`Component ${this._didMount ? 'update' : 'mount'}`,
);
return null;
}
}
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component mount',
'Component update',
]);
const data = await preprocessData([
...createBoilerplateEntries(),
...createUserTimingData(clearedMarks),
]);
const event = data.schedulingEvents.find(
({type}) => type === 'schedule-force-update',
);
expect(event.warning).toBe(null);
});
it('should warn about long nested (state) updates during layout effects', async () => {
function Component() {
const [didMount, setDidMount] = React.useState(false);
Scheduler.unstable_yieldValue(
`Component ${didMount ? 'update' : 'mount'}`,
);
// Fake a long render
startTime += 20000;
React.useLayoutEffect(() => {
setDidMount(true);
}, []);
return didMount;
}
const cpuProfilerSample = creactCpuProfilerSample();
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component mount',
'Component update',
]);
const testMarks = [];
clearedMarks.forEach(markName => {
if (markName === '--component-render-start-Component') {
// Fake a long running render
startTime += 20000;
}
testMarks.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
const data = await preprocessData([
cpuProfilerSample,
...createBoilerplateEntries(),
...testMarks,
]);
const event = data.schedulingEvents.find(
({type}) => type === 'schedule-state-update',
);
expect(event.warning).toMatchInlineSnapshot(
`"A big nested update was scheduled during layout. Nested updates require React to re-render synchronously before the browser can paint. Consider delaying this update by moving it to a passive effect (useEffect)."`,
);
});
it('should warn about long nested (forced) updates during layout effects', async () => {
class Component extends React.Component {
_didMount: boolean = false;
componentDidMount() {
this._didMount = true;
this.forceUpdate();
}
render() {
Scheduler.unstable_yieldValue(
`Component ${this._didMount ? 'update' : 'mount'}`,
);
return null;
}
}
const cpuProfilerSample = creactCpuProfilerSample();
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component mount',
'Component update',
]);
const testMarks = [];
clearedMarks.forEach(markName => {
if (markName === '--component-render-start-Component') {
// Fake a long running render
startTime += 20000;
}
testMarks.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
const data = await preprocessData([
cpuProfilerSample,
...createBoilerplateEntries(),
...testMarks,
]);
const event = data.schedulingEvents.find(
({type}) => type === 'schedule-force-update',
);
expect(event.warning).toMatchInlineSnapshot(
`"A big nested update was scheduled during layout. Nested updates require React to re-render synchronously before the browser can paint. Consider delaying this update by moving it to a passive effect (useEffect)."`,
);
});
it('should not warn about transition updates scheduled during commit phase', async () => {
function Component() {
const [value, setValue] = React.useState(0);
// eslint-disable-next-line no-unused-vars
const [isPending, startTransition] = React.useTransition();
Scheduler.unstable_yieldValue(
`Component rendered with value ${value}`,
);
// Fake a long render
if (value !== 0) {
Scheduler.unstable_yieldValue('Long render');
startTime += 20000;
}
React.useLayoutEffect(() => {
startTransition(() => {
setValue(1);
});
}, []);
return value;
}
const cpuProfilerSample = creactCpuProfilerSample();
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component rendered with value 0',
'Component rendered with value 0',
'Component rendered with value 1',
'Long render',
]);
const testMarks = [];
clearedMarks.forEach(markName => {
if (markName === '--component-render-start-Component') {
// Fake a long running render
startTime += 20000;
}
testMarks.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
const data = await preprocessData([
cpuProfilerSample,
...createBoilerplateEntries(),
...testMarks,
]);
data.schedulingEvents.forEach(event => {
expect(event.warning).toBeNull();
});
});
it('should not warn about deferred value updates scheduled during commit phase', async () => {
function Component() {
const [value, setValue] = React.useState(0);
const deferredValue = React.useDeferredValue(value);
Scheduler.unstable_yieldValue(
`Component rendered with value ${value} and deferredValue ${deferredValue}`,
);
// Fake a long render
if (deferredValue !== 0) {
Scheduler.unstable_yieldValue('Long render');
startTime += 20000;
}
React.useLayoutEffect(() => {
setValue(1);
}, []);
return value + deferredValue;
}
const cpuProfilerSample = creactCpuProfilerSample();
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() => {
root.render(<Component />);
});
expect(Scheduler).toHaveYielded([
'Component rendered with value 0 and deferredValue 0',
'Component rendered with value 1 and deferredValue 0',
'Component rendered with value 1 and deferredValue 1',
'Long render',
]);
const testMarks = [];
clearedMarks.forEach(markName => {
if (markName === '--component-render-start-Component') {
// Fake a long running render
startTime += 20000;
}
testMarks.push({
pid: ++pid,
tid: ++tid,
ts: ++startTime,
args: {data: {}},
cat: 'blink.user_timing',
name: markName,
ph: 'R',
});
});
const data = await preprocessData([
cpuProfilerSample,
...createBoilerplateEntries(),
...testMarks,
]);
data.schedulingEvents.forEach(event => {
expect(event.warning).toBeNull();
});
});
});
describe('errors thrown while rendering', () => {
it('shoult parse Errors thrown during render', async () => {
spyOn(console, 'error');
class ErrorBoundary extends React.Component {
state = {error: null};
componentDidCatch(error) {
this.setState({error});
}
render() {
if (this.state.error) {
return null;
}
return this.props.children;
}
}
function ExampleThatThrows() {
throw Error('Expected error');
}
const testMarks = [creactCpuProfilerSample()];
// Mount and commit the app
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() =>
root.render(
<ErrorBoundary>
<ExampleThatThrows />
</ErrorBoundary>,
),
);
testMarks.push(...createUserTimingData(clearedMarks));
const data = await preprocessData(testMarks);
expect(data.thrownErrors).toHaveLength(2);
expect(data.thrownErrors[0].message).toMatchInlineSnapshot(
'"Expected error"',
);
});
});
describe('suspend during an update', () => {
// This also tests an edge case where a component suspends while profiling
// before the first commit is logged (so the lane-to-labels map will not yet exist).
it('should warn about suspending during an udpate', async () => {
let promise = null;
let resolvedValue = null;
function readValue(value) {
if (resolvedValue !== null) {
return resolvedValue;
} else if (promise === null) {
promise = Promise.resolve(true).then(() => {
resolvedValue = value;
});
}
throw promise;
}
function Component({shouldSuspend}) {
Scheduler.unstable_yieldValue(`Component ${shouldSuspend}`);
if (shouldSuspend) {
readValue(123);
}
return null;
}
// Mount and commit the app
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() =>
root.render(
<React.Suspense fallback="Loading...">
<Component shouldSuspend={false} />
</React.Suspense>,
),
);
clearPendingMarks();
const testMarks = [creactCpuProfilerSample()];
// Start profiling and suspend during a render.
utils.act(() =>
root.render(
<React.Suspense fallback="Loading...">
<Component shouldSuspend={true} />
</React.Suspense>,
),
);
testMarks.push(...createUserTimingData(clearedMarks));
let data;
await utils.actAsync(async () => {
data = await preprocessData(testMarks);
});
expect(data.suspenseEvents).toHaveLength(1);
expect(data.suspenseEvents[0].warning).toMatchInlineSnapshot(
`"A component suspended during an update which caused a fallback to be shown. Consider using the Transition API to avoid hiding components after they've been mounted."`,
);
});
it('should not warn about suspending during an transition', async () => {
let promise = null;
let resolvedValue = null;
function readValue(value) {
if (resolvedValue !== null) {
return resolvedValue;
} else if (promise === null) {
promise = Promise.resolve(true).then(() => {
resolvedValue = value;
});
}
throw promise;
}
function Component({shouldSuspend}) {
Scheduler.unstable_yieldValue(`Component ${shouldSuspend}`);
if (shouldSuspend) {
readValue(123);
}
return null;
}
// Mount and commit the app
const root = ReactDOM.createRoot(document.createElement('div'));
utils.act(() =>
root.render(
<React.Suspense fallback="Loading...">
<Component shouldSuspend={false} />
</React.Suspense>,
),
);
const testMarks = [creactCpuProfilerSample()];
// Start profiling and suspend during a render.
await utils.actAsync(async () =>
React.startTransition(() =>
root.render(
<React.Suspense fallback="Loading...">
<Component shouldSuspend={true} />
</React.Suspense>,
),
),
);
testMarks.push(...createUserTimingData(clearedMarks));
let data;
await utils.actAsync(async () => {
data = await preprocessData(testMarks);
});
expect(data.suspenseEvents).toHaveLength(1);
expect(data.suspenseEvents[0].warning).toBe(null);
});
});
});
// TODO: Add test for snapshot base64 parsing
// TODO: Add test for flamechart parsing
});
});
|
/* Login request [first one]
Characteristics: parameters are appended to the URL, yet it is a POST request.
No response data was captured (possibly intercepted?).
API endpoint: api/oauth/oauth/token
Parameters: in the URL query string
*/
// http://localhost:8080/api/oauth/oauth/token?grant_type=pwd&scope=all&client_id=nti_client_id&client_secret=nti_client_secret&username=chenjun&password=e5078bfbbe579416d62161980fa0bc20
// `
// grant_type: pwd
// scope: all
// client_id: nti_client_id
// client_secret: nti_client_secret
// username: chenjun
// password: e5078bfbbe579416d62161980fa0bc20
// `
// Placeholder module for the captured login request/response above; the
// actual request payload and response were not recorded.
const parameter = null
const result = null
export default result
|
/***********
* GLOBALS *
***********/
// Live collection of UM (unique monster) table rows rendered on the page.
var um_rows = document.getElementsByClassName( "um-container" );
// Per-row completion flags, persisted to localStorage under the "um" key.
var save_data = null;
// Feature-detect the Web Storage API once at load time.
var storage_enabled = typeof( Storage ) !== "undefined";
/*************
* CALLBACKS *
*************/
// Checkbox change handler for a UM row: mirror the checked state onto the
// row's "row-complete" class and persist it to localStorage.
// `index` is the 1-based quest number (not the array index).
function onUMMarkerChange( checkbox, index ) {
    var row = checkbox.parentElement.parentElement;
    index = index - 1; // Switch from 1-index to 0-index
    // classList.toggle with a force argument replaces the duplicated
    // add/remove branches of the original; behavior is identical.
    row.classList.toggle( "row-complete", checkbox.checked );
    if( storage_enabled ) {
        save_data[ index ] = checkbox.checked;
        localStorage.setItem( "um", JSON.stringify( save_data ) );
    }
}
// Uncheck every UM row's checkbox and persist the cleared state via
// onUMMarkerChange (which also removes the "row-complete" styling).
function onClearUMs() {
const INDEX_CHECK = 0;
for( var index = 0; index < um_rows.length; ++index ) {
var checkbox = getUMColumn( um_rows[ index ], INDEX_CHECK ).getElementsByTagName( "input" )[ 0 ];
checkbox.checked = false;
onUMMarkerChange( checkbox, index + 1 ); // Add 1 because it takes quest number, not index, blame Handlebars
}
}
// Toggle visibility of the collapsible subrow belonging to quest `index`
// (rows carry DOM ids of the form "um-<index>").
function onCollapseClickInternal( index ) {
    var container = document.getElementById( "um-" + String( index ) );
    getUMSubrow( container ).classList.toggle( "hidden" );
}
// Collapse-button click handler: flip the button icon, then show/hide the subrow.
function onCollapseClick( button, index ) {
button.classList.toggle( "collapse" ); // swap the button icon
onCollapseClickInternal( index );
}
/********************
* HELPER FUNCTIONS *
********************/
// Re-order the live HTMLCollection `table_rows` in the DOM according to
// `sort_func` (an Array.prototype.sort comparator over row elements).
function sortTable( table_rows, sort_func ) {
var parent = table_rows[ 0 ].parentElement;
var arrayed_elements = [].slice.call( table_rows ); // Workaround to convert the table to an array
arrayed_elements.sort( sort_func ); // Sort
arrayed_elements.forEach( element => parent.appendChild( element ) ); // And put the elements back in
// Since appendChild removes the element from its previous parent, the old ordering is erased automatically
}
// Return the main (always-visible) table row inside a UM container div.
function getUMMainRow( um_div ) {
return um_div.getElementsByClassName( "um-row" )[ 0 ];
}
// Return the collapsible detail subrow inside a UM container div.
function getUMSubrow( um_div ) {
return um_div.getElementsByClassName( "um-subrow" )[ 0 ];
}
// Return the <td> at position `column` within the container's main row.
function getUMColumn( um_div, column ) {
return getUMMainRow( um_div ).getElementsByTagName( "td" )[ column ];
}
/******************
* SORT FUNCTIONS *
******************/
// Sort the UM table rows ascending by quest number (column 1).
function sortByNum() {
    sortTable( um_rows, function( row1, row2 ) {
        const INDEX_NUM = 1;
        var num_1 = parseInt( row1.getElementsByTagName( "td" )[ INDEX_NUM ].textContent );
        var num_2 = parseInt( row2.getElementsByTagName( "td" )[ INDEX_NUM ].textContent );
        // Array.prototype.sort expects a signed number (<0 / 0 / >0).
        // The original returned the boolean `num_1 > num_2`, which coerces to
        // 0/1 and can never signal "less than", yielding an unreliable order.
        return num_1 - num_2;
    } );
}
// Sort the UM table rows alphabetically by monster name (column 2).
function sortByName() {
    sortTable( um_rows, function( row1, row2 ) {
        const INDEX_NAME = 2;
        var title_1 = row1.getElementsByTagName( "td" )[ INDEX_NAME ].textContent;
        var title_2 = row2.getElementsByTagName( "td" )[ INDEX_NAME ].textContent;
        // sort() requires a signed number; the original boolean comparison
        // never produced -1, so the resulting order was unreliable.
        if( title_1 < title_2 ) return -1;
        if( title_1 > title_2 ) return 1;
        return 0;
    } );
}
// Sort the UM table rows ascending by level (column 3).
function sortByLevel() {
    sortTable( um_rows, function( row1, row2 ) {
        const INDEX_LEVEL = 3;
        var level_1 = parseInt( row1.getElementsByTagName( "td" )[ INDEX_LEVEL ].textContent );
        var level_2 = parseInt( row2.getElementsByTagName( "td" )[ INDEX_LEVEL ].textContent );
        // Signed-number comparator; the original returned a boolean, which
        // sort() treats as 0/1 and never as "less than".
        return level_1 - level_2;
    } );
}
// Sort the UM table rows by in-game area (column 4), in story order.
function sortByArea() {
    // Story-ordered area list, hoisted out of the comparator so it is not
    // rebuilt on every single comparison.
    const AREAS = [
        "Colony 9",
        "Tephra Cave",
        "Bionis’ Leg",
        "Colony 6",
        "Ether Mine",
        "Satorl Marsh",
        "Makna Forest",
        "Frontier Village",
        "Eryth Sea",
        "High Entia Tomb",
        "Valak Mountain",
        "Sword Valley",
        "Galahad Fortress",
        "Fallen Arm",
        "Mechonis Field",
        "Central Factory",
        "Agniratha",
        "Bionis’ Interior",
        "Prison Island"
    ];
    sortTable( um_rows, function( row1, row2 ) {
        const INDEX_AREA = 4;
        var area1 = AREAS.indexOf( row1.getElementsByTagName( "td" )[ INDEX_AREA ].textContent );
        var area2 = AREAS.indexOf( row2.getElementsByTagName( "td" )[ INDEX_AREA ].textContent );
        // sort() needs a signed number, not the boolean the original returned.
        return area1 - area2;
    } );
}
/**************
* INITIALIZE *
**************/
// Check local storage
// Load (or initialize) the persisted completion flags from localStorage.
if( !storage_enabled ) {
console.log( "Web storage is disabled, unable to store data between sessions" );
} else {
// First visit: start with an empty completion array; otherwise restore it.
if( !localStorage.getItem( "um" ) ) {
save_data = [];
} else {
save_data = JSON.parse( localStorage.getItem( "um" ) );
}
}
// Initialize rows
// Restore each row's checkbox from saved state and collapse all subrows.
for( var index = 0; index < um_rows.length; ++index ) {
const INDEX_CHECK = 0;
// Init rows from storage
var elements = um_rows[ index ].getElementsByTagName( "td" );
var checkbox = elements[ INDEX_CHECK ].getElementsByTagName( "input" )[ 0 ];
if( storage_enabled ) {
// NOTE(review): save_data[index] may be undefined on first visit, which
// leaves the checkbox unchecked — apparently relied-upon behavior.
checkbox.checked = save_data[ index ];
onUMMarkerChange( checkbox, index + 1 );
}
// Collapse everything
onCollapseClickInternal( index + 1 );
}
|
#pragma once
#include <Macro/RectangleArray.h>
#include <map>
#include <vector>
#include <set>
// printf-style variadic logging helper.
void log(const char* text, ...);
// Solve an assignment problem over the given cost matrix; returns a
// row -> column mapping. NOTE(review): the exact algorithm (e.g. Hungarian)
// lives in the implementation file — confirm there.
std::map<int, int> computeAssignments(Util::RectangleArray< double> &cost);
// Restart the current game session.
void myRestartGame();
// Record a win/loss result for the given map.
void logScore(bool isWinner, std::string mapName);
// Return `bins` values of an exponential distribution with rate `lambda`.
std::vector<double> exponentialDistribution(double lambda, unsigned int bins=1000);
// Largest element of t.
long double maxVector(const std::vector<long double>& t);
// Index of the largest element of t.
size_t indMax(const std::vector<long double>& t);
// Indices of elements above `minProb`. NOTE(review): strict vs. inclusive
// comparison is defined in the .cpp — confirm before relying on it.
std::set<size_t> supTo(const std::vector<long double>& t, long double minProb);
// True if `fileName` exists on disk.
bool fileExists(const char *fileName);
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import setuptools
# Third-party runtime dependencies installed alongside the package.
INSTALL_REQUIRES = [
'absl-py',
'apache-beam',
'numpy',
'scipy',
'xarray',
]
# Package metadata consumed by pip / PyPI.
setuptools.setup(
name='pde-superresolution-2d',
version='0.0.0',
license='Apache 2.0',
author='Google LLC',
author_email='noreply@google.com',
install_requires=INSTALL_REQUIRES,
url='https://github.com/google-research/pde-superresolution-2d',
packages=setuptools.find_packages(),
python_requires='>=3')
|
//p5.play
var wallTop, wallBottom, wallLeft, wallRight, player;
var WALL_THICKNESS = 30;
//var playerImg, backgroundImg;
// player variables
var playerInitialX = 60;
var playerInitialY = 300;
var playerInitialDirection = 180;
var playerX = playerInitialX;
var playerY = playerInitialY;
var playerDirection = playerInitialDirection;
// target variables
var targetX = 300;
var targetY = 60;
// p5.js preload hook: load every image asset before setup() runs.
function preload(){
playerLeft = loadImage("/assets/pokeballs/pokeball-left.png");
playerRight = loadImage("/assets/pokeballs/pokeball-right.png");
playerTop = loadImage("/assets/pokeballs/pokeball-top.png");
playerBottom = loadImage("/assets/pokeballs/pokeball-bottom.png");
targetImg = loadImage("/assets/pokemons/pidgeot.png");
treeImg = loadImage("/assets/objects/tree.png");
backgroundImg = loadImage('/assets/background/bg4.png'); // load background image
}
// p5.js setup hook: create the canvas, the boundary walls, the obstacles,
// the player sprite and the target sprite.
function setup() {
var canvasp5 = createCanvas(400, 400);
canvasp5.parent('sketch-holder');
// Slow the game down (one frame per second)
frameRate(1);
// Boundary walls just outside the canvas edges
wallTop = createSprite(width/2, -WALL_THICKNESS/2, width+WALL_THICKNESS*2, WALL_THICKNESS);
wallTop.immovable = true;
wallBottom = createSprite(width/2, height+WALL_THICKNESS/2, width+WALL_THICKNESS*2, WALL_THICKNESS);
wallBottom.immovable = true;
wallLeft = createSprite(-WALL_THICKNESS/2, height/2, WALL_THICKNESS, height);
wallLeft.immovable = true;
wallRight = createSprite(width+WALL_THICKNESS/2, height/2, WALL_THICKNESS, height);
wallRight.immovable = true;
// Creating on-screen obstacles
// First obstacle (tree)
object1 = createSprite(205, 305, 75, 85);
object1.addImage("tree", treeImg);
object1.immovable = true;
// Second obstacle (tree)
object2 = createSprite(235, 75, 75, 85);
object2.addImage("tree", treeImg);
object2.immovable = true;
// Creating the player
player = createSprite(playerX, playerY, 40 ,40);
player.addAnimation("right", playerRight);
/*player.addAnimation("left", playerLeft);
player.addAnimation("top", playerTop);
player.addAnimation("bottom", playerBottom);*/
// Creating the target
target = createSprite(targetX, targetY, 40, 40);
target.addImage("pokemon", targetImg);
}
// p5.js draw loop (runs once per second given frameRate(1)): repaint the
// background and resolve collisions between the player and the obstacles/walls.
function draw() {
background(backgroundImg);
// NOTE(review): re-adding the same "right" animation every frame looks
// redundant — setAnimation() already attaches the correct animation when the
// sprite is recreated; confirm whether this line is actually needed.
player.addAnimation("right", playerRight);
target.changeAnimation("pokemon");
player.collide(object1);
player.collide(object2);
// immovable is not working (original author's note)
player.collide(wallTop);
player.collide(wallBottom);
player.collide(wallRight);
player.collide(wallLeft);
drawSprites();
}
//avanca uma casa no jogo
function setNewPosition() {
//soma ou subtrai 40px da posicao atual do jogador
//somente altera se a nova posicao estiver dentro dos limites do jogo
switch (playerDirection){
case 0:
if(player.position.x - 40 >= 0){
playerX = player.position.x - 40;
}
break;
case 90:
if(player.position.y - 40 >= 0) {
playerY = player.position.y - 40;
}
break;
case 180:
if(player.position.x + 40 <= 400){
playerX = player.position.x + 40;
}
break;
case 270:
if(player.position.y + 40 <= 400){
playerY = player.position.y + 40;
}
break;
}
setAnimation();
checkChallenge();
}
//verifica se o player esta na mesma posicao do target
// Removes the target sprite once the player lands on its square.
function checkChallenge() {
    var reached = (playerX == targetX) && (playerY == targetY);
    if (reached) {
        target.remove();
    }
}
//implementado dentro do bloco WHILE
// WHILE-block condition: true while the player has NOT reached the target.
function checkChallengeBlock() {
    return !(playerX == targetX && playerY == targetY);
}
//verifica se o player esta na mesma posicao do target
// Reports the result: correct when the player stands on the target square.
function checkAnswer() {
    if (playerX != targetX || playerY != targetY) {
        wrongAnswer();
    } else {
        correctAnswer();
    }
}
//gira o player para a direita
// Rotates the player 90 degrees clockwise, wrapping from 270 back to 0.
function setDirectionRIGHT() {
    playerDirection = (playerDirection + 90 <= 270) ? playerDirection + 90 : 0;
    setAnimation();
}
//gira o player para a esquerda
// Rotates the player 90 degrees counter-clockwise, wrapping from 0 to 270.
function setDirectionLEFT() {
    playerDirection = (playerDirection - 90 >= 0) ? playerDirection - 90 : 270;
    setAnimation();
}
//altera animacao conforme a rotação do player
// Recreates the player sprite with the animation matching its rotation.
function setAnimation(){
    player.remove();
    // lookup table: direction (degrees) -> [animation label, animation]
    var byDirection = {
        0: ["left", playerLeft],
        90: ["top", playerTop],
        180: ["right", playerRight],
        270: ["bottom", playerBottom]
    };
    var entry = byDirection[playerDirection];
    if (entry) {
        player = createSprite(playerX, playerY, 100, 100);
        player.addAnimation(entry[0], entry[1]);
    }
}
//reinica o jogo colocando o player na posição inicial
// Restarts the game: player back at the starting square facing right,
// target redrawn at its position.
function resetGame() {
    playerX = playerInitialX;
    playerY = playerInitialY;
    playerDirection = playerInitialDirection;
    // redraw the player
    player.remove();
    player = createSprite(playerX, playerY, 100, 100);
    player.addAnimation("right", playerRight);
    // redraw the target
    target.remove();
    target = createSprite(targetX, targetY, 100, 100);
    target.addImage("pokemon", targetImg);
}
|
APP_DIRECTORY_NAME = "APP"
SRC_DIRECTORY_NAME = "src"
TEMPLATES_DIRECTORY_NAME = "templates"

# Base URL of the GitHub directory hosting the production React js template
# assets; every template file lives flat inside this directory. Factoring it
# out removes the 7x repeated prefix, so a repository move needs one edit.
_REACTJS_ASSETS_BASE_URL = (
    "https://raw.githubusercontent.com/Jitensid/django-webpack-dev-server"
    "/main/assets/reactjs/"
)

# Production React Js Template files having Github Repository Links.
REACTJS_TEMPLATES_URLS_DICT = {
    asset: _REACTJS_ASSETS_BASE_URL + asset
    for asset in (
        "package.json-tpl",
        "webpack.config.js-tpl",
        "babel.config.json-tpl",
        "App.js-tpl",
        "index.js-tpl",
        "App.css-tpl",
        "reactlogo.png",
    )
}

# Template files specific to react js configuration:
# (target directory, file name written to disk, download URL).
PROD_REACTJS_TEMPLATE_FILES = [
    (
        APP_DIRECTORY_NAME,
        "package.json",
        REACTJS_TEMPLATES_URLS_DICT["package.json-tpl"],
    ),
    (
        APP_DIRECTORY_NAME,
        "webpack.config.js",
        REACTJS_TEMPLATES_URLS_DICT["webpack.config.js-tpl"],
    ),
    (
        APP_DIRECTORY_NAME,
        "babel.config.json",
        REACTJS_TEMPLATES_URLS_DICT["babel.config.json-tpl"],
    ),
    (SRC_DIRECTORY_NAME, "App.js", REACTJS_TEMPLATES_URLS_DICT["App.js-tpl"]),
    (SRC_DIRECTORY_NAME, "index.js", REACTJS_TEMPLATES_URLS_DICT["index.js-tpl"]),
    (SRC_DIRECTORY_NAME, "App.css", REACTJS_TEMPLATES_URLS_DICT["App.css-tpl"]),
    (SRC_DIRECTORY_NAME, "reactlogo.png", REACTJS_TEMPLATES_URLS_DICT["reactlogo.png"]),
]
|
import React, { useState, useEffect } from 'react';
import { Link as RouterLink, withRouter } from 'react-router-dom';
import PropTypes from 'prop-types';
import validate from 'validate.js';
import { makeStyles } from '@material-ui/styles';
import {
Grid,
Button,
IconButton,
TextField,
Link,
FormHelperText,
Checkbox,
Typography
} from '@material-ui/core';
import ArrowBackIcon from '@material-ui/icons/ArrowBack';
// validate.js constraints for the sign-up form; hasError()/FormHelperText
// below surface the generated messages per field.
const schema = {
  firstName: {
    presence: { allowEmpty: false, message: 'is required' },
    length: {
      maximum: 32
    }
  },
  lastName: {
    presence: { allowEmpty: false, message: 'is required' },
    length: {
      maximum: 32
    }
  },
  email: {
    presence: { allowEmpty: false, message: 'is required' },
    email: true,
    length: {
      maximum: 64
    }
  },
  password: {
    presence: { allowEmpty: false, message: 'is required' },
    length: {
      maximum: 128
    }
  },
  // the Terms-and-Conditions checkbox must be ticked
  policy: {
    presence: { allowEmpty: false, message: 'is required' },
    checked: true
  }
};
// JSS styles for the SignUp page (Material-UI makeStyles hook).
const useStyles = makeStyles(theme => ({
  root: {
    backgroundColor: theme.palette.background.default,
    height: '100%'
  },
  grid: {
    height: '100%'
  },
  quoteContainer: {
    [theme.breakpoints.down('md')]: {
      display: 'none'
    }
  },
  quote: {
    backgroundColor: theme.palette.neutral,
    height: '100%',
    display: 'flex',
    justifyContent: 'center',
    alignItems: 'center',
    backgroundImage: 'url(/images/panel.png)',
    backgroundSize: 'cover',
    backgroundRepeat: 'no-repeat',
    backgroundPosition: 'center'
  },
  quoteInner: {
    textAlign: 'center',
    flexBasis: '600px'
  },
  quoteText: {
    color: theme.palette.white,
    fontWeight: 300
  },
  name: {
    marginTop: theme.spacing(3),
    color: theme.palette.white
  },
  bio: {
    color: theme.palette.white
  },
  contentContainer: {},
  content: {
    height: '100%',
    display: 'flex',
    flexDirection: 'column'
  },
  contentHeader: {
    display: 'flex',
    alignItems: 'center',
    paddingTop: theme.spacing(5),
    // BUG FIX: was misspelled "paddingBototm", so the bottom padding was
    // silently ignored by JSS.
    paddingBottom: theme.spacing(2),
    paddingLeft: theme.spacing(2),
    paddingRight: theme.spacing(2)
  },
  logoImage: {
    marginLeft: theme.spacing(4)
  },
  contentBody: {
    flexGrow: 1,
    display: 'flex',
    alignItems: 'center',
    [theme.breakpoints.down('md')]: {
      justifyContent: 'center'
    }
  },
  form: {
    paddingLeft: 100,
    paddingRight: 100,
    paddingBottom: 125,
    flexBasis: 700,
    [theme.breakpoints.down('sm')]: {
      paddingLeft: theme.spacing(2),
      paddingRight: theme.spacing(2)
    }
  },
  title: {
    marginTop: theme.spacing(3)
  },
  textField: {
    marginTop: theme.spacing(2)
  },
  policy: {
    marginTop: theme.spacing(1),
    display: 'flex',
    alignItems: 'center'
  },
  policyCheckbox: {
    marginLeft: '-14px'
  },
  signUpButton: {
    margin: theme.spacing(2, 0)
  }
}));
// Sign-up page: a validate.js-driven form (schema above) with a quote panel.
// `history` is injected by react-router's withRouter (see export below).
const SignUp = props => {
  const { history } = props;
  const classes = useStyles();
  // values / touched / errors mirror the form; isValid gates the submit button.
  const [formState, setFormState] = useState({
    isValid: false,
    values: {},
    touched: {},
    errors: {}
  });
  // Re-validate against the schema whenever any field value changes.
  useEffect(() => {
    const errors = validate(formState.values, schema);
    setFormState(formState => ({
      ...formState,
      isValid: errors ? false : true,
      errors: errors || {}
    }));
  }, [formState.values]);
  // Store the new value (checkbox state or text) and mark the field touched.
  const handleChange = event => {
    event.persist();
    setFormState(formState => ({
      ...formState,
      values: {
        ...formState.values,
        [event.target.name]:
          event.target.type === 'checkbox'
            ? event.target.checked
            : event.target.value
      },
      touched: {
        ...formState.touched,
        [event.target.name]: true
      }
    }));
  };
  const handleBack = () => {
    history.goBack();
  };
  // NOTE(review): no sign-up request is sent anywhere -- submitting only
  // navigates home. Confirm whether an API call is still to be wired in.
  const handleSignUp = event => {
    event.preventDefault();
    history.push('/');
  };
  // Only surface an error once the user has interacted with the field.
  const hasError = field =>
    formState.touched[field] && formState.errors[field] ? true : false;
  return (
    <div className={classes.root}>
      <Grid
        className={classes.grid}
        container
      >
        <Grid
          className={classes.quoteContainer}
          item
          lg={5}
        >
          <div className={classes.quote}>
            <div className={classes.quoteInner}>
              <Typography
                className={classes.quoteText}
                variant="h1"
              >
                Welcome to StudyDate.
              </Typography>
              <br></br>
              <div className={classes.person}>
                <Typography
                  className={classes.name}
                  variant="h3"
                >
                  Enhance the way you study!
                </Typography>
                <br></br>
                <Typography
                  className={classes.bio}
                  variant="body1"
                  align = "center"
                >
                  Start meeting people in your area who are taking similar courses or <br/>
                  study on your own using our online courses. Track your progress and <br/>
                  connect with friends to innovate your learning.
                </Typography>
              </div>
            </div>
          </div>
        </Grid>
        <Grid
          className={classes.content}
          item
          lg={7}
          xs={12}
        >
          <div className={classes.content}>
            <div className={classes.contentHeader}>
              <IconButton onClick={handleBack}>
                <ArrowBackIcon />
              </IconButton>
            </div>
            <div className={classes.contentBody}>
              <form
                className={classes.form}
                onSubmit={handleSignUp}
              >
                <Typography
                  className={classes.title}
                  variant="h2"
                >
                  Create new account
                </Typography>
                <Typography
                  color="textSecondary"
                  gutterBottom
                >
                  Use your email to create new account
                </Typography>
                <TextField
                  className={classes.textField}
                  error={hasError('firstName')}
                  fullWidth
                  helperText={
                    hasError('firstName') ? formState.errors.firstName[0] : null
                  }
                  label="First name"
                  name="firstName"
                  onChange={handleChange}
                  type="text"
                  value={formState.values.firstName || ''}
                  variant="outlined"
                />
                <TextField
                  className={classes.textField}
                  error={hasError('lastName')}
                  fullWidth
                  helperText={
                    hasError('lastName') ? formState.errors.lastName[0] : null
                  }
                  label="Last name"
                  name="lastName"
                  onChange={handleChange}
                  type="text"
                  value={formState.values.lastName || ''}
                  variant="outlined"
                />
                <TextField
                  className={classes.textField}
                  error={hasError('email')}
                  fullWidth
                  helperText={
                    hasError('email') ? formState.errors.email[0] : null
                  }
                  label="Email address"
                  name="email"
                  onChange={handleChange}
                  type="text"
                  value={formState.values.email || ''}
                  variant="outlined"
                />
                <TextField
                  className={classes.textField}
                  error={hasError('password')}
                  fullWidth
                  helperText={
                    hasError('password') ? formState.errors.password[0] : null
                  }
                  label="Password"
                  name="password"
                  onChange={handleChange}
                  type="password"
                  value={formState.values.password || ''}
                  variant="outlined"
                />
                <div className={classes.policy}>
                  <Checkbox
                    checked={formState.values.policy || false}
                    className={classes.policyCheckbox}
                    color="primary"
                    name="policy"
                    onChange={handleChange}
                  />
                  <Typography
                    className={classes.policyText}
                    color="textSecondary"
                    variant="body1"
                  >
                    I have read the{' '}
                    <Link
                      color="primary"
                      component={RouterLink}
                      to="#"
                      underline="always"
                      variant="h6"
                    >
                      Terms and Conditions
                    </Link>
                  </Typography>
                </div>
                {hasError('policy') && (
                  <FormHelperText error>
                    {formState.errors.policy[0]}
                  </FormHelperText>
                )}
                <Button
                  className={classes.signUpButton}
                  color="primary"
                  disabled={!formState.isValid}
                  fullWidth
                  size="large"
                  type="submit"
                  variant="contained"
                >
                  Sign up now
                </Button>
                <Typography
                  color="textSecondary"
                  variant="body1"
                >
                  Have an account?{' '}
                  <Link
                    component={RouterLink}
                    to="/sign-in"
                    variant="h6"
                  >
                    Sign in
                  </Link>
                </Typography>
              </form>
            </div>
          </div>
        </Grid>
      </Grid>
    </div>
  );
};
// `history` is supplied by withRouter below, not by the parent component.
SignUp.propTypes = {
  history: PropTypes.object
};
export default withRouter(SignUp);
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
class TestOutlierDetector():
    """Evaluate outlier detectors on labelled (signal vs. background) dijet data."""

    def __init__(self):
        # Target/metadata columns that must never be used as model features.
        self.targets = ['is_signal', 'mass_jj']

    def create_testing_dataframe(self, df, feature_cols=None, sample_frac=1.):
        """Return a copy of *df*, optionally restricted to *feature_cols*
        (plus the target columns) and with the signal class down-sampled
        by *sample_frac* (then shuffled)."""
        # optionally use only a subset of features
        if feature_cols:
            df_sb = df[feature_cols + self.targets].copy()
        else:
            df_sb = df.copy()
        # optionally further reduce signal-to-background ratio from default of 1:10
        if (sample_frac < 1.) & (sample_frac > 0.):
            df_s = df_sb[df_sb['is_signal'] == 1].sample(frac=sample_frac)
            df_b = df_sb[df_sb['is_signal'] == 0]
            # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
            # pd.concat is the supported replacement.
            df_sb = pd.concat([df_s, df_b]).sample(frac=1).reset_index(drop=True)
        return df_sb

    def score_outlier_detector(self, df, outlier_detector, spliter,
                               transformer=None, score_name='score'):
        """Cross-validate *outlier_detector* using *spliter* and return a frame
        with the target columns plus per-event outlier scores in *score_name*."""
        # get features and targets
        X = df.drop(columns=self.targets)
        y = df['is_signal']
        # collect per-split score frames; concatenated once after the loop
        split_frames = []
        # cross validation
        for tr_idx, te_idx in spliter.split(X, y):
            # optionally apply feature transformation (fit on train only)
            if transformer:
                X_tr = transformer.fit_transform(X.iloc[tr_idx])
                X_te = transformer.transform(X.iloc[te_idx])
            else:
                X_tr = X.iloc[tr_idx]
                X_te = X.iloc[te_idx]
            # initialize and fit model
            od = outlier_detector
            od.fit(X_tr)
            # score the held-out events of this split
            split_frames.append(pd.DataFrame(
                data={score_name: od.decision_function(X_te), 'index': te_idx}))
        df_split_scores = pd.concat(split_frames, ignore_index=True)
        # merge results with the target columns
        # BUG FIX: the original merged only the LAST split's frame
        # (df_split_score), leaving NaN scores for every other split's rows.
        result = pd.merge(df[self.targets], df_split_scores, how='left',
                          left_index=True, right_on='index')
        return result.drop(columns=['index'])

    def test_outlier_detector(self, df, outlier_detector, spliter,
                              transformer=None, feature_cols=None, sample_frac=1.):
        """Score *outlier_detector* on signal+background and on background only.

        Returns a (sb_scores, b_scores) pair of score frames."""
        # create signal+background dataframe
        df_sb = self.create_testing_dataframe(df, feature_cols=feature_cols,
                                              sample_frac=sample_frac)
        # and background only dataframe
        df_b = df_sb[df_sb['is_signal'] == 0]
        # compute scores for signal+background
        sb_scores = self.score_outlier_detector(df_sb, outlier_detector, spliter,
                                                transformer=transformer,
                                                score_name='score_sb')
        # and background only
        b_scores = self.score_outlier_detector(df_b, outlier_detector, spliter,
                                               transformer=transformer,
                                               score_name='score_b')
        # delete unnecessary data
        del df_sb, df_b
        # return scores
        return (sb_scores, b_scores)

    def get_bin_edges(self, df, ntile=0.01, bin_width=100):
        """Bin edges (default width 100 GeV) covering the central
        (1 - 2*ntile) range of mass_jj, rounded to the nearest 100 GeV and
        shifted half a bin so quantile values fall on bin centers."""
        left = np.round(df['mass_jj'].quantile(ntile), -2)
        right = np.round(df['mass_jj'].quantile(1 - ntile), -2)
        return np.arange(left - bin_width / 2, right + 3 * bin_width / 2, bin_width)

    def bin_means(self, bins):
        """Midpoints of consecutive bin edges."""
        return [(bins[i] + bins[i + 1]) / 2 for i in range(len(bins) - 1)]

    def get_histogram(self, df, bins, q=0.0, feature='score'):
        """Histogram of mass_jj for the (1 - q) fraction of events with the
        highest values of *feature*."""
        # select the 1 - q fraction of events with the highest outlier scores
        qcut = df[feature] > df[feature].quantile(q)
        # create histogram of the selected events
        return np.histogram(df[qcut]['mass_jj'], bins=bins)

    def get_histograms(self, sb_scores, b_scores, quantiles=[0.0, 0.7, 0.9], ntile=0.01):
        """Mass histograms at several score-quantile cuts, for the
        signal+background and background-only samples (shared binning)."""
        # create bins for histograms (from the signal+background sample)
        hist_bins = self.get_bin_edges(sb_scores, ntile=ntile)
        # signal+background histograms, one per quantile cut
        hist_sb = [self.get_histogram(sb_scores, hist_bins, q=q, feature='score_sb')
                   for q in quantiles]
        # background histograms
        hist_b = [self.get_histogram(b_scores, hist_bins, q=q, feature='score_b')
                  for q in quantiles]
        return (hist_sb, hist_b)

    def plot_outlier_detections(self, sb_scores_list, b_scores_list,
                                plot_signal_region=1, quantiles=[0.0, 0.7, 0.9],
                                figsize=(9, 9), title_list=None, ntile=0.01,
                                file_name=None, file_types=['.png']):
        """For each (sb_scores, b_scores) pair, plot the scaled background
        expectation against signal+background counts and mark the inferred
        signal region with dashed lines. Optionally saves the figure."""
        n = len(sb_scores_list)
        plt.figure(figsize=figsize)
        for i in range(n):
            plt.subplot(int(np.ceil(n / 2)), 2, i + 1)
            sb_scores = sb_scores_list[i]
            b_scores = b_scores_list[i]
            # ratio of # of signal+background events to just background events
            r = len(sb_scores) / len(b_scores)
            # get score histograms
            hist_sb, hist_b = self.get_histograms(sb_scores, b_scores,
                                                  quantiles=quantiles, ntile=ntile)
            # plot background only scores (scaled by r to be comparable)
            for hist in hist_b:
                plt.plot(self.bin_means(hist[1]), r * hist[0], color='r', linewidth=1)
            # plot signal+background scores
            for hist in hist_sb:
                plt.scatter(self.bin_means(hist[1]), hist[0], marker='.', color='b')
            # plot true signal histogram
            plt.hist(sb_scores[sb_scores['is_signal'] == 1]['mass_jj'],
                     bins=hist_sb[0][1], alpha=0.4, color='y')
            # find the center of the signal region: local maxima of each curve
            sig_maxs = argrelextrema(hist_sb[plot_signal_region][0], np.greater)[0]
            bkg_maxs = argrelextrema(hist_b[plot_signal_region][0], np.greater)[0]
            # the background should only have one peak; rank the s+b peaks by
            # distance from it and drop the nearest (the shared background peak)
            sig_maxs_sorted = sorted(sig_maxs, key=lambda x: (x - bkg_maxs[0]) ** 2)
            keys = sig_maxs_sorted[1:]
            # find the masses associated with the remaining peak indices
            masses = [self.bin_means(hist_sb[plot_signal_region][1][key:key + 2])
                      for key in keys]
            # indicate signal region with dashed lines
            plt.vlines(masses, 10 ** 1, 10 ** 6, linestyles='dashed')
            # label plot
            if title_list is not None:
                plt.title(title_list[i])
            plt.xlabel(r'$m_{JJ} /\rm\, GeV$')
            binwidth = int(hist_sb[plot_signal_region][1][1]
                           - hist_sb[plot_signal_region][1][0])
            plt.ylabel(r'$\rm Events\, /\, %i\, GeV$' % binwidth)
            plt.yscale('log')
            plt.ylim(10 ** 1, 10 ** 6)
        # FIX: the bool argument to tight_layout was deprecated/removed in
        # newer matplotlib; call it without arguments.
        plt.tight_layout()
        if file_name:
            for file_type in file_types:
                plt.savefig(file_name + file_type)
        plt.show()
|
/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkShapeSignedDistanceFunction_h
#define itkShapeSignedDistanceFunction_h
#include "itkSpatialFunction.h"
#include "itkOptimizerParameters.h"
namespace itk
{
/**
*\class ShapeSignedDistanceFunction
* \brief Base class for functions which evaluates the signed distance
* from a shape.
*
* ShapeSignedDistanceFunction is the base class for functions which
* returns the signed distance from a shape at an arbitrary point.
* A shape assumed to be defined by a set of shape and pose parameters.
*
* Note that Initialize() must be called before use.
* This allows the class an opportunity to validate any inputs.
*
* This class is templated over the coordinate representation type
* (e.g. float or double) and the space dimension.
*
* ShapeSignedDistanceFunction is used to encapsulate the shape prior
* in ShapePriorSegmentationLevelSetFunctions.
*
* \sa SpatialFunction
* \sa ShapePriorSegmentationLevelSetFunction
*
* \ingroup ImageFunctions
*
*
* \ingroup ITKSignedDistanceFunction
*/
template <typename TCoordRep, unsigned int VSpaceDimension>
class ShapeSignedDistanceFunction : public SpatialFunction<double, VSpaceDimension, Point<TCoordRep, VSpaceDimension>>
{
public:
  ITK_DISALLOW_COPY_AND_ASSIGN(ShapeSignedDistanceFunction);
  /** Standard class type aliases. */
  using Self = ShapeSignedDistanceFunction;
  using Superclass = SpatialFunction<double, VSpaceDimension, Point<TCoordRep, VSpaceDimension>>;
  using Pointer = SmartPointer<Self>;
  using ConstPointer = SmartPointer<const Self>;
  /** Run-time type information (and related methods). */
  itkTypeMacro(ShapeSignedDistanceFunction, SpatialFunction);
  /** OutputType type alias support */
  using OutputType = typename Superclass::OutputType;
  /** InputType type alias support */
  using InputType = typename Superclass::InputType;
  /** Dimension underlying input image. */
  static constexpr unsigned int SpaceDimension = VSpaceDimension;
  /** CoordRep type alias support */
  using CoordRepType = TCoordRep;
  /** Point type alias support */
  using PointType = InputType;
  /** Type of the shape parameters. */
  using ParametersType = OptimizerParameters<double>;
  /** A shape is defined by a set of shape parameters. */
  virtual void
  SetParameters(const ParametersType &) = 0;
  /** Access the current parameter vector. Note this returns a mutable
   * reference to m_Parameters, so callers can modify it in place. */
  virtual ParametersType &
  GetParameters()
  {
    return m_Parameters;
  }
  /** Number of shape (as opposed to pose) parameters; each concrete shape
   * subclass defines its own count. */
  virtual unsigned int
  GetNumberOfShapeParameters() const = 0;
  /** Number of pose (position/orientation) parameters. */
  virtual unsigned int
  GetNumberOfPoseParameters() const = 0;
  /** Total parameter count: shape parameters followed by pose parameters. */
  virtual unsigned int
  GetNumberOfParameters() const
  {
    return this->GetNumberOfShapeParameters() + this->GetNumberOfPoseParameters();
  }
  /** Evaluate the signed distance from a shape at a given position. */
  OutputType
  Evaluate(const PointType & point) const override = 0;
  /** Initialize must be called before the first call of SetParameters() or
     Evaluate() to allow the class to validate any inputs. */
  virtual void
  Initialize()
  {}
protected:
  ShapeSignedDistanceFunction() = default;
  ~ShapeSignedDistanceFunction() override = default;
  void
  PrintSelf(std::ostream & os, Indent indent) const override
  {
    Superclass::PrintSelf(os, indent);
    // FIX os << indent << "Parameters: " << m_Parameters << std::endl;
  }
  /** Current shape + pose parameter vector (exposed via GetParameters()). */
  ParametersType m_Parameters;
};
} // end namespace itk
#endif
|
define(["utils"], function(utils) {
    /** CallbackManager()
        Registry of named callback lists with d3-style namespacing.
     */
    var CallbackManager = utils.make_class();
    CallbackManager.prototype = { init: init,
                                  set: set,
                                  remove: remove,
                                  run: run };
    return CallbackManager;

    function init() {
    }
    function set(name, fn) {
        /** As in d3 callbacks, you can namespace your callbacks after a period:
            select_metabolite.direction_arrow
            select_metabolite.input
            Both are called by select_metabolite
         */
        if (this.callbacks===undefined) this.callbacks = {};
        if (this.callbacks[name]===undefined) this.callbacks[name] = [];
        this.callbacks[name].push(fn);
        return this;
    }
    function remove(name) {
        /** Remove a callback by name
         */
        if (this.callbacks===undefined || Object.keys(this.callbacks).length==0) {
            console.warn('No callbacks to remove');
            // BUG FIX: the original fell through here, so when this.callbacks
            // was undefined, `delete this.callbacks[name]` threw a TypeError.
            return this;
        }
        delete this.callbacks[name];
        return this;
    }
    function run(name) {
        /** Run all callbacks that match the portion of name before the period ('.').
         */
        if (this.callbacks===undefined) return this;
        // pass all but the first (name) argument to the callback
        var pass_args = Array.prototype.slice.call(arguments, 1);
        // look for matching callback names
        for (var a_name in this.callbacks) {
            var split_name = a_name.split('.')[0];
            if (split_name==name) {
                this.callbacks[a_name].forEach(function(fn) {
                    fn.apply(null, pass_args);
                });
            }
        }
        return this;
    }
});
|
"use strict";
// Compiled CommonJS module: exposes flattenUniq as the default export.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.default = flattenUniq;
// Copyright 2017-2020 @polkadot/metadata authors & contributors
// This software may be modified and distributed under the terms
// of the Apache-2.0 license. See the LICENSE file for details.
/** @internal */
// Recursively flattens nested arrays, drops falsy entries, de-duplicates,
// and returns the result sorted (default lexicographic comparison).
function flattenUniq(list) {
  const flat = [];
  for (const entry of list) {
    if (Array.isArray(entry)) {
      flat.push(...flattenUniq(entry));
    } else {
      flat.push(entry);
    }
  }
  return [...new Set(flat)].filter(value => value).sort();
}
|
import json
import os
import cv2 as cv
from torch.utils.data import Dataset
from torchvision import transforms
from config import image_folder
# ImageNet channel statistics shared by both pipelines.
_NORM_MEAN = [0.485, 0.456, 0.406]
_NORM_STD = [0.229, 0.224, 0.225]

# Data augmentation and normalization for training;
# just resize/center-crop and normalization for validation.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(_NORM_MEAN, _NORM_STD),
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(_NORM_MEAN, _NORM_STD),
    ]),
}
class DeepIQADataset(Dataset):
    """Image-quality dataset backed by a JSON manifest of img_path/label samples."""

    def __init__(self, split):
        # split is 'train' or 'valid'; it selects both the manifest file
        # ('<split>.json') and the matching transform pipeline.
        filename = '{}.json'.format(split)
        with open(filename, 'r') as file:
            samples = json.load(file)
        self.samples = samples
        self.transformer = data_transforms[split]

    def __getitem__(self, i):
        sample = self.samples[i]
        img_path = sample['img_path']
        label = sample['label']
        img_path = os.path.join(image_folder, img_path)
        img = cv.imread(img_path)
        # BUG FIX: OpenCV loads BGR; the original used the reversed slice view
        # img[..., ::-1], which has negative strides and is rejected by PIL's
        # Image.fromarray inside ToPILImage. cvtColor returns a contiguous
        # RGB copy instead.
        img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        img = transforms.ToPILImage()(img)
        img = self.transformer(img)
        return img, label

    def __len__(self):
        return len(self.samples)
if __name__ == "__main__":
    # Smoke test: build both splits, report their sizes, then materialize one
    # sample from each to exercise the full image-loading pipeline.
    train = DeepIQADataset('train')
    print('num_train: ' + str(len(train)))
    valid = DeepIQADataset('valid')
    print('num_valid: ' + str(len(valid)))
    print(train[0])
    print(valid[0])
|
from PIL import Image
from sys import platform
import imagehash
print("Flagpole")
userFlagDir = str(input("Enter the directory that stores your flags > "))
userInput = str(input("Enter the path to the flag you want to test > "))

try:
    userHash = imagehash.average_hash(Image.open(userInput))
except (OSError, ValueError):
    # Narrowed from a bare `except:`: only file-open / image-decode failures
    # should abort (a bare except also swallowed KeyboardInterrupt/SystemExit).
    print("Invalid File - Must be a local file path")
    exit()

# Path separator for the flag directory, computed once instead of branching
# on the platform inside every hash comparison.
_SEP = "\\" if platform.lower() == "win32" else "/"

def image(x):
    """Return the perceptual-hash distance between the user's image and the
    stored flag named *x* (0 means identical).

    FIX: the original ended with a stray trailing comma, so every value was
    an accidental 1-tuple; now the plain integer distance is returned."""
    return userHash - imagehash.average_hash(Image.open(userFlagDir + _SEP + x + ".png"))

# Distance from the user's image to every known flag.
countries = {name: image(name) for name in (
    "Abkhazia", "Afghanistan", "Albania", "Algeria", "Andorra", "Angola",
    "Antigua and Barbuda", "Argentina", "Armenia", "Artsakh", "Australia",
    "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados",
    "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia",
    "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria",
    "Burkina Faso", "Burundi", "Cambodia", "Cameroon",
)}

print("Calculating...")
# Compute the best match once instead of calling min() twice.
best = min(countries, key=countries.get)
print("Looks Like: " + best)
print("Similarity:", countries[best], "(where 0 is identical)")
input()
|
"""SCons.Scanner.Python
This module implements the dependency scanner for Python code.
One important note about the design is that this does not take any dependencies
upon packages or binaries in the Python installation unless they are listed in
PYTHONPATH. To do otherwise would have required code to determine where the
Python installation is, which is outside of the scope of a scanner like this.
If consumers want to pick up dependencies upon these packages, they must put
those directories in PYTHONPATH.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import itertools
import os
import re
import SCons.Scanner
# Capture python "from a import b" and "import a" statements.
# re.M anchors ^ at every line start so findall scans the whole file at once;
# the second group of from_cre deliberately captures the full name list
# (including any trailing "as alias", stripped later in find_include_names).
from_cre = re.compile(r'^\s*from\s+([^\s]+)\s+import\s+(.*)', re.M)
import_cre = re.compile(r'^\s*import\s+([^\s]+)', re.M)
def path_function(env, dir=None, target=None, source=None, argument=None):
    """Retrieves a tuple with all search paths.

    PYTHONPATH entries come first, followed by the directory of the file
    being scanned (when a source node is supplied)."""
    search_dirs = env['ENV'].get('PYTHONPATH', '').split(os.pathsep)
    if source:
        search_dirs = search_dirs + [source[0].dir.abspath]
    return tuple(search_dirs)
def find_include_names(node):
    """
    Scans the node for all imports.
    Returns a list of tuples. Each tuple has two elements:
    1. The main import (e.g. module, module.file, module.module2)
    2. Additional optional imports that could be functions or files
       in the case of a "from X import Y" statement. In the case of a
       normal "import" statement, this is None.
    """
    text = node.get_text_contents()
    results = []
    # "from X import a, b" statements first
    for module, imported in from_cre.findall(text):
        names = [part.strip() for part in imported.split(',')]
        # The regex also captures a trailing "as alias"; keep only the
        # real imported name.
        tail = names[-1].split()
        if len(tail) > 1:
            names[-1] = tail[0]
        results.append((module, names))
    # then plain "import X" statements
    for module in import_cre.findall(text):
        results.append((module, None))
    return results
def scan(node, env, path=()):
    """Scanner entry point: return the sorted list of Python files (modules
    and package __init__.py files) that *node* imports, resolved against
    *path* (PYTHONPATH + the node's own directory)."""
    # cache the includes list in node so we only scan it once:
    if node.includes is not None:
        includes = node.includes
    else:
        includes = find_include_names(node)
        # Intern the names of the include files. Saves some memory
        # if the same header is included many times.
        # NOTE(review): SCons.Util is not imported explicitly in this module;
        # this presumably works because importing SCons.Scanner pulls it in --
        # confirm, or add an explicit `import SCons.Util`.
        node.includes = list(map(SCons.Util.silent_intern, includes))
    # XXX TODO: Sort?
    nodes = []
    if callable(path):
        path = path()
    for module, imports in includes:
        is_relative = module.startswith('.')
        if is_relative:
            # This is a relative include, so we must ignore PYTHONPATH.
            module_lstripped = module.lstrip('.')
            # One dot is current directory, two is parent, three is
            # grandparent, etc.
            num_parents = len(module) - len(module_lstripped) - 1
            current_dir = node.get_dir()
            for i in itertools.repeat(None, num_parents):
                current_dir = current_dir.up()
            search_paths = [current_dir.abspath]
            search_string = module_lstripped
        else:
            search_paths = path
            search_string = module
        module_components = search_string.split('.')
        for search_path in search_paths:
            candidate_path = os.path.join(search_path, *module_components)
            # The import stored in "module" could refer to a directory or file.
            import_dirs = []
            if os.path.isdir(candidate_path):
                import_dirs = module_components
                # Because this resolved to a directory, there is a chance that
                # additional imports (e.g. from module import A, B) could refer
                # to files to import.
                if imports:
                    for imp in imports:
                        file = os.path.join(candidate_path, imp + '.py')
                        if os.path.isfile(file):
                            nodes.append(file)
            elif os.path.isfile(candidate_path + '.py'):
                nodes.append(candidate_path + '.py')
                import_dirs = module_components[:-1]
                # We can ignore imports because this resolved to a file. Any
                # additional imports (e.g. from module.file import A, B) would
                # only refer to functions in this file.
            # Take a dependency on all __init__.py files from all imported
            # packages unless it's a relative import. If it's a relative
            # import, we don't need to take the dependency because Python
            # requires that all referenced packages have already been imported,
            # which means that the dependency has already been established.
            if import_dirs and not is_relative:
                for i in range(len(import_dirs)):
                    init_components = module_components[:i+1] + ['__init__.py']
                    init_path = os.path.join(search_path, *(init_components))
                    if os.path.isfile(init_path):
                        nodes.append(init_path)
                # module resolved in this search path: stop searching the rest
                break
    return sorted(nodes)
# File suffixes this scanner registers for.
PythonSuffixes = ['.py']
# recursive=1 so that discovered .py dependencies are themselves scanned.
PythonScanner = SCons.Scanner.Base(scan, name='PythonScanner',
                                   skeys=PythonSuffixes,
                                   path_function=path_function, recursive=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
// SCRIPT TIMING: logs start/end timestamps and the elapsed wall-clock time.
var scriptStartTime = new Date();
console.log("Global Script Started at : " + scriptStartTime.getHours() + "h : "+scriptStartTime.getMinutes() + "m : " + scriptStartTime.getSeconds() + "s");
var scriptEndTime = new Date();
// BUG FIX: declare the variable -- the original assignment created an
// implicit global (and throws a ReferenceError in strict mode).
var scriptTimingMs = parseFloat(scriptEndTime.getTime() - scriptStartTime.getTime());
console.log("Global Script ENDED at : " + scriptEndTime.getHours() + "h : "+scriptEndTime.getMinutes() + "m : " + scriptEndTime.getSeconds() + "s");
console.log("Le script a mis " + scriptTimingMs/1000 + " secondes.");
// RADIO BOX VANILLA
// RADIO BOX, vanilla JS: shows the value of the checked "mesCases" button.
function radio()
{
    var boxes = document.getElementById("idForm").mesCases;
    var favoriteDish;
    // look for the checked button (if any)
    var i = 0;
    while (i < boxes.length && !favoriteDish) {
        if (boxes[i].checked) {
            favoriteDish = boxes[i].value;
        }
        i++;
    }
    // if one was found, display the matching value
    if (favoriteDish) {
        alert("Votre plat favori est : " + favoriteDish);
    }
}
// ================================
// | AJAX - VANILLA JS |
// ================================
// Here is how you can submit your form via Ajax:
// Sends the name/email fields to the server with a GET request and alerts
// the response. Falls back to ActiveXObject for legacy IE.
function submitFormAjax() {
    let xmlhttp = window.XMLHttpRequest
        ? new XMLHttpRequest()
        : new ActiveXObject("Microsoft.XMLHTTP");
    xmlhttp.onreadystatechange = function() {
        if (this.readyState === 4 && this.status === 200) {
            alert(this.responseText); // Here is the response
        }
    }
    let name = document.getElementById('name').innerHTML;
    let email = document.getElementById('email').innerHTML;
    xmlhttp.open("GET","your_url.php?name=" + name + "&email=" + email, true);
    xmlhttp.send();
}
// This example is using GET, but you could also use POST:
// POST variant of the request above: form-encoded body instead of a query string.
xmlhttp.open("POST","your_url.php",true);
xmlhttp.setRequestHeader("Content-type","application/x-www-form-urlencoded");
xmlhttp.send("name=" + name + "&email=" + email);
// Note:
// You must call submitFormAjax() after validateFormOnSubmit is done with no errors, here:
// Illustrative snippet from the validation callback (not a standalone function):
if (reason.length == 0) {
    // Show some loading image and submit form
    submitFormAjax();
} else {
    return false;
}
|
'use strict';
// Unwrap a Babel/Rollup-style module object: prefer its `default` export
// when present, otherwise return the value unchanged.
function _interopDefault (ex) {
    if (ex && typeof ex === 'object' && 'default' in ex) {
        return ex['default'];
    }
    return ex;
}
var fs = require('fs');
var path = require('path');
var hasha = _interopDefault(require('hasha'));
const cheerio = require('cheerio');
function traverse(dir, list) {
const dirList = fs.readdirSync(dir);
dirList.forEach(node => {
const file = `${dir}/${node}`;
if (fs.statSync(file).isDirectory()) {
traverse(file, list);
} else {
if (/\.js$/.test(file)) {
list.push({ type: 'js', file });
} else if (/\.css$/.test(file)) {
list.push({ type: 'css', file });
}
}
});
}
// True when `url` is absolute: it starts with a scheme ("http:", "ftp:", ...)
// or is protocol-relative ("//host/...").
function isURL(url) {
    return /^(?:[a-z]+:)?\/\//i.test(url);
}
// Strips JS/CSS comments from `str` and collapses runs of whitespace to a
// single space. String literals are matched first so their contents survive.
function collapseWhitespaceAll(str) {
    // Alternation: double-quoted string | single-quoted string
    // | line comment (//... up to EOL) | block comment (/* ... */).
    const reg = /("([^\\\"]*(\\.)?)*")|('([^\\\']*(\\.)?)*')|(\/{2,}.*?(\r|\n))|(\/\*(\n|.)*?\*\/)/g; // comment/string regex
    return str && str.replace(reg, function(word) { // drop comments, keep string literals
        return /^\/{2,}/.test(word) || /^\/\*/.test(word) ? "" : word;
    }).replace(/[ \n\r\t\f\xA0]+/g, function(spaces) {
        // Collapse each whitespace run to one space; a lone tab is preserved.
        return spaces === '\t' ? '\t' : spaces.replace(/(^|\xA0+)[^\xA0]+/g, '$1 ');
    })
}
/**
 * Rollup plugin: injects the emitted js/css bundles (plus optional external
 * resources) into an HTML template and writes the result into the dest dir.
 * Options: template, filename, externals, inject ('head'|'body'), dest,
 * absolute, ignore (regex), onlinePath (CDN prefix), favicon, defer.
 */
var index = (opt = {}) => {
    const { template, filename, externals, inject, dest, absolute, ignore, onlinePath, favicon, defer } = opt;
    return {
        name: 'html',
        writeBundle(config, data) {
            // NOTE(review): no `s` flag, so a template *string* containing
            // newlines will not match and is treated as a file path --
            // confirm callers only pass single-line HTML strings.
            const isHTML = /^.*<html>.*<\/html>$/.test(template);
            let html = isHTML ? template : fs.readFileSync(template).toString();
            html = collapseWhitespaceAll(html);
            const $ = cheerio.load(html, {decodeEntities: false});
            const head = $('head');
            const body = $('body');
            // Find the entry chunk to recover its fileName / sourcemap flag.
            let entryConfig = {};
            Object.values(config).forEach((c) => {
                if (c && c.isEntry) entryConfig = c;
            });
            const { fileName = filename, sourcemap } = entryConfig;
            const fileList = [];
            // relative('./', file) will not be equal to file when file is a absolute path
            const destPath = path.relative('./', fileName);
            const destDir = dest || destPath.slice(0, destPath.indexOf(path.sep));
            const destFile = `${destDir}/${filename || path.basename(template)}`;
            const absolutePathPrefix = absolute ? '/' : '';
            traverse(destDir, fileList);
            // Externals marked pos === 'before' go to the front (in order);
            // the rest are appended after the discovered bundle files.
            if (Array.isArray(externals)) {
                let firstBundle = 0;
                externals.forEach(function(node) {
                    if (node.pos === 'before') {
                        fileList.splice(firstBundle++, 0, node);
                    } else {
                        fileList.splice(fileList.length, 0, node);
                    }
                });
            }
            // Bug fix: the favicon link used to be appended inside the
            // per-file loop below, producing one duplicate <link> per bundle
            // file. Append it exactly once instead.
            if (favicon) {
                head.append(`<link rel="shortcut icon" href="${favicon}">`);
            }
            fileList.forEach(node => {
                let { type, file } = node;
                if (ignore && file.match(ignore)) {
                    return;
                }
                let hash = '';
                let code = '';
                if (/\[hash\]/.test(file)) {
                    if (file === destPath) {
                        // data.code will remove the last line of the source code(//# sourceMappingURL=xxx), so it's needed to add this
                        code = data.code + `//# sourceMappingURL=${path.basename(file)}.map`;
                    } else {
                        code = fs.readFileSync(file).toString();
                    }
                    if (sourcemap) {
                        let srcmapFile = file + ".map";
                        let srcmapCode = fs.readFileSync(srcmapFile).toString();
                        let srcmapHash = hasha(srcmapCode, { algorithm: 'md5' });
                        // remove the source map file without hash
                        fs.unlinkSync(srcmapFile);
                        srcmapFile = srcmapFile.replace('[hash]', srcmapHash);
                        fs.writeFileSync(srcmapFile, srcmapCode);
                        code = code.replace(`//# sourceMappingURL=${path.basename(file)}.map`, `//# sourceMappingURL=${path.basename(srcmapFile)}`);
                    }
                    hash = hasha(code, { algorithm: 'md5' });
                    // remove the file without hash
                    fs.unlinkSync(file);
                    file = file.replace('[hash]', hash);
                    fs.writeFileSync(file, code);
                }
                let src = isURL(file) ? file : absolutePathPrefix + path.relative(destDir, file).replace(/\\/g, '/');
                if (onlinePath) {
                    const filename = file.split('/').slice(-1)[0];
                    const slash = onlinePath.slice(-1) === '/' ? '' : '/';
                    src = onlinePath + slash + filename;
                }
                // Cache-busting timestamp. NOTE(review): the `node.timestamp`
                // guard is commented out, so the param is always appended --
                // confirm that is intended.
                // if (node.timestamp) {
                src += '?t=' + (new Date()).getTime();
                // }
                if (type === 'js') {
                    let script = `<script type="text/javascript" src="${src}"></script>\n`;
                    if (defer) {
                        script = script.replace('type="text/javascript"', 'type="text/javascript" defer');
                    }
                    // node.inject will cover the inject
                    if (node.inject === 'head' || inject === 'head') {
                        head.append(script);
                    } else {
                        body.append(script);
                    }
                } else if (type === 'css') {
                    head.append(`<link rel="stylesheet" href="${src}">\n`);
                }
            });
            fs.writeFileSync(destFile, $.html());
        }
    };
};
module.exports = index;
|
import { createSlice } from '@reduxjs/toolkit';
import { addAttendanceAsync, getAttendanceAsync, getSalesAsync, addSalesAsync } from './salesThunk';
// Initial Redux state for the sales slice. Each async flow keeps its own
// request status ('idle' | 'loading'), last error payload, and result field.
export const salesInitialState = {
    // addAttendanceAsync
    status: 'idle',
    error: {},
    success: false,
    // getAttendanceAsync
    getattendancestatus: 'idle',
    getattendanceerror: {},
    getattendancedata: null,
    // getSalesAsync
    getsalesstatus: 'idle',
    getsaleserror: {},
    getsalesdata: null,
    // addSalesAsync
    setaddSalesStatus: 'idle',
    setaddSalesError: {},
    setaddSalesisSuccess: false
};
// Sales slice: mirrors the pending/rejected/fulfilled lifecycle of the
// attendance and sales thunks into the status/error/data fields declared
// in salesInitialState.
export const salesSlice = createSlice({
    name: 'sales',
    initialState: salesInitialState,
    reducers: {
        // Clears every error object and success flag; statuses are left as-is.
        resetData: (state, action) => {
            console.log(action.payload, '-------------resetData');
            state.error = {};
            state.getattendanceerror = {};
            state.getsaleserror = {};
            state.setaddSalesError = {};
            state.success = false;
            state.setaddSalesisSuccess = false;
        }
    },
    extraReducers: (builder) => {
        builder
            // getAttendanceAsync: loading -> data on success / error on failure
            .addCase(getAttendanceAsync.pending, (state) => {
                state.getattendancestatus = 'loading';
            })
            .addCase(getAttendanceAsync.rejected, (state, action) => {
                state.getattendancestatus = 'idle';
                console.log(action.payload, '---------------action.payload');
                state.getattendanceerror = action.payload;
            })
            .addCase(getAttendanceAsync.fulfilled, (state, action) => {
                state.getattendancestatus = 'idle';
                state.getattendancedata = action.payload.data;
                console.log(action.payload.data, '---------------action.payload');
            })
            // addAttendanceAsync: only a success flag is kept, not the payload
            .addCase(addAttendanceAsync.pending, (state) => {
                state.status = 'loading';
            })
            .addCase(addAttendanceAsync.rejected, (state, action) => {
                state.status = 'idle';
                console.log(action.payload, '---------------action.payload');
                state.error = action.payload;
            })
            .addCase(addAttendanceAsync.fulfilled, (state, action) => {
                state.status = 'idle';
                state.success = true;
                console.log(action.payload, '---------------action.payload');
            })
            // addSalesAsync: only a success flag is kept, not the payload
            .addCase(addSalesAsync.pending, (state) => {
                state.setaddSalesStatus = 'loading';
            })
            .addCase(addSalesAsync.rejected, (state, action) => {
                state.setaddSalesStatus = 'idle';
                console.log(action.payload, '---------------action.payload');
                state.setaddSalesError = action.payload;
            })
            .addCase(addSalesAsync.fulfilled, (state, action) => {
                state.setaddSalesStatus = 'idle';
                state.setaddSalesisSuccess = true;
                console.log(action.payload, '---------------action.payload');
            })
            // getSalesAsync: stores the whole payload (no .data unwrapping,
            // unlike getAttendanceAsync above)
            .addCase(getSalesAsync.pending, (state) => {
                state.getsalesstatus = 'loading';
            })
            .addCase(getSalesAsync.rejected, (state, action) => {
                state.getsalesstatus = 'idle';
                console.log(action.payload, '---------------action.payload');
                state.getsaleserror = action.payload;
            })
            .addCase(getSalesAsync.fulfilled, (state, action) => {
                state.getsalesstatus = 'idle';
                state.getsalesdata = action.payload;
                console.log(action.payload, '---------------action.payload');
            });
    }
});
// Action creators are generated for each case reducer function
export const { resetData } = salesSlice.actions;
export { addAttendanceAsync, getAttendanceAsync, getSalesAsync, addSalesAsync };
export default salesSlice.reducer;
|
const fs = require('fs');
const path = require('path');
const moment = require('moment');
// Paths to the genesis JSON files bundled with the node.
const GENESIS_TOKEN = path.resolve(__dirname, 'blockchain/genesis_token.json');
const GENESIS_ACCOUNTS = path.resolve(__dirname, 'blockchain/genesis_accounts.json');
const GENESIS_OWNERS = path.resolve(__dirname, 'blockchain/genesis_owners.json');
// Optional extra owners config, given as "dbPath:filePath" in the env var.
const ADDITIONAL_OWNERS = process.env.ADDITIONAL_OWNERS ? {
  dbPath: process.env.ADDITIONAL_OWNERS.split(':')[0],
  filePath: path.resolve(__dirname, process.env.ADDITIONAL_OWNERS.split(':')[1])
} : null;
const GENESIS_RULES = path.resolve(__dirname, 'blockchain/genesis_rules.json');
// Optional extra rules config, same "dbPath:filePath" format.
const ADDITIONAL_RULES = process.env.ADDITIONAL_RULES ? {
  dbPath: process.env.ADDITIONAL_RULES.split(':')[0],
  filePath: path.resolve(__dirname, process.env.ADDITIONAL_RULES.split(':')[1])
} : null;
const GENESIS_FUNCTIONS = path.resolve(__dirname, 'blockchain/genesis_functions.json');
// Optional extra functions config, same "dbPath:filePath" format.
const ADDITIONAL_FUNCTIONS = process.env.ADDITIONAL_FUNCTIONS ? {
  dbPath: process.env.ADDITIONAL_FUNCTIONS.split(':')[0],
  filePath: path.resolve(__dirname, process.env.ADDITIONAL_FUNCTIONS.split(':')[1])
} : null;
const BLOCKCHAINS_DIR = path.resolve(__dirname, 'blockchain/blockchains');
const PROTOCOL_VERSIONS = path.resolve(__dirname, 'client/protocol_versions.json');
// Consensus stake amount; null when unset.
const STAKE = process.env.STAKE ? Number(process.env.STAKE) : null;
// DEBUG is truthy for any env value starting with 't'/'T' (e.g. "true").
const DEBUG = process.env.DEBUG ? process.env.DEBUG.toLowerCase().startsWith('t') : false;
// Maximum serialized transaction size in bytes.
const MAX_TX_BYTES = 10000;
// Transactions expire from the pool after 1 hour, from the tracker after 24h.
const TRANSACTION_POOL_TIME_OUT_MS = moment.duration(1, 'hours').as('milliseconds');
const TRANSACTION_TRACKER_TIME_OUT_MS = moment.duration(24, 'hours').as('milliseconds');
// TODO (lia): Check network id in all messages
const NETWORK_ID = process.env.NETWORK_ID || 'Testnet';
// HOSTING_ENV is a variable used in extracting the ip address of the host machine,
// of which value could be either 'local', 'default', or 'gcp'.
const HOSTING_ENV = process.env.HOSTING_ENV || 'default';
const ACCOUNT_INDEX = process.env.ACCOUNT_INDEX || null;
const TRACKER_WS_ADDR = process.env.TRACKER_WS_ADDR || 'ws://localhost:5000';
// HTTP and p2p ports; in the 'local' env they are offset per account index
// (getPortNumber is hoisted, so calling it here is safe).
const PORT = getPortNumber(8080, 8081);
const P2P_PORT = getPortNumber(5000, 5001);
/**
 * Returns the port to listen on.
 * In the 'local' hosting environment each account gets its own port, offset
 * from `baseValue` by ACCOUNT_INDEX; otherwise `defaultValue` is used.
 * @param {number} defaultValue Port used outside local development.
 * @param {number} baseValue Base port for local multi-node setups.
 * @return {number}
 */
function getPortNumber(defaultValue, baseValue) {
  // Strict comparison: HOSTING_ENV is always a string (see its declaration).
  if (HOSTING_ENV === 'local') {
    return baseValue + (ACCOUNT_INDEX !== null ? Number(ACCOUNT_INDEX) : 0);
  }
  return defaultValue;
}
/**
 * Message types for communication between nodes
 * @enum {string}
 */
const MessageTypes = {
  TRANSACTION: 'transaction',
  CHAIN_SUBSECTION: 'chain_subsection',
  CHAIN_SUBSECTION_REQUEST: 'chain_subsection_request',
  CONSENSUS: 'consensus'
};
/**
 * Predefined database paths
 * @enum {string}
 */
// TODO (lia): Pick one convention: full-paths (e.g. /deposit/consensus) or keys (e.g. token)
const PredefinedDbPaths = {
  // Roots
  OWNERS_ROOT: 'owners',
  RULES_ROOT: 'rules',
  FUNCTIONS_ROOT: 'functions',
  VALUES_ROOT: 'values',
  // Consensus
  CONSENSUS: 'consensus',
  // Token
  TOKEN: 'token',
  TOKEN_NAME: 'name',
  TOKEN_SYMBOL: 'symbol',
  TOKEN_TOTAL_SUPPLY: 'total_supply',
  // Accounts & Transfer
  ACCOUNTS: 'accounts',
  BALANCE: 'balance',
  TRANSFER: 'transfer',
  TRANSFER_VALUE: 'value',
  TRANSFER_RESULT: 'result',
  // Deposit & Withdraw
  // NOTE: the entries below use full paths with a leading slash, unlike the
  // bare keys above (see the TODO on the convention mix).
  DEPOSIT: '/deposit',
  DEPOSIT_ACCOUNTS: '/deposit_accounts',
  DEPOSIT_CONFIG: 'config',
  DEPOSIT_CREATED_AT: 'created_at',
  DEPOSIT_EXPIRE_AT: 'expire_at',
  DEPOSIT_LOCKUP_DURATION: 'lockup_duration',
  DEPOSIT_RESULT: 'result',
  DEPOSIT_VALUE: 'value',
  WITHDRAW: '/withdraw',
  WITHDRAW_CREATED_AT: 'created_at',
  WITHDRAW_RESULT: 'result',
  WITHDRAW_VALUE: 'value',
  DEPOSIT_ACCOUNTS_CONSENSUS: '/deposit_accounts/consensus',
  DEPOSIT_CONSENSUS: '/deposit/consensus',
  WITHDRAW_CONSENSUS: '/withdraw/consensus'
};
/**
 * Properties of owner configs
 * @enum {string}
 */
const OwnerProperties = {
  ANYONE: '*',
  BRANCH_OWNER: 'branch_owner',
  OWNER: '.owner',
  OWNERS: 'owners',
  WRITE_FUNCTION: 'write_function',
  WRITE_OWNER: 'write_owner',
  WRITE_RULE: 'write_rule',
};
/**
 * Properties of rule configs
 * @enum {string}
 */
const RuleProperties = {
  WRITE: '.write',
};
/**
 * Properties of function configs
 * @enum {string}
 */
const FunctionProperties = {
  EVENT_LISTENER: 'event_listener',
  FUNCTION: '.function',
  FUNCTION_ID: 'function_id',
  FUNCTION_TYPE: 'function_type',
  SERVICE_NAME: 'service_name',
};
/**
 * Types of functions
 * @enum {string}
 */
const FunctionTypes = {
  NATIVE: 'NATIVE',
  REST: 'REST',
};
/**
 * IDs of native functions
 * @enum {string}
 */
const NativeFunctionIds = {
  DEPOSIT: '_deposit',
  TRANSFER: '_transfer',
  WITHDRAW: '_withdraw',
};
/**
 * Types of read database operations
 * @enum {string}
 */
const ReadDbOperations = {
  GET_VALUE: 'GET_VALUE',
  GET_FUNCTION: 'GET_FUNCTION',
  GET_RULE: 'GET_RULE',
  GET_OWNER: 'GET_OWNER',
  MATCH_FUNCTION: 'MATCH_FUNCTION',
  MATCH_RULE: 'MATCH_RULE',
  MATCH_OWNER: 'MATCH_OWNER',
  EVAL_RULE: 'EVAL_RULE',
  EVAL_OWNER: 'EVAL_OWNER',
  GET: 'GET',
};
/**
 * Types of write database operations
 * @enum {string}
 */
const WriteDbOperations = {
  SET_VALUE: 'SET_VALUE',
  INC_VALUE: 'INC_VALUE',
  DEC_VALUE: 'DEC_VALUE',
  SET_FUNCTION: 'SET_FUNCTION',
  SET_RULE: 'SET_RULE',
  SET_OWNER: 'SET_OWNER',
  SET: 'SET',
};
/**
 * Function result code
 * @enum {string}
 */
const FunctionResultCode = {
  SUCCESS: 'SUCCESS',
  FAILURE: 'FAILURE',
  INSUFFICIENT_BALANCE: 'INSUFFICIENT_BALANCE',
  IN_LOCKUP_PERIOD: 'IN_LOCKUP_PERIOD',
};
/**
 * Constant values for transactionTracker
 * @enum {string}
 */
const TransactionStatus = {
  BLOCK_STATUS: 'BLOCK',
  POOL_STATUS: 'POOL',
  TIMEOUT_STATUS: 'TIMEOUT',
};
/**
 * Default values
 */
const DefaultValues = {
  DEPOSIT_LOCKUP_DURATION_MS: 2592000000 // 30 days
}
// Parsed genesis configs; null when the corresponding file is missing.
const GenesisToken = fs.existsSync(GENESIS_TOKEN) ?
    JSON.parse(fs.readFileSync(GENESIS_TOKEN)) : null;
const GenesisAccounts = fs.existsSync(GENESIS_ACCOUNTS) ?
    JSON.parse(fs.readFileSync(GENESIS_ACCOUNTS)) : null;
// Note: the GENESIS_TOKEN / GENESIS_ACCOUNTS paths themselves are not
// exported; only their parsed contents (GenesisToken / GenesisAccounts) are.
module.exports = {
  GENESIS_OWNERS,
  ADDITIONAL_OWNERS,
  GENESIS_RULES,
  ADDITIONAL_RULES,
  GENESIS_FUNCTIONS,
  ADDITIONAL_FUNCTIONS,
  BLOCKCHAINS_DIR,
  PROTOCOL_VERSIONS,
  STAKE,
  DEBUG,
  MAX_TX_BYTES,
  TRANSACTION_POOL_TIME_OUT_MS,
  TRANSACTION_TRACKER_TIME_OUT_MS,
  NETWORK_ID,
  HOSTING_ENV,
  ACCOUNT_INDEX,
  PORT,
  P2P_PORT,
  TRACKER_WS_ADDR,
  MessageTypes,
  PredefinedDbPaths,
  OwnerProperties,
  RuleProperties,
  FunctionProperties,
  FunctionTypes,
  FunctionResultCode,
  NativeFunctionIds,
  ReadDbOperations,
  WriteDbOperations,
  TransactionStatus,
  DefaultValues,
  GenesisToken,
  GenesisAccounts
};
|
#pragma once
class HttpRequest;
/**
 * @brief Parses an HTTP request.
 */
class HttpRequestParser {
public:
    // Overall result of parsing a request.
    enum HTTP_CODE {
        No_Request = 0, Get_Request, Bad_Request, Forbidden_Request, Internal_Error,
        Closed_Connection
    };
    // Result of scanning a single line out of the input buffer.
    enum LINE_STATE {
        Line_OK = 0, Line_Bad, Line_More, Line_Overflow
    };
    // Current phase of the incremental parser.
    enum PARSE_STATE {
        Parse_Request_Line = 0, Parse_Header, Parse_Body
    };
    /**
     * @brief Scans one line, copying it into the temporary buffer, and returns
     * the scan state accordingly. When scanning completes, start_index and
     * temp_start_index point at the first unread position of their respective
     * buffers.
     * @return Line_Bad means an abnormal line terminator was encountered
     */
    static LINE_STATE checkLine(char *buffer, int &start_index, int buffer_size,
        char *temp_buffer, int &temp_start_index ,int temp_buffer_size);
    /**
     * @brief Parses the request line from the temporary buffer.
     */
    static HTTP_CODE parseRequestLine(char *temp_buffer, HttpRequest &http_request);
    /**
     * @brief Parses one line of the request headers from the temporary buffer.
     */
    static HTTP_CODE parseHeaderLine(char *temp_buffer, HttpRequest &http_request);
    /**
     * @brief Copies the request body from the buffer.
     */
    static HTTP_CODE parseBody(char *buffer, int buffer_size, HttpRequest &http_request);
    /**
     * @brief Parses the request content and stores it into an HttpRequest.
     */
    static HTTP_CODE parseRequest(char *buffer, int buffer_size,
        char *temp_buffer, int &temp_buffer_index, int temp_buffer_size,
        HttpRequest &http_request);
};
|
/**
 * Minimal Koa-style router: stores { path, methods, middleware } entries and
 * exposes routes() as a (ctx, next) middleware that dispatches on an exact
 * URL + method match. Per-request debug console.log calls were removed.
 */
class Router {
    constructor() {
        this.stack = []
    }
    // Register a route entry.
    register(path, methods, middleware) {
        let route = { path, methods, middleware }
        this.stack.push(route)
    }
    get(path, middleware) {
        // Register a GET route.
        this.register(path, 'get', middleware)
    }
    post(path, middleware) {
        // Register a POST route.
        this.register(path, 'post', middleware)
    }
    routes() {
        // Return a middleware (ctx, next) => { ... } that performs the routing.
        let stock = this.stack
        return async (ctx, next) => {
            // Browsers request /favicon.ico automatically; skip route matching.
            if (ctx.url === '/favicon.ico') {
                await next()
                return
            }
            let route
            for (let i = 0, len = stock.length; i < len; i++) {
                let item = stock[i]
                if (ctx.url === item.path && item.methods.includes(ctx.method.toLowerCase())) {
                    route = item.middleware
                    break
                }
            }
            if (typeof route === 'function') {
                // Bug fix: await the matched middleware so async handlers
                // complete (and reject) within this middleware's lifetime.
                await route(ctx, next)
            } else {
                await next()
            }
        }
    }
}
module.exports = Router
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Search engine API."""
import hashlib
from functools import partial
import six
from elasticsearch import VERSION as ES_VERSION
from elasticsearch_dsl import FacetedSearch, Search
from elasticsearch_dsl.faceted_search import FacetedResponse
from elasticsearch_dsl.query import Bool, Ids
from flask import current_app, request
from .proxies import current_search_client
from .utils import build_alias_name
class DefaultFilter(object):
    """Descriptor that supplies a (possibly lazy) default filter query.

    The raw query may be a value or a zero-argument callable; an optional
    ``query_parser`` post-processes it on attribute access.
    """

    def __init__(self, query=None, query_parser=None):
        """Store the raw query and the parser (identity by default)."""
        self._query = query
        self.query_parser = query_parser or (lambda x: x)

    @property
    def query(self):
        """Resolve the raw query, calling it if it is callable."""
        if callable(self._query):
            return self._query()
        return self._query

    def __get__(self, obj, objtype):
        """Return the parsed query on attribute access."""
        return self.query_parser(self.query)
class MinShouldMatch(str):
    """Work-around for an Elasticsearch DSL problem.

    The Elasticsearch DSL ``Bool`` query tries to inspect the
    ``minimum_should_match`` parameter, but understands only integers and
    not expressions such as ``"0<1"``.  Answering ``False`` to every
    ordering comparison sidesteps the problematic clause while keeping the
    plain string value intact.
    """

    def __lt__(self, other):
        """Never compare as smaller."""
        return False

    def __le__(self, other):
        """Never compare as smaller-or-equal."""
        return False

    def __gt__(self, other):
        """Never compare as greater."""
        return False

    def __ge__(self, other):
        """Never compare as greater-or-equal."""
        return False
class BaseRecordsSearch(Search):
    """Example subclass for searching records using Elastic DSL."""

    class Meta:
        """Configuration for ``Search`` and ``FacetedSearch`` classes."""

        index = '_all'
        doc_types = None
        fields = ('*', )
        facets = {}

        default_filter = None
        """Default filter added to search body.

        Example: ``default_filter = DefaultFilter('_access.owner:"1"')``.
        """

    def __init__(self, **kwargs):
        """Use Meta to set kwargs defaults."""
        kwargs.setdefault('index', getattr(self.Meta, 'index', None))
        kwargs.setdefault('doc_type', getattr(self.Meta, 'doc_types', None))
        kwargs.setdefault('using', current_search_client)
        kwargs.setdefault('extra', {})
        # Optional app-wide minimum score threshold for all searches.
        min_score = current_app.config.get('SEARCH_RESULTS_MIN_SCORE')
        if min_score:
            kwargs['extra'].update(min_score=min_score)
        super(BaseRecordsSearch, self).__init__(**kwargs)
        default_filter = getattr(self.Meta, 'default_filter', None)
        if default_filter:
            # NOTE: https://github.com/elastic/elasticsearch/issues/21844
            self.query = Bool(minimum_should_match=MinShouldMatch("0<1"),
                              filter=default_filter)

    def get_record(self, id_):
        """Return a record by its identifier.

        :param id_: The record identifier.
        :returns: The record.
        """
        return self.query(Ids(values=[str(id_)]))

    def get_records(self, ids):
        """Return records by their identifiers.

        :param ids: A list of record identifier.
        :returns: A list of records.
        """
        return self.query(Ids(values=[str(id_) for id_ in ids]))

    @classmethod
    def faceted_search(cls, query=None, filters=None, search=None):
        """Return faceted search instance with defaults set.

        :param query: Elastic DSL query object (``Q``).
        :param filters: Dictionary with selected facet values.
        :param search: An instance of ``Search`` class. (default: ``cls()``).
        """
        search_ = search or cls()

        class RecordsFacetedSearch(FacetedSearch):
            """Pass defaults from ``cls.Meta`` object."""

            index = build_alias_name(search_._index[0])
            doc_types = getattr(search_.Meta, 'doc_types', ['_all'])
            fields = getattr(search_.Meta, 'fields', ('*', ))
            facets = getattr(search_.Meta, 'facets', {})

            def search(self):
                """Use ``search`` or ``cls()`` instead of default Search."""
                # Later versions of `elasticsearch-dsl` (>=5.1.0) changed the
                # Elasticsearch FacetedResponse class constructor signature.
                if ES_VERSION[0] > 2:
                    return search_.response_class(FacetedResponse)
                return search_.response_class(partial(FacetedResponse, self))

        return RecordsFacetedSearch(query=query, filters=filters or {})

    def with_preference_param(self):
        """Add the preference param to the ES request and return a new Search.

        The preference param avoids the bouncing effect with multiple
        replicas, documented on ES documentation.
        See: https://www.elastic.co/guide/en/elasticsearch/guide/current
        /_search_options.html#_preference for more information.
        """
        user_hash = self._get_user_hash()
        if user_hash:
            return self.params(preference=user_hash)
        return self

    def _get_user_agent(self):
        """Retrieve the request's User-Agent, if available.

        Taken from Flask Login utils.py.
        """
        user_agent = request.headers.get('User-Agent')
        if user_agent:
            user_agent = user_agent.encode('utf-8')
        return user_agent or ''

    def _get_user_hash(self):
        """Calculate a digest based on request's User-Agent and IP address."""
        # Only available inside a Flask request context.
        if request:
            user_hash = '{ip}-{ua}'.format(ip=request.remote_addr,
                                           ua=self._get_user_agent())
            # md5 is used as a stable, non-security routing key here.
            alg = hashlib.md5()
            alg.update(user_hash.encode('utf8'))
            return alg.hexdigest()
        return None
class PrefixedIndexList(list):
    """List subclass marking index names as already alias-prefixed.

    Used as a sentinel type so ``RecordsSearch`` can tell prefixed index
    lists apart from plain ones and avoid prefixing them twice.
    """
class RecordsSearch(BaseRecordsSearch):
    """Example subclass for searching records using index prefixing."""

    def __init__(self, **kwargs):
        """Using PrefixedIndexList type to avoid double prefixing."""
        # at object instantiation, kwargs['index'] is not defined.
        # Elasticsearch-dsl-py re-instantiated the object at each search
        # by cloning it and passing as kwargs the list of indices
        # kwargs['index'] = ['index-name1', 'index-name2']
        _index_param = kwargs.get('index', getattr(self.Meta, 'index', None))
        if not isinstance(_index_param, PrefixedIndexList):
            if isinstance(_index_param, (tuple, list)):
                _prefixed_index_list = [
                    build_alias_name(_index)
                    for _index in _index_param
                ]
                kwargs.update({'index': _prefixed_index_list})
            elif isinstance(_index_param, six.string_types):
                # A comma-separated string may name several indices.
                _splitted_index = _index_param.strip().split(',')
                if len(_splitted_index) > 1:
                    _prefix_index_list = [
                        build_alias_name(_index)
                        for _index in _splitted_index]
                    _prefix_index_param = ','.join(_prefix_index_list)
                    kwargs.update({'index': _prefix_index_param})
                else:
                    kwargs.update({'index': build_alias_name(_index_param)})
                    _index_param = [_index_param]
        self._original_index = _index_param
        super(RecordsSearch, self).__init__(**kwargs)
        # Wrap so a later clone skips the prefixing branch above.
        if self._index:
            self._index = PrefixedIndexList(self._index)

    def _clone(self):
        """Clone `_original_index` attribute.

        During re-instantiation Elasticsearch-dsl-py calls `self._clone`
        to copy over the search object. We override the method so we can
        copy the `_original_index` attribute.
        """
        s = super(RecordsSearch, self)._clone()
        s._original_index = self._original_index
        return s


# Alias for the non-prefixing search class.
UnPrefixedRecordsSearch = BaseRecordsSearch
|
import threading
import requests
import time
import json
import pandas as pd
import time
import unidecode
spanish_path = "./datasets/es.json"
english_path = "./datasets/all_data_en.json"

# Load the first 1001 records of the JSON-lines dataset into a DataFrame.
# Bug fix: the file handle was opened with a bare ``open`` and never closed;
# a context manager releases it as soon as the lines are read.
data = []
with open(english_path, "r") as dataset_file:
    for index, line in enumerate(dataset_file):
        data.append(json.loads(line))
        if index == 1000:
            break

text_raw_df = pd.json_normalize(data)
print(text_raw_df.shape)
# Notebook-style preview; the return value is intentionally discarded.
text_raw_df["text"].head(10)
def worker(i, dfi, chunk_size):
    """Thread worker: tag every text in ``dfi`` via the local tagging service.

    Posts each row's ``text`` to http://localhost:8080/, collects the
    returned misinformation class, joins the tags back onto the chunk and
    writes the result as JSON-lines to ``./gateTagging/tagThread_<i>``.

    :param i: worker index; used for the output file name and row offsets.
    :param dfi: DataFrame chunk with a ``text`` column.
    :param chunk_size: rows per chunk; fixes the tag index range.
    """
    tags = []
    start_time = time.time()
    print("Running worker:", i)
    for num in range(dfi.shape[0]):
        text = dfi.iloc[num, :]["text"]
        # Transliterate to ASCII before posting -- presumably the service
        # expects plain-ASCII input; TODO confirm.
        text = unidecode.unidecode(text)
        headers = {"Content-Type": "text/plain"}
        r = requests.post("http://localhost:8080/", headers=headers, data=text)
        try:
            missinfo_class = r.json()["entities"]["MisinfoClass"][0]["class"]
            tags.append(missinfo_class)
        except (ValueError, KeyError, IndexError):
            # Narrowed from a bare ``except``: bad JSON, missing keys, or an
            # empty class list. NOTE(review): skipped rows leave ``tags``
            # shorter than the chunk, shifting later tags relative to their
            # rows -- pre-existing behaviour, flagged for follow-up.
            print(" Thread:", i)
            print("text:", text)
            print(r.json())
    index = list(range(i * chunk_size, (i + 1) * chunk_size))
    final = pd.concat(
        [pd.DataFrame(dfi), pd.Series(tags, name="gate_tags", index=index)], axis=1
    )
    final.to_json(f"./gateTagging/tagThread_{i}", orient="records", lines=True)
    end_time = time.time()
    print(f"Time worker_{i}:", end_time - start_time)
# Spawn one worker thread per 100-row chunk. range(1) means only chunk 0 is
# processed in this run; raise the bound to tag more of the dataset.
threads = []
for i in range(1):
    chunk_size = 100
    dfi = text_raw_df.iloc[i * chunk_size : (i + 1) * chunk_size, :]
    t = threading.Thread(target=worker, args=(i, dfi, chunk_size))
    threads.append(t)
    t.start()
# NOTE(review): threads are collected but never join()ed; the interpreter
# still waits for non-daemon threads at exit, but no code runs "after all
# workers finish".
|
import numpy as np
from numba import jit
dt = np.double
@jit
def shade(ray, hit):
    """Return an RGB colour (3-element ndarray) for a ray/surface hit.

    On a hit (finite distance) the surface normal is visualised by mapping
    its components from [-1, 1] into the [0, 255] colour range; on a miss a
    flat blue background colour is returned.
    """
    # Bug fix: ``np.Inf`` is a removed alias in NumPy 2.0; use ``np.inf``.
    if hit.distance < np.inf:
        return hit.normal * 128 + 128
    # Miss: flat blue background. The commented-out lines below sketch a
    # skybox texture lookup that was never ported from the original HLSL.
    #theta = np.acos(ray.direction.y) / - np.pi
    #phi = atan2(ray.direction.x, -ray.direction.z) / - np.pi * 0.5
    #return _SkyboxTexture.SampleLevel(sampler_SkyboxTexture, float2(phi, theta), 0).xyz
    return np.array([0, 0, 255], dt)
|