text
stringlengths 3
1.05M
|
|---|
from PyQt5 import QtWidgets, QtCore
from utils import utils_collection as utils
from utils import db_manager
from utils.styling import excluded_dialog_title_style, excluded_dialog_subtitle_style
class ExcludedCompsDialog(QtWidgets.QDialog):
    """Dialog listing the components not integrated into any recipe.

    Shows the excluded components fetched from the database and a checkbox
    that toggles yellow highlighting of those components in the parent main
    section. The checkbox state is persisted in QSettings and every toggle
    is logged as a configuration record in the database.
    """

    def __init__(self, parent=None):
        super(ExcludedCompsDialog, self).__init__(parent)
        # Minimal window chrome: title bar with only a close button.
        self.setWindowFlags(
            QtCore.Qt.Dialog
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowCloseButtonHint
        )
        self.setFixedWidth(200)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.settings = QtCore.QSettings("solutronic", "admin_stock")
        layout = QtWidgets.QVBoxLayout()
        checkbox_layout = QtWidgets.QVBoxLayout()
        checkbox_layout.setAlignment(QtCore.Qt.AlignCenter)
        title = QtWidgets.QLabel("Componentes excluidos")
        subtitle = QtWidgets.QLabel("Componentes no integrados\na ninguna receta:")
        title.setAlignment(QtCore.Qt.AlignCenter)
        title.setStyleSheet(excluded_dialog_title_style)
        subtitle.setAlignment(QtCore.Qt.AlignCenter)
        subtitle.setStyleSheet(excluded_dialog_subtitle_style)
        self.checkbox = QtWidgets.QCheckBox("Resaltar excluidos en amarillo")
        # Restore the persisted highlighting preference.
        if self.settings.value("excluded_checkbox") == "on":
            self.checkbox.setChecked(True)
        db = db_manager.DB_Manager()
        self.unused_components = db.get_components_not_in_use()
        db.close_connection()
        # Build the bullet list in one pass (was an O(n^2) += loop); fall
        # back to a placeholder when there is nothing to show.
        if self.unused_components:
            unused_components_string = "".join(
                f"• {component}\n" for component in self.unused_components
            )
        else:
            unused_components_string = "No hay excluidos."
        excluded_data = QtWidgets.QLabel(unused_components_string)
        excluded_data.setAlignment(QtCore.Qt.AlignCenter)
        checkbox_layout.addWidget(self.checkbox)
        self.back_button = QtWidgets.QPushButton("« Volver")
        self.back_button.setShortcut("Alt+v")
        self.back_button.setDefault(True)
        layout.addWidget(title)
        layout.addWidget(subtitle)
        layout.addWidget(excluded_data)
        layout.addLayout(checkbox_layout)
        layout.addWidget(self.back_button)
        self.setLayout(layout)
        self.back_button.clicked.connect(self.close)
        self.checkbox.clicked.connect(self.on_checkbox_checked)

    def on_checkbox_checked(self):
        """Persist the checkbox state, log the change, and recolor the parent."""
        number_of_excluded_comps = len(self.unused_components)
        endstring = (
            "componente excluido"
            if number_of_excluded_comps == 1
            else "componentes excluidos"
        )
        number_of_excluded_comps_string = (
            str(number_of_excluded_comps) + " " + endstring
        )
        if self.checkbox.isChecked():
            self.settings.setValue("excluded_checkbox", "on")
            update = "Activado de excluidos"
        else:  # was a redundant `elif not self.checkbox.isChecked():`
            self.settings.setValue("excluded_checkbox", "off")
            update = "Desactivado de excluidos"
        db = db_manager.DB_Manager()
        db.log_new_config_record(config=update, details=number_of_excluded_comps_string)
        db.close_connection()
        self.set_excluded_state_at_parent_and_recolor()

    def set_excluded_state_at_parent_and_recolor(self):
        """Re-apply critical/zero/excluded coloring on the parent's table."""
        main_section = self.parent()
        excluded_state = self.settings.value("excluded_checkbox")
        utils.color_criticals_in_orange_in_main_section(
            main_section.table, main_section.stored_criticals
        )
        utils.color_zeros_in_grey_in_main_section(
            main_section.table, main_section.stored_criticals
        )
        utils.color_excluded_in_yellow_in_main_section(
            main_section.table, excluded_state, main_section.unused_comps
        )
|
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _XT_RATEEST_MATCH_H
#define _XT_RATEEST_MATCH_H
#define XT_RATEEST_MATCH_H
#define XT_RATEEST_MATCH_H_
#define _UAPI_XT_RATEEST_MATCH_H
#define _UAPI_XT_RATEEST_MATCH_H_
#define _XT_RATEEST_MATCH_H_
#include <museum/5.1.1/bionic/libc/linux/types.h>
/* Option bits OR-ed into the `flags` field of struct xt_rateest_match_info.
 * Names follow the kernel's xt_rateest match; semantics per netfilter docs. */
enum xt_rateest_match_flags {
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 XT_RATEEST_MATCH_INVERT = 1<<0, /* invert the sense of the match */
 XT_RATEEST_MATCH_ABS = 1<<1, /* compare against an absolute rate */
 XT_RATEEST_MATCH_REL = 1<<2, /* compare two estimators against each other */
 XT_RATEEST_MATCH_DELTA = 1<<3, /* match on rate delta rather than rate */
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 XT_RATEEST_MATCH_BPS = 1<<4, /* compare bytes per second */
 XT_RATEEST_MATCH_PPS = 1<<5, /* compare packets per second */
};
/* Comparison operator applied between the measured rate and the
 * configured threshold (or the second estimator in relative mode). */
enum xt_rateest_match_mode {
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 XT_RATEEST_MATCH_NONE, /* no comparison */
 XT_RATEEST_MATCH_EQ, /* equal */
 XT_RATEEST_MATCH_LT, /* less than */
 XT_RATEEST_MATCH_GT, /* greater than */
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
};
/* Userspace/kernel ABI payload for a rateest match rule. */
struct xt_rateest_match_info {
 char name1[IFNAMSIZ]; /* name of the first rate estimator */
 char name2[IFNAMSIZ]; /* name of the second estimator (relative mode) */
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 __u16 flags; /* bitmask of enum xt_rateest_match_flags */
 __u16 mode; /* one of enum xt_rateest_match_mode */
 __u32 bps1; /* bytes/s threshold for estimator 1 */
 __u32 pps1; /* packets/s threshold for estimator 1 */
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 __u32 bps2; /* bytes/s threshold for estimator 2 */
 __u32 pps2; /* packets/s threshold for estimator 2 */
 struct xt_rateest *est1 __attribute__((aligned(8))); /* kernel-internal pointer */
 struct xt_rateest *est2 __attribute__((aligned(8))); /* kernel-internal pointer */
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
};
#endif
|
/**
* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
var util = require("util");
var nodePage = require("../../node_page");
// Page object for the Node-RED inject node's edit dialog (Selenium tests).
function injectNode(id) {
    nodePage.call(this, id);
}

util.inherits(injectNode, nodePage);

// 1-based position of each payload type entry in the typedInput options
// dropdown; used to build the XPath selector in setPayload().
var payloadType = {
    "flow": 1,
    "global": 2,
    "string": 3,
    "num": 4,
    "bool": 5,
    "json": 6,
    "bin": 7,
    "date": 8,
};
// Select the payload type in the typedInput dropdown and fill in the value.
injectNode.prototype.setPayload = function(type, value) {
    // Expand the typedInput dropdown first.
    browser.clickWithWait('//*[contains(@class, "red-ui-typedInput-container")]');
    // Pick the list entry whose 1-based index matches the requested type.
    var optionIndex = payloadType[type];
    var optionXPath = '//*[@class="red-ui-typedInput-options"]/a[' + optionIndex + ']';
    browser.clickWithWait(optionXPath);
    // Finally type the payload value into the input field.
    browser.setValue('#node-input-payload', value);
}
// Type the given topic string into the inject node's topic field.
injectNode.prototype.setTopic = function(value) {
    browser.setValue('#node-input-topic', value);
}

module.exports = injectNode;
|
// UI locale strings for pagination and table filtering.
// NOTE(review): language appears to be siSwati/Swati -- confirm the locale
// code with the translations owner; do not edit the translated text here.
export default {
  pagination: {
    goto: "Iya ku",
    page: "",
    itemsPerPage: " / Emakhasi",
    total: total => `Okuphelele ${total}`,
    prev5: "5 Esedlule Emakhasi",
    next5: "5 Emakhasi Lalandzelako"
  },
  table: {
    confirmFilter: "Qiniseka",
    resetFilter: "Setha Kabusha"
  }
};
|
const path = require('path')
const { CleanWebpackPlugin } = require('clean-webpack-plugin')
const TerserPlugin = require('terser-webpack-plugin')
module.exports = {
target: "node",
entry: "./src/index.ts",
output: {
fileName: "vs-presence.js",
libraryTarget: "commonjs2",
path: path.resolve(process.cwd(), 'out'),
},
devtool: 'source-map',
externals: {
vscode: 'commonjs vscode',
},
resolve: {
extensions: ['.ts', '.js', '.json'],
},
plugins: [new CleanWebpackPlugin()],
optimization: {
minimize: true,
minimizer: [
new TerserPlugin({
extractComments: true,
terserOptions: {
output: {
comments: false,
},
mangle: false,
keep_classnames: true,
keep_fnames: true,
},
}),
],
},
module: {
rules: [
{
test: /\.ts$/,
use: 'ts-loader',
exclude: /node_modules/,
},
],
},
}
|
import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
def main(args):
  """Tune pointnet-classification hyperparameters through hfta's hfht driver.

  Seeds all RNGs from args.seed, defines the fusible/nonfusible search
  spaces, and wires the training-subprocess callbacks into
  tune_hyperparameters().
  """
  random.seed(args.seed)
  np.random.seed(args.seed)
  rng_state = np.random.RandomState(seed=args.seed)
  # Hyperparameters that HFTA can fuse across concurrent trials.
  fusibles = {
      'lr': hp.uniform('lr', 0.0001, 0.01),
      'beta1': hp.uniform('beta1', 0.001, 0.999),
      'beta2': hp.uniform('beta2', 0.001, 0.999),
      'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
      'gamma': hp.uniform('gamma', 0.1, 0.9),
      'step_size': hp.choice('step_size', (5, 10, 20, 40)),
  }
  # Hyperparameters that cannot be fused (they change input/model shape).
  nonfusibles = {
      'batch_size': hp.choice('batch_size', (8, 16, 32)),
      'feature_transform': hp.choice('feature_transform', (True, False)),
  }

  def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
    """Launch one train_classification.py subprocess; True iff it succeeded."""
    # Build the cmd.
    cmd = [
        'python',
        'train_classification.py',
        '--epochs',
        str(epochs),
        '--iters-per-epoch',
        str(iters_per_epoch),
        '--dataset',
        args.dataset,
        '--dataset_type',
        args.dataset_type,
        '--num_points',
        str(args.num_points),
        '--device',
        args.device,
        '--eval',
        '--seed',
        str(args.seed),
        '--batch_size',
        str(generate_nonfusible_param(params, 'batch_size')),
    ]
    if results_dir is not None:
      cmd.extend(['--outf', results_dir])
    if generate_nonfusible_param(params, 'feature_transform'):
      cmd.append('--feature_transform')
    cmd.extend(
        generate_fusible_param_flags(
            params,
            ['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
        ))
    if args.mode == 'hfta':
      cmd.append('--hfta')
    if args.amp:
      cmd.append('--amp')
    # Launch the training process.
    succeeded = True
    out_f = err_f = None
    try:
      logging.info('--> Running cmd = {}'.format(cmd))
      if results_dir is None:
        # Throwaway (dry) run: discard child output entirely.
        stdout = stderr = subprocess.DEVNULL
      else:
        # BUG FIX: these handles used to be opened inline in subprocess.run
        # and never closed, leaking one fd pair per trial.
        out_f = open(os.path.join(results_dir, 'stdout.txt'), 'w')
        err_f = open(os.path.join(results_dir, 'stderr.txt'), 'w')
        stdout, stderr = out_f, err_f
      subprocess.run(
          cmd,
          stdout=stdout,
          stderr=stderr,
          check=True,
          cwd=os.path.join(
              os.path.abspath(os.path.expanduser(os.path.dirname(__file__))),
              '../pointnet/'),
          env=env_vars,
      )
    except subprocess.CalledProcessError as e:
      logging.error(e)
      succeeded = False
    finally:
      if out_f is not None:
        out_f.close()
      if err_f is not None:
        err_f.close()
    return succeeded

  def try_params(ids, epochs, params, env_vars=None):
    """ Running the training process for pointnet classification task.
    Args:
      ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
      epochs: number of epochs to run.
      params: maps hyperparameter name to its value(s). For HFTA, the values are
        provided as a list.
      env_vars: optional, dict(str, str) that includes extra environment that
        needs to be forwarded to the subprocess call
    Returns:
      result(s): A single result dict for serial or a list of result dicts for
        HFTA in the same order as ids.
      early_stop(s): Whether the training process early stopped. A single bool
        for serial or a list of bools for HFTA in the same order as ids.
    """
    epochs = int(round(epochs))
    ids_str = (','.join([str(i) for i in ids]) if isinstance(
        ids,
        (list, tuple),
    ) else str(ids))
    # Allocate result dir.
    results_dir = os.path.join(args.outdir, ids_str)
    Path(results_dir).mkdir(parents=True, exist_ok=True)
    # Run training.
    succeeded = _run(
        results_dir,
        epochs,
        args.iters_per_epoch,
        params,
        env_vars=env_vars,
    )
    if not succeeded:
      raise RuntimeError('_run failed!')
    # Gather the results.
    results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
    if isinstance(ids, (list, tuple)):
      results = [{'acc': acc} for acc in results_frame['acc'].tolist()]
      assert len(results) == len(ids)
      return results, [False] * len(ids)
    else:
      return {'acc': results_frame['acc'][0]}, False

  def dry_run(
      B=None,
      nonfusibles_kvs=None,
      epochs=None,
      iters_per_epoch=None,
      env_vars=None,
  ):
    """Sample random fusible params and do a throwaway run (no results kept)."""
    params = [{
        **handle_integers(sample(fusibles, rng=rng_state)),
        **nonfusibles_kvs
    } for _ in range(max(B, 1))]
    if B > 0:
      params = fuse_dicts(params)
    else:
      params = params[0]
    return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)

  tune_hyperparameters(
      space={
          **fusibles,
          **nonfusibles
      },
      try_params_callback=try_params,
      dry_run_callback=dry_run,
      mode=args.mode,
      algorithm=args.algorithm,
      nonfusibles=nonfusibles.keys(),
      dry_run_repeats=args.dry_run_repeats,
      dry_run_epochs=args.dry_run_epochs,
      dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
      metric='acc',
      goal='max',
      algorithm_configs={
          'hyperband': args.hyperband_kwargs,
          'random': args.random_kwargs,
      },
      seed=args.seed,
      outdir=args.outdir,
  )
def attach_args(parser=None):
  """Attach the pointnet-classification tuning CLI flags and return the parser.

  Args:
    parser: optional argparse.ArgumentParser; a fresh one is created when
      omitted. (BUG FIX: was a mutable default `argparse.ArgumentParser()`,
      which shares one parser across calls and raises on duplicate flags.)
  """
  if parser is None:
    parser = argparse.ArgumentParser()
  parser.add_argument(
      '--workers',
      type=int,
      help='number of data loading workers',
      default=4,
  )
  parser.add_argument(
      '--iters-per-epoch',
      type=int,
      default=int(1e9),
      # BUG FIX: help text previously said 'number of epochs to train for'.
      help='number of training iterations per epoch',
  )
  parser.add_argument('--dataset', type=str, required=True, help="dataset path")
  parser.add_argument(
      '--dataset-type',
      type=str,
      default='shapenet',
      help="dataset type shapenet|modelnet40",
  )
  parser.add_argument(
      '--num-points',
      type=int,
      default=2500,
      help='num of points for dataset',
  )
  parser.add_argument(
      '--device',
      type=str,
      default='cuda',
      choices=['cpu', 'cuda', 'xla'],
      help="the device where this test is running",
  )
  parser.add_argument(
      '--amp',
      default=False,
      action='store_true',
      help='Enable AMP; only used when --device is cuda',
  )
  # Common hfht flags (seed, outdir, algorithm kwargs, ...).
  parser = attach_common_args(parser)
  return parser
if __name__ == '__main__':
  args = attach_args().parse_args()
  # Regroup per-algorithm CLI kwargs before configuring logging.
  rearrange_algorithm_kwargs(args)
  logging.basicConfig(level=extract_logging_level(args))
  # Expand ~ and make paths absolute before handing off to main().
  args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
  args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
  main(args)
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class Topic(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents topics as used by https://github.com/topics. The object reference can be found here https://developer.github.com/v3/search/#search-topics
    """

    # JSON key -> maker-method name, driving _initAttributes/_useAttributes.
    # The private attribute for key "x" is always "_x".
    _ATTRIBUTE_MAKERS = (
        ("name", "_makeStringAttribute"),
        ("display_name", "_makeStringAttribute"),
        ("short_description", "_makeStringAttribute"),
        ("description", "_makeStringAttribute"),
        ("created_by", "_makeStringAttribute"),
        ("released", "_makeStringAttribute"),
        ("created_at", "_makeDatetimeAttribute"),
        ("updated_at", "_makeDatetimeAttribute"),
        ("featured", "_makeBoolAttribute"),
        ("curated", "_makeBoolAttribute"),
        ("score", "_makeFloatAttribute"),
    )

    def __repr__(self):
        return self.get__repr__({"name": self._name.value})

    @property
    def name(self):
        """:type: string"""
        return self._name.value

    @property
    def display_name(self):
        """:type: string"""
        return self._display_name.value

    @property
    def short_description(self):
        """:type: string"""
        return self._short_description.value

    @property
    def description(self):
        """:type: string"""
        return self._description.value

    @property
    def created_by(self):
        """:type: string"""
        return self._created_by.value

    @property
    def released(self):
        """:type: string"""
        return self._released.value

    @property
    def created_at(self):
        """:type: datetime.datetime"""
        return self._created_at.value

    @property
    def updated_at(self):
        """:type: datetime.datetime"""
        return self._updated_at.value

    @property
    def featured(self):
        """:type: bool"""
        return self._featured.value

    @property
    def curated(self):
        """:type: bool"""
        return self._curated.value

    @property
    def score(self):
        """:type: float"""
        return self._score.value

    def _initAttributes(self):
        # Every attribute starts out NotSet until _useAttributes fills it in.
        for key, _ in self._ATTRIBUTE_MAKERS:
            setattr(self, "_" + key, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        for key, maker_name in self._ATTRIBUTE_MAKERS:
            if key in attributes:  # pragma no branch
                maker = getattr(self, maker_name)
                setattr(self, "_" + key, maker(attributes[key]))
|
/*
Copyright (c) 2019, Ameer Haj Ali (UC Berkeley), and Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "header.h"
/* 128-element operand and result buffers; ALIGNED16 (from header.h)
 * presumably forces 16-byte alignment for vectorized access -- confirm. */
int out[128] ALIGNED16;
int in1[128] ALIGNED16;
int in2[128] ALIGNED16;
/* Element-wise difference: out[i] = in1[i] - in2[i] for all 128 slots.
 * Kept noinline so the benchmark harness times the call itself. */
__attribute__((noinline))
void example1 () {
  int idx;
  for (idx = 0; idx < 128; ++idx)
    out[idx] = in1[idx] - in2[idx];
}
int main(int argc,char* argv[]){
  /* Fill all three buffers with the harness's test pattern. */
  init_memory(&out[0], &out[128]);
  init_memory(&in1[0], &in1[128]);
  init_memory(&in2[0], &in2[128]);
  /* BENCH (from header.h) times example1() and checksums `out`; the
     Mi*4/128*256 expression appears to be the harness's work-volume
     estimate -- confirm against header.h. */
  BENCH("Example1", example1(), Mi*4/128*256, digest_memory(&out[0], &out[128]));
  return 0;
}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace, superfluous-parens
"""
Cookiecutter pre-gen hook.
This gets called with no arguments, and with the project directory
as the working directory. That is empty on the first run, but might
also already be populated when Cookiecutter is called on a pre-
existing project.
Copyright (c) 2015 Juergen Hermann <jh@web.de>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, unicode_literals, print_function
from collections import OrderedDict
import os
import sys
import pprint
DEBUG = False
def run():
    """Main loop."""
    # Fallback for working with older Cookiecutter versions
    Undefined = None  # pylint: disable=invalid-name, unused-variable
    # Make pylint happy
    version = verbose = checkout = repo_dir = context_file = cookiecutter = None
    # The {{ ... }} expressions below are rendered by Jinja2 when Cookiecutter
    # executes this hook; each one becomes a Python literal via `pprint`.
    version = {{ version | pprint }}
    try:
        version_info = tuple(int(i) for i in (version or '').split('.'))
    except (ValueError, TypeError):
        # Missing or non-numeric version string: leave version_info empty.
        version_info = ()
    verbose = {{ verbose | pprint }}
    checkout = {{ checkout | pprint }}
    repo_dir = {{ repo_dir | pprint }}
    context_file = {{ context_file | pprint }}
    context = {{ cookiecutter | pprint }}
    # Diagnostic dump of the render environment (verbose flag or DEBUG).
    if verbose or DEBUG:
        print('*' * 78)
        print('{} "{}"'.format(__doc__.split('.', 1)[0].strip(), sys.argv[0]))
        print('')
        print(u" verbose={}".format(verbose))
        print(u" checkout={}".format(checkout))
        print(u" version={}".format(version))
        print(u" version_info={}".format(version_info))
        print(u" repo_dir={}".format(repo_dir))
        print(u" context_file={}".format(context_file))
        print(u" context={}".format(context))
        print(u""" context[pprint]={{ cookiecutter | pprint }}""")
        print(u" argv={}".format(sys.argv))
        print(u" cwd={}".format(os.getcwd()))
        print(u" ls={}".format(os.listdir('.')))
        print('*' * 78)


if __name__ == '__main__':
    run()
|
// Router exposing the user's shopping-list endpoints.
const express = require('express');
const router = express.Router();
// NOTE(review): checkUserAuth is required here but never applied to the
// route below -- confirm whether GET /all is meant to be authenticated.
const checkUserAuth = require('../../middleware/userAuth');
const shoppingListController = require('../../controller/user/shopping-list');

// GET /all -> returns the entire shopping list.
router.get('/all', shoppingListController.getAllShoppingList);

module.exports = router;
|
"use strict";

// Auto-generated (Babel-transpiled) CommonJS build of useTimeout.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports["default"] = void 0;

var _react = require("react");

var _useCallbackRef3 = _interopRequireDefault(require("./useCallbackRef"));

// Babel interop/array-destructuring helpers -- do not edit by hand.
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }

function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }

function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }

function _iterableToArrayLimit(arr, i) { if (!(Symbol.iterator in Object(arr) || Object.prototype.toString.call(arr) === "[object Arguments]")) { return; } var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }

function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
// React hook: schedules a one-shot timeout for a callback registered via
// the returned setter (transpiled output -- edit the ES source instead).
var useTimeout = function useTimeout() {
  // Delay defaults to 1000ms when no argument is supplied.
  var delay = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 1000;
  var timeoutRef = (0, _react.useRef)();

  var _useCallbackRef = (0, _useCallbackRef3["default"])(),
      _useCallbackRef2 = _slicedToArray(_useCallbackRef, 2),
      callbackRef = _useCallbackRef2[0],
      setCallbackRef = _useCallbackRef2[1];

  // Armed once on mount (empty dependency array); cleanup clears the timer.
  // NOTE(review): callbackRef.current must already be set when the effect
  // runs for the timer to be scheduled -- confirm against useCallbackRef.
  (0, _react.useEffect)(function () {
    if (!timeoutRef.current && callbackRef.current) {
      timeoutRef.current = setTimeout(function () {
        callbackRef.current();
      }, delay);
    }
    return function () {
      clearTimeout(timeoutRef.current);
    };
  }, []);
  return setCallbackRef;
};

var _default = useTimeout;
exports["default"] = _default;
//# sourceMappingURL=useTimeout.js.map
|
// Maps file-name patterns to the icon class shown for that file type.
// First matching rule wins; all patterns are case-insensitive.
export const supportTypes = [
  { rule: /\.(bi)$/i, logo: 'fi-bi' },
  { rule: /\.(sql)$/i, logo: 'fi-spark' },
  { rule: /\.(hql)$/i, logo: 'fi-hive' },
  { rule: /\.(out)$/i, logo: 'fi-storage' },
  { rule: /\.scala$/i, logo: 'fi-scala' },
  { rule: /\.jdbc$/i, logo: 'fi-jdbc' },
  { rule: /\.python$/i, logo: 'fi-python' },
  { rule: /\.py$/i, logo: 'fi-spark-python' },
  { rule: /\.r$/i, logo: 'fi-r' },
  { rule: /\.txt$/i, logo: 'fi-txt' },
  { rule: /\.log$/i, logo: 'fi-log' },
  { rule: /\.xls$/i, logo: 'fi-xls' },
  { rule: /\.xlsx$/i, logo: 'fi-xlsx' },
  { rule: /\.csv$/i, logo: 'fi-csv' },
  { rule: /\.jar$/i, logo: 'fi-jar' },
  { rule: /\.(tisql)$/i, logo: 'fi-spark' },
  { rule: /\.tiscala$/i, logo: 'fi-scala' },
  { rule: /\.tipyspark$/i, logo: 'fi-spark-python' }
]
|
#!/usr/bin/env python
import os
from RouToolPa.Tools import RepeatModeler_search, RepeatMasker_search, TRF_search, extract_repbase
from RouToolPa.Parsers.TRF import CollectionTRF
#reference_name = "LAN210_v0.7m"
#reference_name = "Alsu24m_no_ambigious"
#workdir = "/home/mahajrod/genetics/desaminases/data/%s/masking" % reference_name
#workdir = "/media/mahajrod/d9e6e5ee-1bf7-4dba-934e-3f898d9611c8/Data/Alsu/reference/Alsu24m/masking"
"""
reference_name = "LAN210_v0.10m"
reference_file = reference_name + ".fasta"
workdir = "/home/mahajrod/genetics/desaminases/data/LAN210_v0.10m/masking/"
"""
# Reference genome being masked and where the masking artifacts live.
reference_name = "Alsu24mc"
reference_file = "Alsu24mc.fasta"
reference_dir = "/media/mahajrod/d9e6e5ee-1bf7-4dba-934e-3f898d9611c8/Data/Alsu/reference/Alsu24mc/"
masking_subdir = "masking/"
workdir = reference_dir + masking_subdir
# Prepare the working directory with a symlink to the reference FASTA.
os.system("mkdir -p %s" % workdir)
os.chdir(workdir)
os.system("ln -fs ../%s %s" % (reference_file, reference_file))
os.system("mkdir -p repeatmodeler custom_lib")
custom_lib_dir = workdir + "custom_lib"
# Step 1: de novo repeat discovery with RepeatModeler.
os.chdir("repeatmodeler")
RepeatModeler_search("../%s" % reference_file, reference_name,
                     RepeatModeler_dir="/home/mahajrod/Repositories/genetic/NGS_tools/RepeatModeler")
os.chdir(workdir)
# Step 2: extract the fungal RepBase library to merge with de novo repeats.
extract_repbase("fungi", output_file="custom_lib/RepBase_fungi.fasta",
                RepeatMaskerUtils_dir="/home/mahajrod/Repositories/genetic/NGS_tools/RepeatMasker/util")
# RepeatModeler writes into a run-specific RM* subdirectory; locate it.
for entry in os.listdir(workdir + "repeatmodeler"):
    if "RM" in entry:
        repeatmodeler_dir = workdir + "repeatmodeler/" + entry
        break
os.chdir(workdir)
# Merge the classified consensi and RepBase sequences into one library
# (tail -n +2 drops the first header line of the RepBase file).
os.system("/bin/cp -f %s/consensi.fa.classified %s/custom_lib.fasta" % (repeatmodeler_dir, custom_lib_dir))
os.system("tail -n +2 %s/RepBase_fungi.fasta >> %s/custom_lib.fasta" % (custom_lib_dir, custom_lib_dir))
# Step 3: mask the genome with RepeatMasker using the combined library.
os.chdir(workdir)
os.system("mkdir -p repeatmasker")
os.chdir("repeatmasker")
os.system("ln -fs ../%s %s" % (reference_file, reference_file))
os.system("ln -fs ../custom_lib/custom_lib.fasta custom_lib.fasta")
RepeatMasker_search(reference_file, "fungi", custom_lib_path="custom_lib.fasta")
# Step 4: tandem-repeat finding (TRF) on the RepeatMasker-masked FASTA.
os.chdir(workdir)
os.system("mkdir -p TRF")
os.chdir("TRF")
os.system("ln -fs ../repeatmasker/%s.masked %s_masked_repeatmasker.fasta" % (reference_file, reference_name))
TRF_search("%s_masked_repeatmasker.fasta" % reference_name)
# Parse TRF's .dat output (parameter string is encoded in the file name)
# and export the tandem repeats as GFF.
trf_coll = CollectionTRF(trf_file="%sTRF/%s_masked_repeatmasker.fasta.2.7.7.80.10.50.500.dat" % (workdir, reference_name),
                         from_file=True)
trf_coll.write_gff("%sTRF/trf.gff" % workdir)
# NOTE(review): the '*.mask' glob may be a typo for '*.masked' -- confirm
# what file names TRF actually produces before relying on this copy.
os.system("/bin/cp -f *.mask ../%s_masked.fasta" % reference_name)
|
const express = require("express");
const app = express();
const tasks = require("./routes/tasks");
const connectDB = require("./db/connect");
require("dotenv").config();
const notFound = require("./middleware/not-found");
const errorHandlerMiddleware = require("./middleware/error-handler");

// middlewares
app.use(express.static("./public"));
app.use(express.json());

// routes
app.use("/api/v1/tasks", tasks);

// BUG FIX: the 404 handler and the error handler must be registered AFTER
// the routes -- the error handler was previously mounted before them, so
// Express never forwarded route errors to it.
app.use(notFound);
app.use(errorHandlerMiddleware);

const port = process.env.PORT || 3000;

const start = async () => {
  try {
    await connectDB(process.env.MONGO_URI);
    // BUG FIX: console.log was invoked immediately and its (undefined)
    // return value passed as the listen callback; wrap it in a function
    // so the message is printed once the server is actually listening.
    app.listen(port, () => console.log(`Server is listening on port ${port}...`));
  } catch (error) {
    console.log(error);
  }
};

start();
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/autoscaling/AutoScaling_EXPORTS.h>
#include <aws/autoscaling/AutoScalingRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace AutoScaling
{
namespace Model
{
/**
* <p>Contains the parameters for DeleteNotificationConfiguration.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteNotificationConfigurationType">AWS
* API Reference</a></p>
*/
  /* Auto-generated AWS SDK request class -- keep in sync with the code
   * generator rather than hand-editing the accessors. */
  class AWS_AUTOSCALING_API DeleteNotificationConfigurationRequest : public AutoScalingRequest
  {
  public:
    DeleteNotificationConfigurationRequest();

    // Serializes the request parameters for the Query-protocol API call.
    Aws::String SerializePayload() const override;

  protected:
    // Appends the request parameters to the URI for GET-style submission.
    void DumpBodyToUrl(Aws::Http::URI& uri ) const override;

  public:

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline const Aws::String& GetAutoScalingGroupName() const{ return m_autoScalingGroupName; }

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline void SetAutoScalingGroupName(const Aws::String& value) { m_autoScalingGroupNameHasBeenSet = true; m_autoScalingGroupName = value; }

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline void SetAutoScalingGroupName(Aws::String&& value) { m_autoScalingGroupNameHasBeenSet = true; m_autoScalingGroupName = std::move(value); }

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline void SetAutoScalingGroupName(const char* value) { m_autoScalingGroupNameHasBeenSet = true; m_autoScalingGroupName.assign(value); }

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithAutoScalingGroupName(const Aws::String& value) { SetAutoScalingGroupName(value); return *this;}

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithAutoScalingGroupName(Aws::String&& value) { SetAutoScalingGroupName(std::move(value)); return *this;}

    /**
     * <p>The name of the Auto Scaling group.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithAutoScalingGroupName(const char* value) { SetAutoScalingGroupName(value); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline const Aws::String& GetTopicARN() const{ return m_topicARN; }

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline void SetTopicARN(const Aws::String& value) { m_topicARNHasBeenSet = true; m_topicARN = value; }

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline void SetTopicARN(Aws::String&& value) { m_topicARNHasBeenSet = true; m_topicARN = std::move(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline void SetTopicARN(const char* value) { m_topicARNHasBeenSet = true; m_topicARN.assign(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithTopicARN(const Aws::String& value) { SetTopicARN(value); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithTopicARN(Aws::String&& value) { SetTopicARN(std::move(value)); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
     * (SNS) topic.</p>
     */
    inline DeleteNotificationConfigurationRequest& WithTopicARN(const char* value) { SetTopicARN(value); return *this;}

  private:

    Aws::String m_autoScalingGroupName;
    bool m_autoScalingGroupNameHasBeenSet; // tracks whether the field was explicitly set

    Aws::String m_topicARN;
    bool m_topicARNHasBeenSet; // tracks whether the field was explicitly set
  };
} // namespace Model
} // namespace AutoScaling
} // namespace Aws
|
import unittest
import unittest.mock as mock
import mycroft.audio.services.vlc as vlc
# Audio-service configuration shared by all tests below: one backend named
# 'test_simple', routed to the VLC service implementation and enabled.
config = {
    'backends': {
        'test_simple': {
            'type': 'vlc',
            'active': True
        }
    }
}
@mock.patch('mycroft.audio.services.vlc.vlc')
class TestVlcBackend(unittest.TestCase):
    """Tests for the mycroft VLC audio backend.

    The ``vlc`` python bindings are patched out for the whole class, so
    ``service.player`` / ``service.list_player`` / ``service.instance`` are
    all Mocks whose calls can be inspected directly.
    """

    def test_load_service(self, mock_vlc_mod):
        """load_service() returns exactly one service for this config."""
        bus = mock.Mock()
        self.assertEqual(len(vlc.load_service(config, bus)), 1)

    def test_playlist_methods(self, mock_vlc_mod):
        """add_list() fills the track list; clear_list() replaces it."""
        bus = mock.Mock()
        service = vlc.VlcService(config, bus)
        self.assertTrue(isinstance(service.supported_uris(), list))
        # Check that the tracks are added to the track_list.
        # FIX: the original called Mock.has_calls(), which is not an
        # assertion method (attribute access on a Mock silently succeeds),
        # so the check never ran. Assert on the call count instead — one
        # add_media() call per supplied track.
        service.add_list(['a.mp3', 'b.ogg', ['c.wav', 'audio/wav']])
        self.assertEqual(service.track_list.add_media.call_count, 3)
        # Check that clearing replaces the playlist with an empty one
        empty_list = mock.Mock(name='EmptyList')
        service.instance.media_list_new.return_value = empty_list
        service.clear_list()
        self.assertTrue(service.track_list is empty_list)
        service.list_player.set_media_list.assert_called_with(empty_list)

    def test_playback_methods(self, mock_vlc_mod):
        """play/pause/resume/stop drive the underlying VLC players."""
        bus = mock.Mock()
        service = vlc.VlcService(config, bus)
        loop_mode = mock.Mock(name='Loop')
        normal_mode = mock.Mock(name='Normal')
        mock_vlc_mod.PlaybackMode.loop = loop_mode
        mock_vlc_mod.PlaybackMode.default = normal_mode
        # Check normal play: default playback mode, then play
        service.play(repeat=False)
        service.list_player.set_playback_mode.assert_called_with(normal_mode)
        service.list_player.set_playback_mode.reset_mock()
        self.assertTrue(service.list_player.play.called)
        service.list_player.play.reset_mock()
        # Check repeat: loop playback mode, then play
        service.play(repeat=True)
        service.list_player.set_playback_mode.assert_called_with(loop_mode)
        service.list_player.set_playback_mode.reset_mock()
        self.assertTrue(service.list_player.play.called)
        service.list_player.play.reset_mock()
        # Check pause
        service.pause()
        service.player.set_pause.assert_called_with(1)
        service.player.set_pause.reset_mock()
        # Check resume
        service.resume()
        service.player.set_pause.assert_called_with(0)
        service.player.set_pause.reset_mock()
        # Check stop: stop() reports whether anything was actually playing
        service.player.is_playing.return_value = False
        self.assertFalse(service.stop())
        service.player.is_playing.return_value = True
        self.assertTrue(service.stop())
|
/**
 * Publish page: create a new activity or group-buy post.
 *
 * @author: Eysonyou
 * @create: 2019-05-14
 */
const app = getApp();
Page({
  /**
   * Initial data for the page.
   */
  data: {
    // Mirror of the textarea content; works around a WeChat bug where the
    // textarea value cannot be read from the form on submit (see bindInput)
    content: '',
    // Cached user profile info
    userInfo: {},
    // Current content length; content is limited to 200 characters
    contentSize: 0,
    // Scene options ("活动" = activity, "拼团" = group buy)
    scenes: ["活动", "拼团"],
    // Index of the currently selected scene
    sceneIndex: 0,
    // Upload widget - list of chosen image files
    files: [],
    fileID: ''
  },
  /**
   * Lifecycle hook -- page load.
   */
  onLoad: function(options) {
    // Cloud functions are not supported by this WeChat client version
    if (!wx.cloud) {
      wx.showModal({
        title: '提示',
        content: '微信版本过低,请升级微信'
      });
      return;
    }
    wx.setNavigationBarTitle({
      title: '发起'
    });
    // Fetch the user's profile info
    wx.getUserInfo({
      lang: 'zh_CN',
      success: this.getUserInfoSuccess
    });
  },
  /**
   * Lifecycle hook -- first render finished.
   */
  onReady: function() {
  },
  /**
   * Lifecycle hook -- page shown.
   */
  onShow: function() {
  },
  /**
   * Lifecycle hook -- page hidden.
   */
  onHide: function() {
  },
  /**
   * Lifecycle hook -- page unloaded.
   */
  onUnload: function() {
  },
  /**
   * Page event handler -- user pull-down refresh.
   */
  onPullDownRefresh: function() {
  },
  /**
   * Page event handler -- scrolled to the bottom.
   */
  onReachBottom: function() {
  },
  /**
   * User tapped the share button in the top-right corner.
   */
  onShareAppMessage: function() {
  },
  /**
   * Scene picker changed: switch scene and drop any uploaded image.
   */
  bindSceneChange: function(e) {
    // console.log('Scenes:', e.detail.value);
    this.setData({
      sceneIndex: e.detail.value,
      fileID: ''
    })
  },
  /**
   * Validate the form and submit it to the 'post' cloud function.
   */
  bindSubmit: function(e){
    if(!app.globalData.isLogin) {
      wx.showModal({
        title: '提示',
        content: '发布失败,请先登录'
      });
      return;
    }
    let data = e.detail ? e.detail.value : {};
    let userInfo = this.data.userInfo;
    // Use the mirrored textarea content (see the bug note on data.content)
    data.content = this.data.content;
    // Uploaded image file ID
    data['fileID'] = this.data.fileID;
    // Attach the user's profile info
    data['nickName'] = userInfo.nickName;
    data['avatarUrl'] = userInfo.avatarUrl;
    data['city'] = userInfo['city'];
    data['country'] = userInfo['country'];
    data['gender'] = userInfo['gender'];
    data['language'] = userInfo['language'];
    console.log(data);
    console.log('post submit data');
    // NOTE(review): the toast says the limit is 1~1000, but this condition
    // lets 0 through -- confirm whether `< 1` was intended.
    if (data['maximum'] < 0 || data['maximum'] > 1000){
      wx.showToast({
        title: '超出报名上限(1~1000)',
        icon: 'none'
      });
      return;
    }
    if(!data['title']) {
      wx.showToast({
        title: data['sceneIndex'] == 0 ? '请输入活动主题' : '请输入商品名称',
        icon: 'none'
      });
      return;
    }
    // Group-buy scene requires a price
    if (parseInt(data['sceneIndex']) === 1 && !data['amount']) {
      wx.showToast({
        title: '请输入商品价格',
        icon: 'none'
      });
      return;
    }
    if (!data['content']) {
      wx.showToast({
        title: data['sceneIndex'] == 0 ? '请输入活动内容' : '请输入商品描述',
        icon: 'none'
      });
      return;
    }
    // Show a global loading indicator while submitting
    wx.showLoading({
      title: '提交中...',
    });
    // Call the cloud function
    wx.cloud.callFunction({
      name: 'post',
      data: data
    }).then(res => {
      console.log(res);
      console.log('post POST func success');
      // Hide the loading indicator
      wx.hideLoading();
      // Jump to the detail page of the newly created post
      wx.redirectTo({
        url: `/pages/detail/detail?id=${res.result._id}`,
      });
    }).catch(err => {
      console.log(err);
      console.log('post POST func err');
      wx.hideLoading();
      wx.showModal({
        title: '提示',
        content: '系统异常,请稍后再试'
      });
    })
  },
  /**
   * Mirror the textarea content into page data and track its length.
   */
  bindInput: function(e){
    let v = e.detail ? e.detail.value : '';
    this.setData({
      content: v,
      contentSize: v.length
    });
    // console.log(e);
  },
  /**
   * wx.getUserInfo success callback -- cache the profile in page data.
   */
  getUserInfoSuccess: function(res){
    const userInfo = res.userInfo || {};
    // console.log(userInfo);
    this.setData({
      userInfo: userInfo
    });
    // console.log(this.data.userInfo);
  },
  /**
   * Pick one image from album/camera and upload it to cloud storage.
   */
  chooseImage: function (e) {
    var that = this;
    wx.chooseImage({
      // Only one image may be uploaded
      count: 1,
      // Allow both the original and the compressed version (default: both)
      sizeType: ['original', 'compressed'],
      // Allow both the photo album and the camera as sources (default: both)
      sourceType: ['album', 'camera'],
      success: function (res) {
        wx.showLoading({
          title: '图片上传中...',
        });
        let filePath = res.tempFilePaths[0];
        // res.tempFilePaths holds local paths of the selected photos;
        // a tempFilePath can be used directly as an image src
        that.setData({
          // Only one image: each pick overwrites the previous one
          files: res.tempFilePaths
          // files: that.data.files.concat(res.tempFilePaths)
        });
        // console.log(res);
        // console.log(that.data.files);
        // Extract the file extension, e.g. ".jpg"
        let ext = filePath.match(/\.[^.]+?$/)[0];
        let sceneIndex = that.data.sceneIndex;
        let time = (new Date()).getTime();
        let rand = parseInt(Math.random() * 100000);
        // Cloud file name: scene index + timestamp + random suffix
        let cloudPath = `img_${sceneIndex}_${time}_${rand}${ext}`;
        console.log(cloudPath);
        wx.cloud.uploadFile({
          cloudPath: cloudPath,
          filePath: filePath,
          success: function(res){
            // todo
            console.log(res);
            that.setData({
              fileID: res.fileID
            });
          },
          fail: function(e){
            // todo
            console.log(e);
          },
          complete: function(){
            wx.hideLoading();
          }
        });
      },
      fail: function(e){
        console.log('----upload fail----');
        console.log(e);
      },
      complete: function(res){
        wx.hideLoading();
      }
    })
  },
  /**
   * Preview the chosen image.
   * TODO: add a delete feature.
   */
  previewImage: function (e) {
    wx.previewImage({
      current: e.currentTarget.id, // link of the image currently shown
      urls: this.data.files // list of image links to preview
    });
  }
})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module for training a CNN model on neural data.
Created on Mon Aug 28 11:21:43 2017
@author: paul.herringer
"""
from keras.models import load_model
from keras.callbacks import (
EarlyStopping,
CSVLogger,
ModelCheckpoint,
TensorBoard,
ReduceLROnPlateau
)
from cnn.data_processing import generate_dataset
def train_cnn_model(model, datasets, networks, parents, verbose=1, **params):
    """Build a training set and fit a CNN that predicts neuron connectivity.

    Training metrics are logged to CSV and TensorBoard, and the weights that
    achieve the best validation score are checkpointed to disk. After each
    batch-size stage the best checkpoint is reloaded, so batch-size
    annealment always continues from the best model so far.

    Args:
        model: A compiled Keras Model object.
        datasets: List of full spike or fluorescence datasets, each of
            shape (neurons, timesteps).
        networks: List of adjacency matrices (neurons, neurons) giving the
            true connections for each dataset.
        parents: List of dicts with indices of the strongest drivers of
            each neuron (GTE estimates); NoneType entries are estimated
            inside generate_dataset.
        verbose: Control what gets printed to the console.
        **logdir: Directory for training logs and the saved model.
        **batch_sizes: List of batch sizes (enables batch-size annealment).
        **epochs: Maximum epochs to train for each batch size.
        **early_stopping_monitor / **early_stopping_patience: Early-stop
            metric and patience.
        **checkpoint_monitor: Metric used to decide which weights to save.
        **lr_decay_monitor / **lr_decay_factor / **lr_decay_patience:
            Learning-rate annealment metric, factor (new lr = lr * factor)
            and patience.
        Also accepts **params for data_processing.generate_dataset.

    Returns:
        model: The Model with the best validation results per
            checkpoint_monitor (not necessarily the last epoch's model).
        mean: Element-wise mean of the training examples; subtract it from
            all validation/prediction data during preprocessing.
    """
    # Resolve hyperparameters; setdefault writes the defaults back into
    # ``params`` so generate_dataset sees the same resolved values.
    defaults = {
        'logdir': '/cnn_training_logs/',
        'batch_sizes': [256],
        'epochs': 200,
        'early_stopping_monitor': 'val_loss',
        'early_stopping_patience': 20,
        'checkpoint_monitor': 'val_loss',
        'lr_decay_monitor': 'val_loss',
        'lr_decay_factor': 0.1,
        'lr_decay_patience': 10,
    }
    for key, value in defaults.items():
        params.setdefault(key, value)

    # Training data
    ex_train, ex_valid, lbl_train, lbl_valid, mean = generate_dataset(
        datasets, networks, parents, mode='train', verbose=verbose, **params)

    # Callbacks: early stop, CSV metrics log, best-weights checkpoint,
    # TensorBoard events, and plateau-triggered learning-rate decay.
    callbacks = [
        EarlyStopping(monitor=params['early_stopping_monitor'],
                      patience=params['early_stopping_patience']),
        CSVLogger(params['logdir'] + 'training_log.csv', append=True),
        ModelCheckpoint(params['logdir'] + 'best_model.h5',
                        monitor=params['checkpoint_monitor'],
                        save_best_only=True,
                        verbose=verbose),
        TensorBoard(log_dir=params['logdir'], write_graph=False),
        ReduceLROnPlateau(monitor=params['lr_decay_monitor'],
                          factor=params['lr_decay_factor'],
                          patience=params['lr_decay_patience'],
                          verbose=1),
    ]

    # Training: one fit per batch size, resuming from the best checkpoint.
    for batch_size in params['batch_sizes']:
        model.fit(
            ex_train, lbl_train, batch_size=batch_size,
            epochs=params['epochs'],
            validation_data=(ex_valid, lbl_valid), shuffle=True,
            callbacks=callbacks,
        )
        model = load_model(params['logdir'] + 'best_model.h5')

    return model, mean
|
import React from "react";
import { IndemniteCCn } from "../../components/IndemniteConventionnelle";
import { getIndemniteFromFinalForm } from "../../indemnite";
import { getIndemnite, getSalaireRef } from "./indemnite";
export function Result({ form }) {
const state = form.getState();
const {
hasTempsPartiel = false,
hasSameSalaire = false,
salairePeriods = [],
salaires = [],
salaire,
anciennete,
dateEntree,
dateSortie,
branche,
motif,
categorie,
} = state.values;
const { indemniteLegale, infoCalculLegal } = getIndemniteFromFinalForm(form);
const salaireRef = getSalaireRef({
anciennete,
hasSameSalaire,
hasTempsPartiel,
salaire,
salairePeriods,
salaires,
});
const { error, indemniteConventionnelle, infoCalculConventionnel } =
getIndemnite({
anciennete,
categorie,
dateEntree,
dateSortie,
motif,
salaireRef,
});
return (
<IndemniteCCn
indemniteConventionnelle={indemniteConventionnelle}
indemniteLegale={indemniteLegale}
infoCalculLegal={infoCalculLegal}
infoCalculConventionnel={infoCalculConventionnel}
branche={branche}
error={error}
/>
);
}
|
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function () {
  'use strict';

  var module = angular.module('pnc.common.pnc-client.rsql');

  /**
   * @ngdoc service
   * @kind function
   * @name pnc.common.pnc-client.rsql:selector
   * @description
   * This is an internal class used by rsqlQuery, see that for usage instructions.
   *
   * @author Alex Creasy
   */
  module.factory('selector', [
    function () {
      // Builds the "field selection" state of the query builder,
      // e.g. `.where('name')`.
      return function selector(ctx) {
        var self = {};

        // Append a field name to the query and advance to the next state.
        self.where = function (fieldName) {
          ctx.addToQuery(fieldName);
          return ctx.next();
        };

        // Wrap a pre-built sub-query in parentheses and jump straight to
        // the operator state.
        self.brackets = function (subQuery) {
          ctx.addToQuery('(' + subQuery + ')');
          return ctx.jumpTo('operator');
        };

        return self;
      };
    }
  ]);
})();
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright [2018] Tatarnikov Viktor [viktor@tatarnikov.org]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Scopuli.Interfaces.MySQL.SQLAlchemy import *
from Scopuli.Interfaces.MySQL.Schema.Core.Server import Server
class WebSite(Base, Schema):
    """
    Table listing the web sites served by the system.
    (Original: "Таблица с перечнем сайтов".)
    """
    __tablename__ = 'web_site'
    __table_args__ = {
        'mysql_engine' : 'InnoDB',
        'mysql_charset': 'utf8',
        'mysql_collate': 'utf8_general_ci',
        'mysql_comment': 'Таблица с перечнем сайтов'
    }

    # Surrogate primary key.
    id = Column(Integer(), primary_key=True, autoincrement=True, doc="Row ID - Сурогатный ключ")
    # NOTE(review): server-side default is 2021 but the Python-side default
    # is 2020 — confirm which Server id is actually intended.
    cd_server = Column(Integer(), ColumnDefault(2021), ForeignKey(Server.id), default=2020, nullable=False, doc="Ссылка на Server")
    # Unique ASCII code / URL of the site (production and development).
    code = Column(String(64), ColumnDefault(""), nullable=False, doc="Уникальный код сайта в ASCII")
    url = Column(String(64), ColumnDefault(""), nullable=False, doc="Уникальный URL сайта в ASCII")
    code_develop = Column(String(64), ColumnDefault(""), nullable=False, doc="Уникальный код сайта в ASCII для разработки")
    url_develop = Column(String(64), ColumnDefault(""), nullable=False, doc="Уникальный URL сайта в ASCII для разработки")
    # Display name and description.
    title = Column(String(64), ColumnDefault(""), nullable=False, doc="Наименование")
    description = Column(String(128), ColumnDefault(""), nullable=False, doc="Описание")
    # HTML <meta> fields for the site.
    meta_title = Column(String(256), ColumnDefault(""), nullable=False, doc="Заголовок сайта")
    meta_description = Column(String(256), ColumnDefault(""), nullable=False, doc="Описание сайта")
    meta_autor = Column(String(256), ColumnDefault(""), nullable=False, doc="Автор сайта")
    meta_autor_url = Column(String(64), ColumnDefault(""), nullable=False, doc="Адрес автора сайта")
    meta_keywords = Column(String(256), ColumnDefault(""), nullable=False, doc="Перечень ключевых слов")
    meta_copyrights = Column(String(256), ColumnDefault(""), nullable=False, doc="Копирайтинг")
    # Status flags.
    is_enable = Column(Boolean(), ColumnDefault(True), default=True, nullable=False, doc="Метка использования")
    # NOTE(review): for is_secure and is_devel the server-side default is
    # False while the Python-side default is True — confirm intended value.
    is_secure = Column(Boolean(), ColumnDefault(False), default=True, nullable=False, doc="Метка использования SSL")
    is_devel = Column(Boolean(), ColumnDefault(False), default=True, nullable=False, doc="Метка статуса сайта")

    # Relationships
    pages = relationship("WebPage", lazy='dynamic')
    server = relationship(Server, backref='WebPage')

    @hybrid_property
    def ip_port(self):
        """
        Return the TCP port the virtual host is served on
        (443 when SSL is enabled, 80 otherwise).

        :return: port number
        :rtype: Integer
        """
        if self.is_secure:
            return 443
        else:
            return 80

    @hybrid_property
    def ip_addr(self):
        """
        Return the IP address of the server hosting this virtual host.

        :return: server IP address
        :rtype: IPAddress
        """
        return self.server.address_ipv4

    @hybrid_property
    def url_prefix(self):
        """
        Return the protocol prefix this site is served over.

        :return: "https://" when SSL is enabled, else "http://"
        :rtype: String
        """
        if self.is_secure:
            return "https://"
        else:
            return "http://"
class WebPage(Base, Schema):
    """
    Table listing the pages of a web site (self-referencing tree via
    ``cd_parent``). (Original: "Таблица с перечнем страниц".)
    """
    __tablename__ = 'web_page'
    __table_args__ = {
        'mysql_engine' : 'InnoDB',
        'mysql_charset': 'utf8',
        'mysql_collate': 'utf8_general_ci',
        'mysql_comment': 'Таблица с перечнем страниц'
    }

    # Surrogate primary key.
    id = Column(Integer(), primary_key=True, autoincrement=True, doc="Row ID - Сурогатный ключ")
    # Owning site and optional parent page (tree structure).
    cd_web_site = Column(Integer(), ForeignKey('web_site.id'), index=True, nullable=False, doc="Ссылка на WebSite")
    cd_parent = Column(Integer(), ForeignKey(id), nullable=True, doc="Родитель")
    # Identification and display fields.
    code = Column(String(64), ColumnDefault(""), nullable=False, doc="Кодовое наименование модуля в ASCII")
    title = Column(String(128), ColumnDefault(""), nullable=False, doc="Наименование страницы")
    description = Column(String(128), ColumnDefault(""), nullable=False, doc="Описание страницы")
    # URL and navigation-link texts (long and short variants).
    url = Column(String(64), ColumnDefault(""), nullable=False, doc="Уникальный URL модуля в ASCII")
    url_title = Column(String(64), ColumnDefault(""), nullable=False, doc="Наименование ссылки")
    url_title_short = Column(String(64), ColumnDefault(""), nullable=False, doc="Наименование ссылки короткое")
    url_description = Column(String(128), ColumnDefault(""), nullable=False, doc="Описание ссылки")
    url_description_short = Column(String(128), ColumnDefault(""), nullable=False, doc="Описание ссылки короткое")
    is_enable = Column(Boolean(), ColumnDefault(True), default=True, nullable=False, doc="Метка использования")
    template_name = Column(String(256), ColumnDefault(""), nullable=False, doc="Название шаблона")
    # HTML <meta> fields for the page.
    meta_title = Column(String(256), ColumnDefault(""), nullable=False, doc="Заголовок страницы")
    meta_description = Column(String(256), ColumnDefault(""), nullable=False, doc="Описание страницы")
    meta_autor = Column(String(256), ColumnDefault(""), nullable=False, doc="Автор страницы")
    meta_autor_url = Column(String(64), ColumnDefault(""), nullable=False, doc="Адрес автора страницы")
    meta_keywords = Column(String(256), ColumnDefault(""), nullable=False, doc="Перечень ключевых слов")

    # Automatic audit timestamps (UTC, set/updated by the database layer).
    date_create = Column(DateTime(), nullable=False, default=func.utc_timestamp(), doc="AutoLogger - Время создания")
    date_change = Column(DateTime(), nullable=False, default=func.utc_timestamp(), onupdate=func.utc_timestamp(),
                         doc="AutoLogger - Время последнего изменения")

    # Relationships
    parent = relationship("WebPage", remote_side=[id])
    site = relationship("WebSite", backref="WebPage")
|
# -*- coding: utf-8 -*-
# Created by José Miguel García Benayas
import logging
import random
from graph_utils import GraphUtils
from solution_greedy_adjacent import SolutionGreedyNeighbors
from solution_greedy_ratio import SolutionGreedyRatio
class SolutionGrasp:
    """GRASP (Greedy Randomized Adaptive Search Procedure) clique solver
    with a ratio-driven local-search improvement phase."""

    # Candidate-scoring strategies for the construction phase.
    ADJACENT = "adjacent"
    RATIO = "ratio"
    LOGGER = logging.getLogger(__name__)

    def find_grasp_solution(self, graph, name, solution_type, fixed_seed, alpha):
        """Find solution on graph with a GRASP algorithm.

        Args:
            graph: graph whose ``nodes`` maps index -> node.
            name: identifier forwarded to the greedy sub-solvers.
            solution_type: ADJACENT or RATIO scoring strategy.
            fixed_seed: RNG seed, for reproducible runs.
            alpha: greediness factor in [0, 1]; 0 keeps only the best
                candidates, 1 keeps them all.

        Returns:
            Set of node indices forming the constructed clique.
        """
        random.seed(fixed_seed)
        total_keys = sorted(list(graph.nodes.keys()))
        # Random starting vertex drawn from the full index range.
        vertex = random.randint(total_keys[0], total_keys[-1])
        solution = {vertex}
        # Candidate list: neighbours compatible with the current solution.
        cl = graph.nodes[vertex].neighbors_indices.copy()
        while len(cl) != 0:
            g_min, g_max, gc = self.get_g(cl, solution_type, graph, name)
            # Quality threshold defining the restricted candidate list.
            mu = g_max - alpha * (g_max - g_min)
            rcl = self.get_rcl(mu, gc)
            # NOTE(review): reseeding with the same fixed_seed on every
            # iteration restarts the RNG from an identical state each
            # time -- confirm this repetition is intended.
            random.seed(fixed_seed)
            random_position = random.randint(0, len(rcl) - 1)
            u = rcl[random_position][0]
            if GraphUtils.become_clique(graph, solution, u):
                solution = solution.union({u})
            # Restrict the candidate list to u's neighbourhood.
            cl -= {u}
            cl.intersection_update(graph.get_node(u).neighbors_indices)
        return solution

    def get_g(self, candidates_list, solution_type, graph, name):
        """ Find g_min and g_max with current candidate list and solution type chosen. """
        g_c = dict()
        for candidate in candidates_list:
            # Score each candidate by running the chosen greedy solver
            # seeded at that candidate.
            chosen_solution = None
            if solution_type == self.ADJACENT:
                chosen_solution = SolutionGreedyNeighbors(graph, name)
            if solution_type == self.RATIO:
                chosen_solution = SolutionGreedyRatio(graph, name)
            chosen_solution.find_clique(candidate)
            g_c.update({candidate: chosen_solution.sol_value})
        # Best value first.
        sorted_gc = sorted(g_c.items(), key=lambda kv: kv[1], reverse=True)
        g_min = sorted_gc[-1]
        g_max = sorted_gc[0]
        return g_min[1], g_max[1], sorted_gc

    @staticmethod
    def get_rcl(mu, gc):
        """ Get a remaining candidate list with mu limit.

        ``gc`` is sorted best-value-first; keeps the prefix of candidates
        whose value is strictly greater than mu.
        """
        position = 0
        for current_candidate in gc:
            if current_candidate[1] <= mu:
                break
            else:
                position += 1
        # NOTE(review): when even the best candidate is <= mu the whole
        # list is returned -- confirm this fallback is intended.
        if position == 0:
            remaining_candidates = gc.copy()
        else:
            remaining_candidates = gc[:position].copy()
        return remaining_candidates

    def apply_ls(self, graph, solution):
        """Local search: try to grow/repair the clique by inserting
        neighbours in decreasing p/q ratio order; return the best clique
        found (largest, ties broken by ratio)."""
        sol_copy = solution.copy()
        ratio_neighbors = set()
        for node in solution:
            ratio_neighbors.update(graph.nodes[node].neighbors_indices)
        o_ratio_neighbors = sorted(ratio_neighbors, key=lambda x: graph.nodes[x].p_weight / graph.nodes[x].q_weight,
                                   reverse=True)
        new_sol_temp = sol_copy.copy()
        ls_solutions = list()
        for node_ratio in o_ratio_neighbors:
            new_sol_temp.update({node_ratio})
            if not GraphUtils.is_clique_solution(graph, new_sol_temp):
                # Inserting broke the clique: drop the conflicting nodes.
                new_sol_temp = self.clean_conflicted_nodes(graph, node_ratio, new_sol_temp)
            if GraphUtils.is_clique_solution(graph, new_sol_temp):
                # Re-expand from every member and record each valid clique.
                for node_clique in new_sol_temp:
                    result, result_ratio = self.find_clique_aux(graph, node_clique, new_sol_temp)
                    if GraphUtils.is_clique_solution(graph, result):
                        ls_solutions.append((len(result), result_ratio, result))
            else:
                new_sol_temp.discard(node_ratio)
        return self.give_solution(ls_solutions, graph)

    def clean_conflicted_nodes(self, graph, better_node, new_sol_temp):
        """Remove every node not adjacent to ``better_node`` so the set can
        stay a clique containing ``better_node``."""
        to_delete = set()
        for node in new_sol_temp:
            if better_node not in graph.nodes[node].neighbors_indices and better_node != node:
                to_delete.add(node)
        local_sol = new_sol_temp.difference(to_delete)
        local_sol.update({better_node})
        return local_sol

    def find_clique_aux(self, graph, father, old_clique):
        """Greedily extend ``old_clique`` with the best-ratio neighbours of
        ``father``; return the clique and its ratio."""
        clique = old_clique.copy()
        adjacent = graph.get_node(father).neighbors_indices.copy()
        while len(adjacent) != 0:
            candidate = self.find_better(graph, adjacent)
            if GraphUtils.become_clique(graph, clique, candidate):
                adjacent = GraphUtils.discard_adjacent(graph, adjacent, candidate)
                clique.update({candidate})
            else:
                adjacent.discard(candidate)
        return clique, GraphUtils.calculate_clique_ratio(graph, clique)

    def find_better(self, graph, adjacent):
        """Return the node in ``adjacent`` with the highest p/q ratio."""
        current_ratio = -1
        node_chosen = None
        for node in adjacent:
            node_ratio = graph.get_node(node).p_weight / graph.get_node(node).q_weight
            if node_ratio > current_ratio:
                current_ratio = node_ratio
                node_chosen = node
        return node_chosen

    def give_solution(self, ls_solutions, graph):
        """Pick the best recorded solution: largest cardinality first, ties
        broken by highest ratio.

        Each entry of ``ls_solutions`` is (cardinality, ratio, clique).
        """
        sort_by_cardinality = sorted(ls_solutions, key=lambda x: x[0], reverse=True)
        # FIX: renamed from the misleading `max_ratio` (it is a cardinality),
        # and removed a leftover debug re-check that printed 'a'.
        max_cardinality = sort_by_cardinality[0][0]
        ls_tuple_max_cardinality = [(x, y, z) for (x, y, z) in sort_by_cardinality if x == max_cardinality]
        sorted_by_ratio = sorted(ls_tuple_max_cardinality, key=lambda x: x[1], reverse=True)
        return sorted_by_ratio[0][2]
|
# -*- coding: utf-8 -*-
import os
import re
from sphinx_testing import with_app
import unittest
# Regex fragment matching a line break in the generated TeX (optional CR).
CR = '\r?\n'

# TrueType font used when rendering diagrams to PDF; the PDF tests are
# skipped when it is not installed.
nwdiag_fontpath = '/usr/share/fonts/truetype/ipafont/ipagp.ttf'

# Build with the latex builder, emitting PNG images (the default format).
with_png_app = with_app(srcdir='tests/docs/nwdiag',
                        buildername='latex',
                        write_docstring=True,
                        confoverrides={
                            'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                        })
# Same, but emit PDF images via the current option name.
with_pdf_app = with_app(srcdir='tests/docs/nwdiag',
                        buildername='latex',
                        write_docstring=True,
                        confoverrides={
                            'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                            'nwdiag_latex_image_format': 'PDF',
                            'nwdiag_fontpath': nwdiag_fontpath,
                        })
# Same, but via the legacy option name ('nwdiag_tex_image_format').
with_oldpdf_app = with_app(srcdir='tests/docs/nwdiag',
                           buildername='latex',
                           write_docstring=True,
                           confoverrides={
                               'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                               'nwdiag_tex_image_format': 'PDF',
                               'nwdiag_fontpath': nwdiag_fontpath,
                           })
class TestSphinxcontribNwdiagLatex(unittest.TestCase):
    # NOTE: each test method's docstring is used as the reST source of the
    # test document (write_docstring=True), so the docstrings below are
    # fixtures, not documentation -- do not edit them.
    #
    # FIX: assertRegexpMatches (deprecated since Python 3.2, removed in
    # Python 3.12) replaced with assertRegex throughout.

    @with_png_app
    def test_build_png_image(self, app, status, warning):
        """
        .. nwdiag::

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics{{nwdiag-.*?}.png}')

    @unittest.skipUnless(os.path.exists(nwdiag_fontpath), "TrueType font not found")
    @with_pdf_app
    def test_build_pdf_image1(self, app, status, warning):
        """
        .. nwdiag::

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics{{nwdiag-.*?}.pdf}')

    @unittest.skipUnless(os.path.exists(nwdiag_fontpath), "TrueType font not found")
    @with_oldpdf_app
    def test_build_pdf_image2(self, app, status, warning):
        """
        .. nwdiag::

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\includegraphics{{nwdiag-.*?}.pdf}')

    @with_png_app
    def test_width_option(self, app, status, warning):
        """
        .. nwdiag::
           :width: 3cm

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics\[width=3cm\]{{nwdiag-.*?}.png}')

    @with_png_app
    def test_height_option(self, app, status, warning):
        """
        .. nwdiag::
           :height: 4cm

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics\[height=4cm\]{{nwdiag-.*?}.png}')

    @with_png_app
    def test_scale_option(self, app, status, warning):
        """
        .. nwdiag::
           :scale: 50%

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics\[scale=0.5\]{{nwdiag-.*?}.png}')

    @with_png_app
    def test_align_option_left(self, app, status, warning):
        """
        .. nwdiag::
           :align: left

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, (r'{\\sphinxincludegraphics{{nwdiag-.*?}.png}'
                                  r'\\hspace\*{\\fill}}'))

    @with_png_app
    def test_align_option_center(self, app, status, warning):
        """
        .. nwdiag::
           :align: center

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, (r'{\\hspace\*{\\fill}'
                                  r'\\sphinxincludegraphics{{nwdiag-.*?}.png}'
                                  r'\\hspace\*{\\fill}}'))

    @with_png_app
    def test_align_option_right(self, app, status, warning):
        """
        .. nwdiag::
           :align: right

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, (r'{\\hspace\*{\\fill}'
                                  r'\\sphinxincludegraphics{{nwdiag-.*?}.png}'))

    @with_png_app
    def test_caption_option(self, app, status, warning):
        """
        .. nwdiag::
           :caption: hello world

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        figure = re.compile((r'\\begin{figure}\[htbp\]' + CR +
                             r'\\centering' + CR +
                             r'\\capstart' + CR + CR +
                             r'\\noindent\\sphinxincludegraphics{{nwdiag-.*?}.png}' + CR +
                             r'\\caption{hello world}\\label{\\detokenize{index:id1}}\\end{figure}'),
                            re.DOTALL)
        self.assertRegex(source, figure)

    @with_png_app
    def test_caption_option_and_align_option(self, app, status, warning):
        """
        .. nwdiag::
           :align: left
           :caption: hello world

           network { A; B; }
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        figure = re.compile((r'\\begin{wrapfigure}{l}{0pt}' + CR +
                             r'\\centering' + CR +
                             r'\\noindent\\sphinxincludegraphics{{nwdiag-.*?}.png}' + CR +
                             r'\\caption{hello world}\\label{\\detokenize{index:id1}}\\end{wrapfigure}'),
                            re.DOTALL)
        self.assertRegex(source, figure)

    @with_png_app
    def test_href(self, app, status, warning):
        """
        .. nwdiag::

           network { A; B; }
           A [href = ':ref:`target`'];
        """
        app.builder.build_all()
        source = (app.outdir / 'test.tex').read_text(encoding='utf-8')
        self.assertRegex(source, r'\\sphinxincludegraphics{{nwdiag-.*?}.png}')
|
export default class Linker {
  /**
   * @param {string} type one of Linker.types
   * @param {string} label display text of the link
   * @param {string} link link target (path or URL)
   * @param {boolean} isUsePermanentLink substitute the permalink for the link
   * @param {string} permalink permanent link used when isUsePermanentLink is true
   */
  constructor(
      type,
      label,
      link,
      isUsePermanentLink = false,
      permalink = '',
  ) {
    this.type = type;
    this.label = label;
    this.link = link;
    this.isUsePermanentLink = isUsePermanentLink;
    this.permalink = permalink;

    // Bind so the method can be passed around as a callback.
    this.generateMarkdownText = this.generateMarkdownText.bind(this);
  }

  // Supported link notations.
  static types = {
    markdownLink: 'mdLink',
    growiLink: 'growiLink',
    pukiwikiLink: 'pukiwikiLink',
  }

  // Patterns recognizing each notation; named groups carry label/link.
  static patterns = {
    pukiwikiLinkWithLabel: /^\[\[(?<label>.+)>(?<link>.+)\]\]$/, // https://regex101.com/r/2fNmUN/2
    pukiwikiLinkWithoutLabel: /^\[\[(?<label>.+)\]\]$/, // https://regex101.com/r/S7w5Xu/1
    growiLink: /^\[(?<label>\/.+)\]$/, // https://regex101.com/r/DJfkYf/3
    markdownLink: /^\[(?<label>.*)\]\((?<link>.*)\)$/, // https://regex101.com/r/DZCKP3/2
  }

  /**
   * Render this linker back to markdown text in its own notation.
   * Note: mutates this.label (sets it to the link) when the label is empty.
   * Returns undefined for an unknown type.
   */
  generateMarkdownText() {
    let reshapedLink = this.link;
    // Prefer the permalink when requested and available.
    if (this.isUsePermanentLink && this.permalink != null) {
      reshapedLink = this.permalink;
    }
    if (this.label === '') {
      this.label = reshapedLink;
    }

    if (this.type === Linker.types.pukiwikiLink) {
      // Label equal to the link collapses to the label-less form.
      if (this.label === reshapedLink) return `[[${reshapedLink}]]`;
      return `[[${this.label}>${reshapedLink}]]`;
    }
    if (this.type === Linker.types.growiLink) {
      return `[${reshapedLink}]`;
    }
    if (this.type === Linker.types.markdownLink) {
      return `[${this.label}](${reshapedLink})`;
    }
  }

  // create an instance of Linker from string
  static fromMarkdownString(str) {
    // if str doesn't mean a linker, create a link whose label is str
    let label = str;
    let link = '';
    let type = this.types.markdownLink;

    // The order of these tests matters: labeled pukiwiki first, since its
    // pattern is a superset of the label-less one.
    // pukiwiki with separator ">".
    if (str.match(this.patterns.pukiwikiLinkWithLabel)) {
      type = this.types.pukiwikiLink;
      ({ label, link } = str.match(this.patterns.pukiwikiLinkWithLabel).groups);
    }
    // pukiwiki without separator ">".
    else if (str.match(this.patterns.pukiwikiLinkWithoutLabel)) {
      type = this.types.pukiwikiLink;
      ({ label } = str.match(this.patterns.pukiwikiLinkWithoutLabel).groups);
      link = label;
    }
    // markdown
    else if (str.match(this.patterns.markdownLink)) {
      type = this.types.markdownLink;
      ({ label, link } = str.match(this.patterns.markdownLink).groups);
    }
    // growi
    else if (str.match(this.patterns.growiLink)) {
      type = this.types.growiLink;
      ({ label } = str.match(this.patterns.growiLink).groups);
      link = label;
    }

    const isUsePermanentLink = false;
    const permalink = '';
    return new Linker(
        type,
        label,
        link,
        isUsePermanentLink,
        permalink,
    );
  }

  // create an instance of Linker from text with index
  static fromLineWithIndex(line, index) {
    const { beginningOfLink, endOfLink } = this.getBeginningAndEndIndexOfLink(line, index);
    // if index is in a link, extract it from line
    let linkStr = '';
    if (beginningOfLink >= 0 && endOfLink >= 0) {
      linkStr = line.substring(beginningOfLink, endOfLink);
    }
    return this.fromMarkdownString(linkStr);
  }

  // return beginning and end indexies of link
  // if index is not in a link, return { beginningOfLink: -1, endOfLink: -1 }
  static getBeginningAndEndIndexOfLink(line, index) {
    let beginningOfLink;
    let endOfLink;

    // Try each notation in turn; a result only counts when the cursor
    // position (index) actually falls inside the detected span.
    // pukiwiki link ('[[link]]')
    [beginningOfLink, endOfLink] = this.getBeginningAndEndIndexWithPrefixAndSuffix(line, index, '[[', ']]');

    // markdown link ('[label](link)')
    if (beginningOfLink < 0 || endOfLink < 0 || beginningOfLink > index || endOfLink < index) {
      [beginningOfLink, endOfLink] = this.getBeginningAndEndIndexWithPrefixAndSuffix(line, index, '[', ')', '](');
    }

    // growi link ('[/link]')
    if (beginningOfLink < 0 || endOfLink < 0 || beginningOfLink > index || endOfLink < index) {
      [beginningOfLink, endOfLink] = this.getBeginningAndEndIndexWithPrefixAndSuffix(line, index, '[/', ']');
    }

    // return { beginningOfLink: -1, endOfLink: -1 }
    if (beginningOfLink < 0 || endOfLink < 0 || beginningOfLink > index || endOfLink < index) {
      [beginningOfLink, endOfLink] = [-1, -1];
    }

    return { beginningOfLink, endOfLink };
  }

  // return begin and end indexies as array only when index is between prefix and suffix and link contains containText.
  static getBeginningAndEndIndexWithPrefixAndSuffix(line, index, prefix, suffix, containText = '') {
    // Last prefix at or before the cursor, then the first containText and
    // suffix after it; any miss yields [-1, -1].
    const beginningIndex = line.lastIndexOf(prefix, index);
    const IndexOfContainText = line.indexOf(containText, beginningIndex + prefix.length);
    const endIndex = line.indexOf(suffix, IndexOfContainText + containText.length);
    if (beginningIndex < 0 || IndexOfContainText < 0 || endIndex < 0) {
      return [-1, -1];
    }
    return [beginningIndex, endIndex + suffix.length];
  }
}
|
/* Minimal bitwise-NOT demo: returns ~0x01 == -2 (two's complement).
 * Note the process exit status seen by the shell is this value truncated
 * by the OS (e.g. 254 on POSIX systems, which keep only the low 8 bits). */
int main(){
    int a;
    int b;
    a = 0x01;  /* 0000...0001 */
    b = ~a;    /* 1111...1110 == -2 */
    return b;
}
|
"""
sentry.models.team
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
class TeamManager(BaseManager):
    def get_for_user(self, organization, user, access=None, access_groups=True,
                     with_projects=False):
        """
        Returns a list of all teams a user has some level of access to.

        Each <Team> returned has an ``access_type`` attribute which holds the
        OrganizationMemberType value.

        If ``with_projects`` is True, each result entry is instead a
        ``(team, [project, ...])`` tuple.

        NOTE(review): the ``access_groups`` parameter is never read in this
        body (legacy access groups are always merged in below) -- confirm
        with callers before removing it.
        """
        # Imported here (not at module level) to avoid circular imports.
        from sentry.models import (
            AccessGroup, OrganizationMember, OrganizationMemberTeam,
            OrganizationMemberType, Project
        )
        if not user.is_authenticated():
            return []
        base_team_qs = self.filter(
            organization=organization,
            status=TeamStatus.VISIBLE
        )
        if user.is_superuser or (settings.SENTRY_PUBLIC and access is None):
            # Superusers (and everyone on a public install when no explicit
            # access level was requested) see every visible team, minus any
            # team membership they explicitly deactivated.
            inactive = list(OrganizationMemberTeam.objects.filter(
                organizationmember__user=user,
                organizationmember__organization=organization,
                is_active=False,
            ).values_list('team', flat=True))
            team_list = base_team_qs
            if inactive:
                team_list = team_list.exclude(id__in=inactive)
            team_list = list(team_list)
            if user.is_superuser:
                access = OrganizationMemberType.OWNER
            else:
                access = OrganizationMemberType.MEMBER
            for team in team_list:
                team.access_type = access
        else:
            # Regular members: resolve their single OrganizationMember row
            # (optionally constrained to at least the requested access level).
            om_qs = OrganizationMember.objects.filter(
                user=user,
                organization=organization,
            )
            if access is not None:
                om_qs = om_qs.filter(type__lte=access)
            try:
                om = om_qs.get()
            except OrganizationMember.DoesNotExist:
                team_qs = self.none()
            else:
                team_qs = om.get_teams()
            # If the member lookup failed, team_qs is empty, so this loop is
            # a no-op and 'om' is never evaluated.
            for team in team_qs:
                team.access_type = om.type
            team_list = set(team_qs)
            # TODO(dcramer): remove all of this junk when access groups are
            # killed
            ag_qs = AccessGroup.objects.filter(
                members=user,
                team__organization=organization,
                team__status=TeamStatus.VISIBLE,
            ).select_related('team')
            if access is not None:
                ag_qs = ag_qs.filter(type__lte=access)
            for ag in ag_qs:
                if ag.team in team_list:
                    continue
                ag.team.is_access_group = True
                ag.team.access_type = ag.type
                team_list.add(ag.team)
        # Deterministic, case-insensitive ordering by team name.
        results = sorted(team_list, key=lambda x: x.name.lower())
        if with_projects:
            # these kinds of queries make people sad :(
            # (one extra query per team -- N+1 by design here)
            for idx, team in enumerate(results):
                project_list = list(Project.objects.get_for_user(
                    team=team,
                    user=user,
                    _skip_team_check=True
                ))
                results[idx] = (team, project_list)
        return results
# TODO(dcramer): pull in enum library
class TeamStatus(object):
    # Lifecycle states for a Team; persisted in Team.status.
    VISIBLE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2
class Team(Model):
    """
    A team represents a group of individuals which maintain ownership of projects.

    Teams are unique per (organization, slug); the slug is auto-generated
    from the name on first save.
    """
    organization = FlexibleForeignKey('sentry.Organization')
    slug = models.SlugField()
    name = models.CharField(max_length=64)
    # Lifecycle state; see TeamStatus above.
    status = BoundedPositiveIntegerField(choices=(
        (TeamStatus.VISIBLE, _('Active')),
        (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
        (TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), default=TeamStatus.VISIBLE)
    date_added = models.DateTimeField(default=timezone.now, null=True)
    # Manager with per-pk and per-slug caching.
    objects = TeamManager(cache_fields=(
        'pk',
        'slug',
    ))

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_team'
        unique_together = (('organization', 'slug'),)

    # NOTE(review): 'owner_id' is not declared on this model in this file --
    # presumably a legacy field defined elsewhere; confirm before relying on it.
    __repr__ = sane_repr('slug', 'owner_id', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)

    def save(self, *args, **kwargs):
        # Derive a unique slug (scoped to the organization) from the name on
        # first save.
        if not self.slug:
            slugify_instance(self, self.name, organization=self.organization)
        super(Team, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-team-dashboard', args=[
            self.organization.slug,
            self.slug,
        ]))

    def get_owner_name(self):
        # Best human-readable name for the team owner, preferring first name,
        # then the local part of the email, then the username.
        # NOTE(review): relies on a legacy 'owner' relation that is not
        # declared in this file; verify it is still defined elsewhere.
        if not self.owner:
            return None
        if self.owner.first_name:
            return self.owner.first_name
        if self.owner.email:
            return self.owner.email.split('@', 1)[0]
        return self.owner.username

    @property
    def member_set(self):
        # Active organization members with access to this team: either via an
        # explicit team membership or via global access, excluding members
        # whose membership of *this* team was deactivated.
        from sentry.models import OrganizationMember
        return self.organization.member_set.filter(
            Q(organizationmemberteam__team=self) |
            Q(has_global_access=True),
            user__is_active=True,
        ).exclude(
            id__in=OrganizationMember.objects.filter(
                organizationmemberteam__is_active=False,
                organizationmemberteam__team=self,
            ).values('id')
        ).distinct()

    def has_access(self, user, access=None):
        """Deprecated. True if `user` is a (valid) member of this team."""
        from sentry.models import AuthIdentity, OrganizationMember
        warnings.warn('Team.has_access is deprecated.', DeprecationWarning)
        queryset = self.member_set.filter(
            user=user,
        )
        if access is not None:
            queryset = queryset.filter(type__lte=access)
        try:
            member = queryset.get()
        except OrganizationMember.DoesNotExist:
            return False
        # When the organization uses an auth provider, membership is only
        # valid while the user's SSO identity is valid.
        try:
            auth_identity = AuthIdentity.objects.get(
                auth_provider__organization=self.organization_id,
                user=member.user_id,
            )
        except AuthIdentity.DoesNotExist:
            return True
        return auth_identity.is_valid(member)

    def get_audit_log_data(self):
        # Snapshot of fields recorded in audit-log entries for this team.
        return {
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
        }
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os
import math
import argparse
import numpy as np
import cntk
import _cntk_py
from cntk.distributed import *
from cntk.io import MinibatchSource, HTKFeatureDeserializer, HTKMLFDeserializer, StreamDef, StreamDefs
from cntk.layers import Recurrence, Dense, LSTM
from cntk.learner import *
from cntk.models import Sequential, For
from cntk.ops import input_variable, cross_entropy_with_softmax, classification_error
from cntk.training_session import *
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")
# Acoustic-model dimensions for the AN4 HTK data set.
feature_dim = 33   # per-frame feature vector size
num_classes = 132  # number of HMM state labels
context = 2        # frames of left/right context spliced by the reader
# Create a minibatch source.
def create_mb_source(features_file, labels_file, label_mapping_file, total_number_of_samples):
    """Create an HTK feature / MLF label minibatch source with truncated BPTT.

    Bug fix: the third parameter was previously misspelled
    'label_mapping_filem', so the body silently read the *global*
    'label_mapping_file' instead of the argument. All callers pass this
    argument positionally, so the rename is backward-compatible.

    Raises RuntimeError when any of the three input files is missing.
    """
    for file_name in [features_file, labels_file, label_mapping_file]:
        if not os.path.exists(file_name):
            raise RuntimeError("File '%s' does not exist. Please check that datadir argument is set correctly." % (file_name))
    fd = HTKFeatureDeserializer(StreamDefs(
        amazing_features = StreamDef(shape=feature_dim, context=(context,context), scp=features_file)))
    ld = HTKMLFDeserializer(label_mapping_file, StreamDefs(
        awesome_labels = StreamDef(shape=num_classes, mlf=labels_file)))
    # Enabling BPTT with truncated_length > 0
    return MinibatchSource([fd,ld], truncation_length=250, epoch_size=total_number_of_samples)
def create_recurrent_network():
    """Build a 3-layer LSTM acoustic model with CE loss and error metric.

    Returns a dict holding the input variables ('feature', 'label'), the
    training criterion ('ce'), the evaluation metric ('errs') and the raw
    network output ('output').
    """
    # Input variables denoting the features and label data.
    # The reader splices a [-context, +context] frame window, hence
    # (2*context+1)*feature_dim values per frame.
    features = input_variable(((2*context+1)*feature_dim))
    labels = input_variable((num_classes))
    # create network: three stacked recurrent LSTM(256) layers + linear output
    model = Sequential([For(range(3), lambda : Recurrence(LSTM(256))),
                        Dense(num_classes)])
    z = model(features)
    ce = cross_entropy_with_softmax(z, labels)
    errs = classification_error (z, labels)
    return {
        'feature': features,
        'label': labels,
        'ce' : ce,
        'errs' : errs,
        'output': z
    }
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, block_size, warm_up, progress_writers):
    """Create a distributed CNTK trainer for `network`.

    A non-None `block_size` selects block-momentum SGD, which is mutually
    exclusive with gradient quantization; otherwise data-parallel SGD with
    `num_quantization_bits` is used, going distributed after `warm_up`
    samples.

    Fix: `!= None` comparisons replaced with the idiomatic `is not None`
    (PEP 8: comparisons to singletons use identity, not equality).
    """
    # Create learner
    if block_size is not None and num_quantization_bits != 32:
        raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")
    lr = [0.001]
    local_learner = adam_sgd(network['output'].parameters,
                             lr=learning_rate_schedule(lr, UnitType.sample, epoch_size),
                             momentum=momentum_as_time_constant_schedule(1000),
                             low_memory=True,
                             gradient_clipping_threshold_per_sample=15, gradient_clipping_with_truncation=True)
    if block_size is not None:
        parameter_learner = block_momentum_distributed_learner(local_learner, block_size=block_size)
    else:
        parameter_learner = data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)
    # Create trainer
    return cntk.Trainer(network['output'], (network['ce'], network['errs']), parameter_learner, progress_writers)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size):
    """Run the CNTK training session with per-epoch checkpointing and
    cross-validation against `test_source`.

    Checkpoints are written to the module-level `model_path`.
    """
    # Map the network's input variables to the reader's named streams.
    input_map = {
        network['feature']: train_source.streams.amazing_features,
        network['label']: train_source.streams.awesome_labels
    }
    training_session(
        trainer=trainer,
        mb_source = train_source,
        var_to_stream = input_map,
        mb_size = minibatch_size,
        progress_frequency=epoch_size,
        # restore=False: always start fresh rather than resuming a checkpoint
        checkpoint_config = CheckpointConfig(frequency = epoch_size,
                                             filename = os.path.join(model_path, "HKT_LSTM_Truncated"),
                                             restore = False),
        cv_config = CrossValidationConfig(source=test_source, mb_size=minibatch_size)
    ).train()
def htk_lstm_truncated(features_file, labels_file, label_mapping_file, minibatch_size=64, epoch_size=640000, num_quantization_bits=32,
                       block_size=3200, warm_up=0, max_epochs=5, num_mbs_per_log=None, gen_heartbeat=False,log_to_file=None, tensorboard_logdir=None):
    """Entry point: build, train and evaluate the truncated-BPTT LSTM model.

    Wires together the network, progress writers (console and optionally
    TensorBoard), the distributed trainer, and the minibatch sources.
    """
    _cntk_py.set_computation_network_trace_level(0)
    network = create_recurrent_network()
    progress_writers = [cntk.utils.ProgressPrinter(
        freq=num_mbs_per_log,
        tag='Training',
        log_to_file=log_to_file,
        rank=Communicator.rank(),
        gen_heartbeat=gen_heartbeat,
        num_epochs=max_epochs)]
    if tensorboard_logdir is not None:
        progress_writers.append(cntk.utils.TensorBoardProgressWriter(
            freq=num_mbs_per_log,
            log_dir=tensorboard_logdir,
            rank=Communicator.rank(),
            model=network['output']))
    trainer = create_trainer(network, epoch_size, num_quantization_bits, block_size, warm_up, progress_writers)
    train_source = create_mb_source(features_file, labels_file, label_mapping_file, total_number_of_samples=max_epochs * epoch_size)
    # Testing with training data, just for testing purposes
    test_source = create_mb_source(features_file, labels_file, label_mapping_file, total_number_of_samples=max_epochs * epoch_size)
    train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size)
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    data_path = os.path.join(abs_path, "..", "Data")
    parser.add_argument('-datadir', '--datadir', help='Data directory where the AN4 files are located', required=False, default=data_path)
    parser.add_argument('-outputdir', '--outputdir', help='Output directory for checkpoints and models', required=False, default=None)
    parser.add_argument('-logdir', '--logdir', help='Log file', required=False, default=None)
    parser.add_argument('-tensorboard_logdir', '--tensorboard_logdir', help='Directory where TensorBoard logs should be created', required=False, default=None)
    # NOTE: string defaults are fine here -- argparse applies type=int to
    # string defaults as well as to command-line values.
    parser.add_argument('-n', '--num_epochs', help='Total number of epochs to train', type=int, required=False, default='160')
    parser.add_argument('-m', '--minibatch_size', help='Minibatch size', type=int, required=False, default='64')
    parser.add_argument('-e', '--epoch_size', help='Epoch size', type=int, required=False, default='50000')
    parser.add_argument('-q', '--quantized_bits', help='Number of quantized bits used for gradient aggregation', type=int, required=False, default='32')
    parser.add_argument('-a', '--distributed_after', help='Number of samples to train with before running distributed', type=int, required=False, default='0')
    parser.add_argument('-b', '--block_samples', type=int, help="Number of samples per block for block momentum (BM) distributed learner (if 0 BM learner is not used)", required=False, default=None)
    parser.add_argument('-device', '--device', type=int, help="Force to run the script on a specified device", required=False, default=None)
    args = vars(parser.parse_args())
    if args['outputdir'] is not None:
        # Rebinds the module-level model_path used by train_and_test.
        model_path = args['outputdir'] + "/models"
    if args['logdir'] is not None:
        # NOTE(review): log_dir is assigned but never read below -- the call
        # passes args['logdir'] directly; confirm before removing.
        log_dir = args['logdir']
    if args['device'] is not None:
        cntk.device.set_default_device(cntk.device.gpu(args['device']))
    data_path = args['datadir']
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)
    # The HTK .scp files reference feature files relative to the data dir.
    os.chdir(data_path)
    features_file = os.path.join(data_path, 'glob_0000.scp')
    labels_file = os.path.join(data_path, 'glob_0000.mlf')
    label_mapping_file = os.path.join(data_path, 'state.list')
    try:
        htk_lstm_truncated(features_file, labels_file, label_mapping_file,
                           minibatch_size=args['minibatch_size'],
                           epoch_size=args['epoch_size'],
                           num_quantization_bits=args['quantized_bits'],
                           block_size=args['block_samples'],
                           warm_up=args['distributed_after'],
                           max_epochs=args['num_epochs'],
                           log_to_file=args['logdir'],
                           num_mbs_per_log=100,
                           gen_heartbeat=False,
                           tensorboard_logdir=args['tensorboard_logdir'])
    finally:
        # Always restore the working directory and shut down MPI cleanly.
        os.chdir(abs_path)
        Communicator.finalize()
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[20],{14:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var r=arguments[t];for(var n in r)Object.prototype.hasOwnProperty.call(r,n)&&(e[n]=r[n])}return e},o=function(){function e(e,t){for(var r=0;r<t.length;r++){var n=t[r];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(e,n.key,n)}}return function(t,r,n){return r&&e(t.prototype,r),n&&e(t,n),t}}(),a=i(r(0)),u=r(30),l=i(u),s=i(r(31));function i(e){return e&&e.__esModule?e:{default:e}}var p=function(e){function t(e){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,(t.__proto__||Object.getPrototypeOf(t)).call(this,e))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),o(t,[{key:"render",value:function(){return 
a.default.createElement(l.default,n({},this.props,{partName:this.props.partName,width:this.props.mmWidth,terminals:this.props.terminals,leds:this.props.leds,raisedMidsection:!1,hasTopRow:!1,partNumber:this.props.partNumber,labels:["a","0","1","2","3","4","5","6","7"],colorCard:{isAnalogIn:!0}}),a.default.createElement(s.default,{type:"POWER",isBlack:!0,numRows:4,numCols:1}),a.default.createElement(s.default,{type:"IO",isBlack:!1,numRows:4,numCols:2}),a.default.createElement(s.default,{type:"IO",isBlack:!1,numRows:4,numCols:2}),a.default.createElement(s.default,{type:"IO",isBlack:!1,numRows:4,numCols:2}),a.default.createElement(s.default,{type:"IO",isBlack:!1,numRows:4,numCols:2}))}}]),t}(a.default.PureComponent);t.default=p,p.defaultProps={partName:"RTD8 S",partNumber:"2702120",mmWidth:53.6,mmHeight:122,terminals:(0,u.defaultTerminals)(12),leds:(0,u.defaultLeds)(12)}}}]);
|
# Copyright: (c) 2021, Edwin G. W. Peters
import numpy as np
def customXCorr(a, b, N=None):
    """
    Circular cross-correlation of `a` and `b` computed via FFTs.

    Faster than a conventional time-domain correlation for long inputs,
    since r = ifft(fft(a, N) * conj(fft(b, N))).

    Parameters
    ----------
    a, b : array_like
        Input sequences (zero-padded / truncated to length N by the FFT).
    N : int, optional
        FFT length; defaults to the longer of the two inputs.

    Returns
    -------
    ndarray (complex)
        Length-N circular cross-correlation.
    """
    Na = len(a)
    Nb = len(b)
    if N is None:
        # builtin max: no need to build a throwaway array via np.max([...])
        N = max(Na, Nb)
    A = np.fft.fft(a, N)
    B = np.fft.fft(b, N)
    return np.fft.ifft(A * np.conj(B), N)
def customXCorrFast(a,b):
    """
    Zero-pads to the next power of two of the longest of a and b before
    calling customXCorr, so the FFTs hit their fastest (radix-2) path.

    (The FFT length is 2**ceil(log2(max(len(a), len(b)))) -- a power of
    two, not "a prime" as a previous version of this docstring claimed.)
    """
    La, Lb = len(a), len(b)
    Nfft = int(2**np.ceil(np.log2(max((La,Lb)))))
    return customXCorr(a,b,Nfft)
|
# Objective: create a list and convert it to a pandas data frame
# script create date: 23/Feb/2020

# load library
import pandas as pd

# create a list (mixed types: int, str, int)
mylist = [46,'ashish',10000]

# convert list to pandas dataframe (one row per list element, single column)
df= pd.DataFrame(mylist)

# show the data type of dataframe. Use info(). Equivalent in R is str()
# See this SO post: https://stackoverflow.com/questions/27637281/what-are-python-pandas-equivalents-for-r-functions-like-str-summary-and-he
df.info()
df.memory_usage()
print("Dataframe objects are: ",df.head())

# As we can see the data frame has no column headers. So add them via the
# 'columns' attribute. But first transpose the list items from rows to columns.
df = pd.DataFrame(mylist).T
#print(df)

# Now add column headers to the coerced list
df.columns = ['age', 'name', 'salary']
print(df)
|
import numpy as np
from scipy.linalg import lapack as lapack
def copy_lower_to_upper(A):
    """Symmetrize `A` in place by mirroring its strictly-lower triangle
    onto the (assumed-zero) upper triangle. Returns None."""
    strict_lower = np.tril(A, k=-1)
    A += strict_lower.T
def invpd(A, return_chol=False):
    """Invert a symmetric positive-definite matrix via its Cholesky factor.

    Returns the inverse, or (inverse, L) with A == L @ L.T when
    `return_chol` is True.
    """
    L = np.linalg.cholesky(A)
    # dpotri computes inv(A) from the Cholesky factor, filling only the
    # lower triangle ...
    Ainv = lapack.dpotri(L, lower=True)[0]
    # ... so mirror the strictly-lower triangle up to complete the matrix.
    Ainv = Ainv + np.tril(Ainv, k=-1).T
    if return_chol:
        return Ainv, L
    return Ainv
def blockarray(*args, **kwargs):
    """Assemble a block matrix with np.bmat and return it as a plain ndarray
    (np.bmat itself returns np.matrix).

    Fix: np.asarray replaces np.array(..., copy=False) -- the `copy=False`
    keyword changed meaning in NumPy 2.0 (it now raises when a copy would be
    required), while np.asarray keeps the old copy-only-if-needed behavior.
    """
    return np.asarray(np.bmat(*args, **kwargs))
def symmetrize(A):
    """Return the symmetric part of `A`, i.e. (A + A.T) / 2."""
    transposed = A.T
    return 0.5 * (A + transposed)
|
"""
This file contains some basic functions for loading and saving images.
"""
import cv2
import pickle
import numpy as np
# Custom imports
from os import listdir
from os.path import join as path_join
def load_images(path):
    """
    Load a set of images from a folder

    Inputs
    ----------
    path: str
        Path to a folder containing a set of images

    Outputs
    -------
    images: numpy.ndarray
        Array of shape (n_images, n_rows, n_cols, n_channels), dtype uint8.
        All images are assumed to share the shape of the first one.
    file_names: list
        Sorted file names corresponding to 'images'
        (fix: the old docstring claimed ndarray; sorted() returns a list)
    """
    file_names = sorted(listdir(path))
    # Fix: an empty folder used to raise IndexError on file_names[0];
    # return empty results instead.
    if not file_names:
        return np.zeros((0, 0, 0, 0), dtype = 'uint8'), file_names
    n_rows, n_cols, n_channels = cv2.imread(path_join(path, file_names[0])).shape
    images = np.zeros((len(file_names), n_rows, n_cols, n_channels), dtype = 'uint8')
    for i, file_name in enumerate(file_names):
        images[i] = cv2.imread(path_join(path, file_name))
    return images, file_names
def save_image(path, file_name, image):
    """
    Wrapper for saving image: writes `image` to path/file_name via cv2.
    """
    target = path_join(path, file_name)
    cv2.imwrite(target, image)
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE252_Unchecked_Return_Value__wchar_t_puts_01.c
Label Definition File: CWE252_Unchecked_Return_Value.string.label.xml
Template File: point-flaw-01.tmpl.c
*/
/*
* @description
* CWE: 252 Unchecked Return Value
* Sinks: puts
* GoodSink: Check if putws() fails
* BadSink : Do not check if _putws() fails
* Flow Variant: 01 Baseline
*
* */
#include "std_testcase.h"
#ifndef OMITBAD
/* Bad variant: calls _putws() and ignores its return value.
 * This is the CWE-252 flaw deliberately embedded in this testcase --
 * it must NOT be "fixed"; analysis tools are expected to flag it. */
void CWE252_Unchecked_Return_Value__wchar_t_puts_01_bad()
{
    {
        /* FLAW: Do not check the return value */
        _putws(L"string");
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* Good variant: checks the _putws() result against WEOF and reports failure. */
static void good1()
{
    {
        /* FIX: check the return value */
        if (_putws(L"string") == WEOF)
        {
            printLine("puts failed!");
        }
    }
}
/* Public good entry point; dispatches to the checked implementation. */
void CWE252_Unchecked_Return_Value__wchar_t_puts_01_good()
{
    good1();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
/* Standalone driver (only built under INCLUDEMAIN, see comment above). */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE252_Unchecked_Return_Value__wchar_t_puts_01_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE252_Unchecked_Return_Value__wchar_t_puts_01_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _propTypes = require('prop-types');
var _propTypes2 = _interopRequireDefault(_propTypes);
var _shallowequal = require('shallowequal');
var _shallowequal2 = _interopRequireDefault(_shallowequal);
var _raf = require('raf');
var _raf2 = _interopRequireDefault(_raf);
var _shouldUpdate2 = require('./shouldUpdate');
var _shouldUpdate3 = _interopRequireDefault(_shouldUpdate2);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _objectWithoutProperties(obj, keys) { var target = {}; for (var i in obj) { if (keys.indexOf(i) >= 0) continue; if (!Object.prototype.hasOwnProperty.call(obj, i)) continue; target[i] = obj[i]; } return target; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } // eslint-disable-line import/no-unresolved
var noop = function noop() {};
var Headroom = function (_Component) {
_inherits(Headroom, _Component);
_createClass(Headroom, null, [{
key: 'getDerivedStateFromProps',
value: function getDerivedStateFromProps(props, state) {
if (props.disable && state.state !== 'unfixed') {
return {
translateY: 0,
className: 'headroom headroom--unfixed headroom-disable-animation',
animation: false,
state: 'unfixed'
};
}
return null;
}
}]);
function Headroom(props) {
_classCallCheck(this, Headroom);
// Class variables.
var _this = _possibleConstructorReturn(this, (Headroom.__proto__ || Object.getPrototypeOf(Headroom)).call(this, props));
_this.setRef = function (ref) {
return _this.inner = ref;
};
_this.setHeightOffset = function () {
_this.setState({
height: _this.inner ? _this.inner.offsetHeight : ''
});
_this.resizeTicking = false;
};
_this.getScrollY = function () {
var parent = _this.props.parent();
if (parent && parent.pageYOffset !== void 0) {
return parent.pageYOffset;
} else if (parent && parent.scrollTop !== void 0) {
return parent.scrollTop;
}
return (document.documentElement || document.body.parentNode || document.body).scrollTop;
};
_this.getViewportHeight = function () {
return window.innerHeight || document.documentElement.clientHeight || document.body.clientHeight;
};
_this.getDocumentHeight = function () {
var body = document.body;
var documentElement = document.documentElement;
return Math.max(body.scrollHeight, documentElement.scrollHeight, body.offsetHeight, documentElement.offsetHeight, body.clientHeight, documentElement.clientHeight);
};
_this.getElementPhysicalHeight = function (elm) {
var offsetHeight = 0;
var clientHeight = 0;
if (elm) {
offsetHeight = elm.offsetHeight;
clientHeight = elm.clientHeight;
}
return Math.max(offsetHeight, clientHeight);
};
_this.getElementHeight = function (elm) {
var offsetHeight = 0;
var clientHeight = 0;
var scrollHeight = 0;
if (elm) {
offsetHeight = elm.offsetHeight;
clientHeight = elm.clientHeight;
scrollHeight = elm.scrollHeight;
}
return Math.max(scrollHeight, offsetHeight, clientHeight);
};
_this.getScrollerPhysicalHeight = function () {
var parent = _this.props.parent();
return parent === window || parent === document.body ? _this.getViewportHeight() : _this.getElementPhysicalHeight(parent);
};
_this.getScrollerHeight = function () {
var parent = _this.props.parent();
return parent === window || parent === document.body ? _this.getDocumentHeight() : _this.getElementHeight(parent);
};
_this.isOutOfBound = function (currentScrollY) {
var pastTop = currentScrollY < 0;
var scrollerPhysicalHeight = _this.getScrollerPhysicalHeight();
var scrollerHeight = _this.getScrollerHeight();
var pastBottom = currentScrollY + scrollerPhysicalHeight > scrollerHeight;
return pastTop || pastBottom;
};
_this.handleScroll = function () {
if (!_this.scrollTicking) {
_this.scrollTicking = true;
(0, _raf2.default)(_this.update);
}
};
_this.handleResize = function () {
if (!_this.resizeTicking) {
_this.resizeTicking = true;
(0, _raf2.default)(_this.setHeightOffset);
}
};
_this.unpin = function () {
_this.props.onUnpin();
_this.setState({
translateY: '-100%',
className: 'headroom headroom--unpinned',
animation: true,
state: 'unpinned'
});
};
_this.unpinSnap = function () {
_this.props.onUnpin();
_this.setState({
translateY: '-100%',
className: 'headroom headroom--unpinned headroom-disable-animation',
animation: false,
state: 'unpinned'
});
};
_this.pin = function () {
_this.props.onPin();
_this.setState({
translateY: 0,
className: 'headroom headroom--pinned',
animation: true,
state: 'pinned'
});
};
_this.unfix = function () {
_this.props.onUnfix();
_this.setState({
translateY: 0,
className: 'headroom headroom--unfixed headroom-disable-animation',
animation: false,
state: 'unfixed'
});
};
_this.update = function () {
_this.currentScrollY = _this.getScrollY();
if (!_this.isOutOfBound(_this.currentScrollY)) {
var _shouldUpdate = (0, _shouldUpdate3.default)(_this.lastKnownScrollY, _this.currentScrollY, _this.props, _this.state),
action = _shouldUpdate.action;
if (action === 'pin') {
_this.pin();
} else if (action === 'unpin') {
_this.unpin();
} else if (action === 'unpin-snap') {
_this.unpinSnap();
} else if (action === 'unfix') {
_this.unfix();
}
}
_this.lastKnownScrollY = _this.currentScrollY;
_this.scrollTicking = false;
};
_this.currentScrollY = 0;
_this.lastKnownScrollY = 0;
_this.scrollTicking = false;
_this.resizeTicking = false;
_this.state = {
state: 'unfixed',
translateY: 0,
className: 'headroom headroom--unfixed'
};
return _this;
}
_createClass(Headroom, [{
key: 'componentDidMount',
value: function componentDidMount() {
this.setHeightOffset();
if (!this.props.disable) {
this.props.parent().addEventListener('scroll', this.handleScroll);
if (this.props.calcHeightOnResize) {
this.props.parent().addEventListener('resize', this.handleResize);
}
}
}
}, {
key: 'shouldComponentUpdate',
value: function shouldComponentUpdate(nextProps, nextState) {
return !(0, _shallowequal2.default)(this.props, nextProps) || !(0, _shallowequal2.default)(this.state, nextState);
}
}, {
key: 'componentDidUpdate',
value: function componentDidUpdate(prevProps, prevState) {
// If children have changed, remeasure height.
if (prevProps.children !== this.props.children) {
this.setHeightOffset();
}
// Add/remove event listeners when re-enabled/disabled
if (!prevProps.disable && this.props.disable) {
this.props.parent().removeEventListener('scroll', this.handleScroll);
this.props.parent().removeEventListener('resize', this.handleResize);
if (prevState.state !== 'unfixed' && this.state.state === 'unfixed') {
this.props.onUnfix();
}
} else if (prevProps.disable && !this.props.disable) {
this.props.parent().addEventListener('scroll', this.handleScroll);
if (this.props.calcHeightOnResize) {
this.props.parent().addEventListener('resize', this.handleResize);
}
}
}
}, {
key: 'componentWillUnmount',
value: function componentWillUnmount() {
this.props.parent().removeEventListener('scroll', this.handleScroll);
window.removeEventListener('scroll', this.handleScroll);
this.props.parent().removeEventListener('resize', this.handleResize);
}
}, {
key: 'render',
value: function render() {
var _props = this.props,
userClassName = _props.className,
divProps = _objectWithoutProperties(_props, ['className']);
delete divProps.onUnpin;
delete divProps.onPin;
delete divProps.onUnfix;
delete divProps.disableInlineStyles;
delete divProps.disable;
delete divProps.parent;
delete divProps.children;
delete divProps.upTolerance;
delete divProps.downTolerance;
delete divProps.pinStart;
delete divProps.calcHeightOnResize;
var style = divProps.style,
wrapperStyle = divProps.wrapperStyle,
rest = _objectWithoutProperties(divProps, ['style', 'wrapperStyle']);
var innerStyle = {
position: this.props.disable || this.state.state === 'unfixed' ? 'relative' : 'fixed',
top: 0,
left: 0,
right: 0,
zIndex: 1,
WebkitTransform: 'translate3D(0, ' + this.state.translateY + ', 0)',
MsTransform: 'translate3D(0, ' + this.state.translateY + ', 0)',
transform: 'translate3D(0, ' + this.state.translateY + ', 0)'
};
var className = this.state.className;
// Don't add css transitions until after we've done the initial
// negative transform when transitioning from 'unfixed' to 'unpinned'.
// If we don't do this, the header will flash into view temporarily
// while it transitions from 0 — -100%.
if (this.state.animation) {
innerStyle = _extends({}, innerStyle, {
WebkitTransition: 'all .2s ease-in-out',
MozTransition: 'all .2s ease-in-out',
OTransition: 'all .2s ease-in-out',
transition: 'all .2s ease-in-out'
});
className += ' headroom--scrolled';
}
if (!this.props.disableInlineStyles) {
innerStyle = _extends({}, innerStyle, style);
} else {
innerStyle = style;
}
var wrapperStyles = _extends({}, wrapperStyle, {
height: this.state.height ? this.state.height : null
});
var wrapperClassName = userClassName ? userClassName + ' headroom-wrapper' : 'headroom-wrapper';
return _react2.default.createElement(
'div',
{ style: wrapperStyles, className: wrapperClassName },
_react2.default.createElement(
'div',
_extends({
ref: this.setRef
}, rest, {
style: innerStyle,
className: className
}),
this.props.children
)
);
}
}]);
return Headroom;
}(_react.Component);
Headroom.propTypes = {
className: _propTypes2.default.string,
parent: _propTypes2.default.func,
children: _propTypes2.default.any.isRequired,
disableInlineStyles: _propTypes2.default.bool,
disable: _propTypes2.default.bool,
upTolerance: _propTypes2.default.number,
downTolerance: _propTypes2.default.number,
onPin: _propTypes2.default.func,
onUnpin: _propTypes2.default.func,
onUnfix: _propTypes2.default.func,
wrapperStyle: _propTypes2.default.object,
pinStart: _propTypes2.default.number,
style: _propTypes2.default.object,
calcHeightOnResize: _propTypes2.default.bool
};
Headroom.defaultProps = {
parent: function parent() {
return window;
},
disableInlineStyles: false,
disable: false,
upTolerance: 5,
downTolerance: 0,
onPin: noop,
onUnpin: noop,
onUnfix: noop,
wrapperStyle: {},
pinStart: 0,
calcHeightOnResize: true
};
exports.default = Headroom;
|
# Copyright (c) 2018, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.tests.utils import assert_eq
from cudf.utils.utils import IS_NEP18_ACTIVE
# Skip marker shared by every test below: all of them rely on NumPy's
# __array_function__ dispatch protocol (NEP-18).
missing_arrfunc_cond = not IS_NEP18_ACTIVE
missing_arrfunc_reason = "NEP-18 support is not available in NumPy"
# Test implementation based on dask array test
# https://github.com/dask/dask/blob/master/dask/array/tests/test_array_function.py
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.mean(x),
        lambda x: np.sum(x),
        lambda x: np.var(x, ddof=1),
        lambda x: np.unique(x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
    ],
)
def test_array_func_cudf_series(np_ar, func):
    """NumPy functions called on a cudf.Series must dispatch via NEP-18 and
    match the result of the same call on the underlying ndarray."""
    cudf_ser = cudf.Series(np_ar)
    expect = func(np_ar)
    got = func(cudf_ser)
    if np.isscalar(expect):
        assert_eq(expect, got)
    else:
        # non-scalar results come back as cudf objects; pull to host to compare
        assert_eq(expect, got.to_array())
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "pd_df", [pd.DataFrame(np.random.uniform(size=(100, 10)))]
)
@pytest.mark.parametrize(
    "func",
    [lambda x: np.mean(x), lambda x: np.sum(x), lambda x: np.var(x, ddof=1)],
)
def test_array_func_cudf_dataframe(pd_df, func):
    """Supported NumPy reductions on a cudf DataFrame must match pandas."""
    gdf = cudf.from_pandas(pd_df)
    expected = func(pd_df)
    actual = func(gdf)
    assert_eq(expected, actual)
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "pd_df", [pd.DataFrame(np.random.uniform(size=(100, 10)))]
)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_dataframe(pd_df, func):
    """NumPy functions cudf does not implement must raise TypeError."""
    gdf = cudf.from_pandas(pd_df)
    with pytest.raises(TypeError):
        func(gdf)
# we only implement sum among all numpy non-ufuncs
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize("func", [lambda x: np.sum(x)])
def test_array_func_cudf_index(np_ar, func):
    """np.sum dispatched on a cudf Index must match the ndarray result."""
    idx = cudf.core.index.as_index(cudf.Series(np_ar))
    assert_eq(func(np_ar), func(idx))
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_index(np_ar, func):
    """Unimplemented NumPy functions on a cudf Index must raise TypeError."""
    idx = cudf.core.index.as_index(cudf.Series(np_ar))
    with pytest.raises(TypeError):
        func(idx)
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_multi_index(func):
    """Unimplemented NumPy functions on a cudf MultiIndex must raise TypeError."""
    level_values = [["a", "b"], ["c", "d"]]
    level_codes = [[0, 1], [1, 0]]
    midx = cudf.MultiIndex(level_values, level_codes)
    with pytest.raises(TypeError):
        func(midx)
@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
def test_list_input_array_func():
    """np.concatenate over a list containing cudf Series must dispatch to cudf."""
    arr = np.array([1, 2, 3])
    ser = cudf.Series(arr)
    result = np.concatenate([ser, ser, ser])
    assert_eq(np.concatenate([arr, arr, arr]), result.to_array())
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional, Tuple
from sydent.users.accounts import Account
if TYPE_CHECKING:
from sydent.sydent import Sydent
class AccountStore:
    """Database access layer for Sydent accounts and their auth tokens."""

    def __init__(self, sydent: "Sydent") -> None:
        self.sydent = sydent

    def getAccountByToken(self, token: str) -> Optional[Account]:
        """Look up the account that owns the given authentication token.

        :param token: The token to identify the account, if any.
        :return: The matching Account, or None when no row matches.
        """
        cursor = self.sydent.db.cursor()
        result = cursor.execute(
            "select a.user_id, a.created_ts, a.consent_version from accounts a, tokens t "
            "where t.user_id = a.user_id and t.token = ?",
            (token,),
        )
        row: Optional[Tuple[str, int, Optional[str]]] = result.fetchone()
        return None if row is None else Account(*row)

    def storeAccount(
        self, user_id: str, creation_ts: int, consent_version: Optional[str]
    ) -> None:
        """Create an account row for the given user (no-op if it exists).

        :param user_id: The Matrix user ID to create an account for.
        :param creation_ts: Creation timestamp in milliseconds.
        :param consent_version: Terms-of-service version last accepted, if any.
        """
        cursor = self.sydent.db.cursor()
        cursor.execute(
            "insert or ignore into accounts (user_id, created_ts, consent_version) "
            "values (?, ?, ?)",
            (user_id, creation_ts, consent_version),
        )
        self.sydent.db.commit()

    def setConsentVersion(self, user_id: str, consent_version: Optional[str]) -> None:
        """Record the terms version the given user has agreed to.

        :param user_id: The Matrix ID of the consenting user.
        :param consent_version: The version of the document agreed to.
        """
        cursor = self.sydent.db.cursor()
        cursor.execute(
            "update accounts set consent_version = ? where user_id = ?",
            (consent_version, user_id),
        )
        self.sydent.db.commit()

    def addToken(self, user_id: str, token: str) -> None:
        """Persist an authentication token for the given user.

        :param user_id: The Matrix user ID the token belongs to.
        :param token: The token to store.
        """
        cursor = self.sydent.db.cursor()
        cursor.execute(
            "insert into tokens (user_id, token) values (?, ?)",
            (user_id, token),
        )
        self.sydent.db.commit()

    def delToken(self, token: str) -> int:
        """Delete an authentication token and report how many rows went away.

        :param token: The token to delete.
        :return: Number of rows deleted.
        """
        cursor = self.sydent.db.cursor()
        cursor.execute(
            "delete from tokens where token = ?",
            (token,),
        )
        removed = cursor.rowcount
        self.sydent.db.commit()
        return removed
|
// export * as log from './log'
// Stub module: keeps the `{ log }` default-export shape but with an empty object.
export default { log: {} }
|
import os
import pandas as pd
from . import unsafe_helper
class Helper:
    """Convenience wrappers for path building and DataFrame lookups."""

    def __init__(self):
        # Row-lookup operations are delegated to the unchecked helper.
        self.unsafe = unsafe_helper.Unsafe()

    def format_path_w_symbol(self, path, symbol):
        """Join *symbol* onto *path* with the OS separator."""
        return os.path.join(path, symbol)

    def format_df_dates(self, df, column_name):
        """Parse *column_name* of *df* into datetimes, mutating *df* in place."""
        df[column_name] = pd.to_datetime(df[column_name])

    def get_row_w_value(self, df, column, value):
        """Rows of *df* where *column* equals *value* (delegated)."""
        return self.unsafe.get_row_w_value(df, column, value)

    def get_row_w_index(self, df, index):
        """Row of *df* at positional *index* (delegated)."""
        return self.unsafe.get_row_w_index(df, index)

    def get_value_from_row(self, row, column):
        """First value of *column* in a single-row frame."""
        return row[column].values[0]

    def format_date_file(self, symbol, path):
        """Path of the per-symbol dates CSV ('<symbol>_dates.csv')."""
        return os.path.join(path, '{}_dates.csv'.format(symbol))

    def format_image_file(self, symbol, path):
        """Path of the per-symbol image array ('<symbol>.npy')."""
        return os.path.join(path, '{}.npy'.format(symbol))

    def format_price_file(self, symbol, path):
        """Path of the per-symbol price CSV ('<symbol>.csv')."""
        return os.path.join(path, '{}.csv'.format(symbol))
|
#!/usr/bin/env python3
# vim: set expandtab tabstop=4 shiftwidth=4:
# Copyright 2021 Christopher J. Kucera
# <cj@apocalyptech.com>
# <http://apocalyptech.com/contact.php>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the development team nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import stat
import hashlib
# NOTE: This was written and tested on an install which has Steam Cloud saves
# *disabled* -- I have no idea exactly how all the sync-state attributes work,
# or how the localtime/time/remotetime timestamps work together. This assumes
# that the files are only ever gonna stay local. Don't trust this if you're
# using a setup where cloud saves are active!
# ALSO NOTE: Only tested/run on Linux. I think it should work fine on other
# platforms, but eh.
class CacheFile:
    """A single tracked save file as recorded in Steam's remotecache.vdf."""

    def __init__(self, filename, path, root, size, localtime, time, remotetime,
                 sha, syncstate, persiststate, platformstosync2):
        self.filename = filename
        self.path = path
        self.root = root
        # Absolute location of the tracked file on disk.
        self.full_filename = os.path.join(self.path, self.filename)
        self.size = size
        self.localtime = localtime
        self.time = time
        self.remotetime = remotetime
        self.sha = sha
        self.syncstate = syncstate
        self.persiststate = persiststate
        self.platformstosync2 = platformstosync2

    @staticmethod
    def from_df(filename, path, df):
        """Build a CacheFile by consuming one `{ "key" ... "value" }` body from *df*.

        *df* is any iterable of lines positioned just before the entry's
        opening brace; iteration stops at the matching closing brace.
        """
        parsed = {}
        for raw in df:
            raw = raw.strip()
            if raw == '{':
                continue
            if raw == '}':
                break
            # Lines look like `"key"\t\t"value"`; first field is the key,
            # last field is the value.
            fields = raw.split("\t")
            parsed[fields[0].strip('"')] = fields[-1].strip('"')
        return CacheFile(
            filename,
            path,
            parsed['root'],
            parsed['size'],
            parsed['localtime'],
            parsed['time'],
            parsed['remotetime'],
            parsed['sha'],
            parsed['syncstate'],
            parsed['persiststate'],
            parsed['platformstosync2'],
        )

    def sync(self):
        """Refresh size/localtime/time/sha from the file currently on disk.

        NOTE (from the original author): the "time" field in remotecache.vdf is
        often slightly *before* the file's real mtime (gaps up to ~17s were
        observed), so syncing an otherwise unchanged file may still update
        "time". The "localtime" field does always match the file's mtime.
        """
        info = os.stat(self.full_filename)
        self.size = str(info.st_size)
        stamp = str(int(info.st_mtime))
        self.localtime = stamp
        self.time = stamp
        with open(self.full_filename, 'rb') as handle:
            self.sha = hashlib.sha1(handle.read()).hexdigest()

    def get_lines(self):
        """Serialize this entry back into remotecache.vdf-formatted lines."""
        out = ["\t\"{}\"".format(self.filename), "\t{"]
        for attr in ('root', 'size', 'localtime', 'time', 'remotetime',
                     'sha', 'syncstate', 'persiststate', 'platformstosync2'):
            out.append("\t\t\"{}\"\t\t\"{}\"".format(attr, getattr(self, attr)))
        out.append("\t}")
        return out
class RemoteCache:
    """Parsed representation of a Steam remotecache.vdf file.

    Holds the game id, change number, and one CacheFile per tracked save
    file (files live in the sibling 'remote' directory).
    """

    def __init__(self, cache_filename):
        self.cache_filename = cache_filename
        self.cache_dir = os.path.abspath(os.path.dirname(cache_filename))
        # Tracked save files live alongside the cache in 'remote/'.
        self.files_dir = os.path.join(self.cache_dir, 'remote')
        self.files = {}
        with open(cache_filename) as df:
            # File layout: `"<game_id>"`, `{`, `"ChangeNumber"\t"<n>"`,
            # then one quoted-filename + brace-delimited body per file.
            self.game_id = int(df.readline().strip().strip('"'))
            df.readline()
            change_num_split = df.readline().strip().split("\t")
            assert(change_num_split[0] == '"ChangeNumber"')
            self.change_num = int(change_num_split[-1].strip('"'))
            next_line = df.readline().strip()
            # Each iteration hands the file handle to CacheFile.from_df,
            # which consumes that entry's `{ ... }` body.
            while next_line != '}':
                inner_filename = next_line.strip('"')
                file = CacheFile.from_df(inner_filename, self.files_dir, df)
                self.files[file.filename] = file
                next_line = df.readline().strip()

    def __getitem__(self, key):
        # Dict-style access by tracked filename.
        return self.files[key]

    def __contains__(self, key):
        return key in self.files

    def sync_all(self):
        """Refresh every tracked entry from the files currently on disk."""
        for file in self.files.values():
            file.sync()

    def get_lines(self):
        """Serialize the whole cache back into remotecache.vdf lines."""
        lines = []
        lines.append('"{}"'.format(self.game_id))
        lines.append('{')
        lines.append("\t\"ChangeNumber\"\t\t\"{}\"".format(self.change_num))
        for file in self.files.values():
            lines.extend(file.get_lines())
        lines.append('}')
        return lines

    def write_to(self, filename):
        """Write the serialized cache to *filename*, matching Steam's file mode."""
        with open(filename, 'w') as df:
            for line in self.get_lines():
                print(line, file=df)
        # Steam sets execute bits, so we will too.
        statinfo = os.stat(filename)
        os.chmod(filename, statinfo.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

    def overwrite(self):
        """Overwrite the original remotecache.vdf in place."""
        self.write_to(self.cache_filename)
|
import { Directive, Input } from '@angular/core';
import { Gate } from '../../../../../../common/cdk/component/gate';
import { SchemaRowStyleArchive } from '../../../../../../schema/core/api/styling/schema.row-style.archive';
// Compiled Angular output: the @Directive metadata is attached as static
// properties below rather than via decorator syntax.
export class StructureRowStyleGate extends Gate {
    constructor(schemaRowStyleArchive) {
        super();
        // Archive that downstream schema styling consumers read from.
        this.schemaRowStyleArchive = schemaRowStyleArchive;
    }
    ngOnChanges(changes) {
        // Forward the rowStyle input into the archive whenever it is set.
        if (this.isDefined('rowStyle', changes)) {
            this.schemaRowStyleArchive.next(this.rowStyle);
        }
    }
}
StructureRowStyleGate.decorators = [
    { type: Directive, args: [{
                selector: 'gui-structure[rowStyle]'
            },] }
];
StructureRowStyleGate.ctorParameters = () => [
    { type: SchemaRowStyleArchive }
];
StructureRowStyleGate.propDecorators = {
    rowStyle: [{ type: Input }]
};
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoic3RydWN0dXJlLXJvdy1zdHlsZS5nYXRlLmpzIiwic291cmNlUm9vdCI6IiIsInNvdXJjZXMiOlsiLi4vLi4vLi4vLi4vLi4vLi4vLi4vLi4vYnVpbGQtY2xpL3Byb2plY3RzL25neC1ncmlkL3NyYy9zdHJ1Y3R1cmUvZ3JpZC9mZWF0dXJlL2dhdGUvcm93L3N0eWxlL3N0cnVjdHVyZS1yb3ctc3R5bGUuZ2F0ZS50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQSxPQUFPLEVBQUUsU0FBUyxFQUFFLEtBQUssRUFBYSxNQUFNLGVBQWUsQ0FBQztBQUM1RCxPQUFPLEVBQUUsSUFBSSxFQUFFLE1BQU0sNkNBQTZDLENBQUM7QUFHbkUsT0FBTyxFQUFFLHFCQUFxQixFQUFFLE1BQU0sb0VBQW9FLENBQUM7QUFNM0csTUFBTSxPQUFPLHFCQUFzQixTQUFRLElBQUk7SUFLOUMsWUFBNkIscUJBQTRDO1FBQ3hFLEtBQUssRUFBRSxDQUFDO1FBRG9CLDBCQUFxQixHQUFyQixxQkFBcUIsQ0FBdUI7SUFFekUsQ0FBQztJQUVELFdBQVcsQ0FBQyxPQUF5QztRQUVwRCxJQUFJLElBQUksQ0FBQyxTQUFTLENBQUMsVUFBVSxFQUFFLE9BQU8sQ0FBQyxFQUFFO1lBQ3hDLElBQUksQ0FBQyxxQkFBcUIsQ0FBQyxJQUFJLENBQUMsSUFBSSxDQUFDLFFBQTBCLENBQUMsQ0FBQztTQUNqRTtJQUNGLENBQUM7OztZQWpCRCxTQUFTLFNBQUM7Z0JBQ1YsUUFBUSxFQUFFLHlCQUF5QjthQUNuQzs7O1lBTFEscUJBQXFCOzs7dUJBUTVCLEtBQUsiLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgeyBEaXJlY3RpdmUsIElucHV0LCBPbkNoYW5nZXMgfSBmcm9tICdAYW5ndWxhci9jb3JlJztcbmltcG9ydCB7IEdhdGUgfSBmcm9tICcuLi8uLi8uLi8uLi8uLi8uLi9jb21tb24vY2RrL2NvbXBvbmVudC9nYXRlJztcbmltcG9ydCB7IEd1aVJvd1N0eWxlIH0gZnJvbSAnLi4vLi4vLi4vLi4vLi4vLi4vZ3VpL2dyaWQvY29yZS9hcGkvZ3VpLmdyaWQucHVibGljLWFwaSc7XG5pbXBvcnQgeyBTY2hlbWFSb3dTdHlsZSB9IGZyb20gJy4uLy4uLy4uLy4uLy4uLy4uL3NjaGVtYS9jb3JlL2FwaS9zdHlsaW5nL3NjaGVtYS5yb3ctc3R5bGUnO1xuaW1wb3J0IHsgU2NoZW1hUm93U3R5bGVBcmNoaXZlIH0gZnJvbSAnLi4vLi4vLi4vLi4vLi4vLi4vc2NoZW1hL2NvcmUvYXBpL3N0eWxpbmcvc2NoZW1hLnJvdy1zdHlsZS5hcmNoaXZlJztcbmltcG9ydCB7IE5nQ2hhbmdlcyB9IGZyb20gJy4uLy4uLy4uLy4uLy4uLy4uL2NvbW1vbi9jZGsvY29tcG9uZW50L25nLWNoYW5nZXMnO1xuXG5ARGlyZWN0aXZlKHtcblx0c2VsZWN0b3I6ICdndWktc3RydWN0dXJlW3Jvd1N0eWxlXSdcbn0pXG5leHBvcnQgY2xhc3MgU3RydWN0dXJlUm93U3R5bGVHYXRlIGV4dGVuZHMgR2F0ZSBpbXBsZW1lbnRzIE9uQ2hhbmdlcyB7XG5cblx0QElucHV0KClcblx0cm93U3R5bGU6IEd1aVJvd1N0eWxlO1xuXG5cdGNvbnN0cnVjdG9yKHByaXZhdGUgcmVhZG9ubHkgc2NoZW1hUm93U3R5bGVBcmNoaXZlOiBTY2
hlbWFSb3dTdHlsZUFyY2hpdmUpIHtcblx0XHRzdXBlcigpO1xuXHR9XG5cblx0bmdPbkNoYW5nZXMoY2hhbmdlczogTmdDaGFuZ2VzPFN0cnVjdHVyZVJvd1N0eWxlR2F0ZT4pIHtcblxuXHRcdGlmICh0aGlzLmlzRGVmaW5lZCgncm93U3R5bGUnLCBjaGFuZ2VzKSkge1xuXHRcdFx0dGhpcy5zY2hlbWFSb3dTdHlsZUFyY2hpdmUubmV4dCh0aGlzLnJvd1N0eWxlIGFzIFNjaGVtYVJvd1N0eWxlKTtcblx0XHR9XG5cdH1cblxufVxuIl19
|
# Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def get_bleu_stats(ref, hyps):
    """Score each hypothesis in *hyps* against the single reference *ref*.

    Uses uniform 1/3 weights over 1-3 grams with NIST geometric smoothing
    (SmoothingFunction.method7). A hypothesis that makes NLTK raise scores 0.0.

    :return: (max, mean) of the per-hypothesis BLEU scores.
    """
    # Hoisted out of the loop: one smoothing function serves all hypotheses.
    smoother = SmoothingFunction().method7
    scores = []
    for hyp in hyps:
        try:
            scores.append(sentence_bleu([ref], hyp, smoothing_function=smoother,
                                        weights=[1. / 3, 1. / 3, 1. / 3]))
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); keep the best-effort 0.0 fallback.
            scores.append(0.0)
    return np.max(scores), np.mean(scores)
def gaussian_kld(recog_mu, recog_logvar, prior_mu, prior_logvar):
    """KL( N(recog_mu, e^recog_logvar) || N(prior_mu, e^prior_logvar) ), summed over dim 1."""
    var_ratio = torch.div(torch.exp(recog_logvar), torch.exp(prior_logvar))
    mean_term = torch.div(torch.pow(prior_mu - recog_mu, 2), torch.exp(prior_logvar))
    per_dim = 1 + (recog_logvar - prior_logvar) - mean_term - var_ratio
    return -0.5 * torch.sum(per_dim, 1)
def norm_log_liklihood(x, mu, logvar):
    """Log density of *x* under a diagonal Gaussian N(mu, e^logvar), summed over dim 1."""
    squared_term = torch.div(torch.pow(x - mu, 2), torch.exp(logvar))
    per_dim = logvar + np.log(2 * np.pi) + squared_term
    return -0.5 * torch.sum(per_dim, 1)
def sample_gaussian(mu, logvar):
    """Reparameterized sample: mu + exp(logvar / 2) * eps with eps ~ N(0, I)."""
    # Noise tensor matches logvar's shape, dtype, and device.
    noise = logvar.new_empty(logvar.size()).normal_()
    return mu + torch.exp(0.5 * logvar) * noise
def get_bow(embedding, avg=False):
    """Bag-of-words pooling over the sentence-length axis (dim 1).

    *embedding* is assumed rank 3 with the embedding on the last dimension.
    Returns (pooled, embedding_size); mean-pools when *avg*, else sum-pools.
    """
    embedding_size = embedding.size(2)
    pooled = embedding.mean(1) if avg else embedding.sum(1)
    return pooled, embedding_size
def dynamic_rnn(cell, inputs, sequence_length, init_state=None, output_fn=None):
    """Run *cell* over a padded batch, handling zero-length sequences.

    Sorts the batch by descending length (required by pack_padded_sequence),
    packs only the non-empty sequences, runs the cell, pads zero-length
    entries back in, and restores the original batch order.

    :param cell: an RNN module with `.dropout` and `.training` attributes.
    :param inputs: (batch, seq, feat) padded inputs.
    :param sequence_length: (batch,) lengths; entries may be zero.
    :param init_state: optional (layers, batch, hidden) initial state.
    :param output_fn: optional callable applied to the padded outputs.
    :return: (outputs, state) in the original batch order.
    """
    sorted_lens, len_ix = sequence_length.sort(0, descending=True)
    # Used for later reorder
    inv_ix = len_ix.clone()
    inv_ix[len_ix] = torch.arange(0, len(len_ix)).type_as(inv_ix)
    # The number of inputs that have lengths > 0
    valid_num = torch.sign(sorted_lens).long().sum().item()
    zero_num = inputs.size(0) - valid_num
    # print('zero_num:', zero_num)
    sorted_inputs = inputs[len_ix].contiguous()
    if init_state is not None:
        sorted_init_state = init_state[:, len_ix].contiguous()
    # Only the non-empty sequences are packed and fed to the cell.
    packed_inputs = pack_padded_sequence(sorted_inputs[:valid_num], list(sorted_lens[:valid_num]), batch_first=True)
    if init_state is not None:
        outputs, state = cell(packed_inputs, sorted_init_state[:, :valid_num])
    else:
        outputs, state = cell(packed_inputs)
    # Reshape *final* output to (batch_size, hidden_size)
    outputs, _ = pad_packed_sequence(outputs, batch_first=True)
    # Add back the zero lengths
    if zero_num > 0:
        outputs = torch.cat([outputs, outputs.new_zeros(zero_num, outputs.size(1), outputs.size(2))], 0)
        if init_state is not None:
            # Zero-length entries keep their initial state untouched.
            state = torch.cat([state, sorted_init_state[:, valid_num:]], 1)
        else:
            state = torch.cat([state, state.new_zeros(state.size(0), zero_num, state.size(2))], 1)
    # Reorder to the original order
    outputs = outputs[inv_ix].contiguous()
    state = state[:, inv_ix].contiguous()
    # compensate the last last layer dropout, necessary????????? need to check!!!!!!!!
    state = F.dropout(state, cell.dropout, cell.training)
    outputs = F.dropout(outputs, cell.dropout, cell.training)
    if output_fn is not None:
        outputs = output_fn(outputs)
    return outputs, state
def get_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None):
    """Encode *embedding* with a unidirectional RNN cell.

    Assumption, the last dimension is the embedding.
    The second last dimension is the sentence length. The rank must be 3.
    The padding should have zero.

    :return: (last-layer final state, cell.hidden_size)
    """
    with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
        if length_mask is None:
            # A timestep counts toward the length iff any embedding entry is non-zero.
            length_mask = torch.sum(torch.sign(torch.max(torch.abs(embedding), 2)[0]), 1)
            length_mask = length_mask.long()
        _, encoded_input = dynamic_rnn(cell, embedding, sequence_length=length_mask)
        # get only the last layer
        encoded_input = encoded_input[-1]
        # BUG FIX: the original returned `rnn.hidden_size`, but no name `rnn`
        # exists in this module (guaranteed NameError). The bidirectional
        # sibling returns cell.hidden_size * 2; the unidirectional size is
        # cell.hidden_size.
        return encoded_input, cell.hidden_size
def get_bi_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None):
    """Encode *embedding* with a bidirectional RNN cell.

    Assumption, the last dimension is the embedding.
    The second last dimension is the sentence length. The rank must be 3.
    The padding should have zero.

    :return: (concatenated last-layer fwd/bwd states, cell.hidden_size * 2)
    """
    with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
        if length_mask is None:
            # A timestep counts toward the length iff any embedding entry is non-zero.
            nonzero_steps = torch.sign(torch.max(torch.abs(embedding), 2)[0])
            length_mask = torch.sum(nonzero_steps, 1).long()
        _, final_state = dynamic_rnn(cell, embedding, sequence_length=length_mask)
        # The last layer's two directions occupy the final two state slots.
        encoded_input = torch.cat([final_state[-2], final_state[-1]], 1)
        return encoded_input, cell.hidden_size * 2
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local bitcoind JSON-RPC endpoint; embed credentials only
# when an RPC password was configured above. (Python 2 script.)
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:12261")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:12261")
# The first CLI argument selects the wallet command (case-insensitive).
cmd = sys.argv[1].lower()
# Command dispatch (part 1). Each branch prompts for its arguments and prints
# the RPC result. The outer try prints a generic error message; where a
# command has optional arguments, a nested try retries the call without them.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
    try:
        pwd = getpass.getpass(prompt="Enter passphrase: ")
        pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
        if pwd == pwd2:
            access.encryptwallet(pwd)
            print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
        else:
            print "\n---Passphrases do not match---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Bitcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            # Fall back to the no-argument form when the optional args fail.
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
# The following commands take no arguments: just call and print.
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Bitcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Command dispatch (part 2): informational, transfer, and wallet-lock
# commands. Same pattern as above — nested try retries without optional args.
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
        pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported"
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Deprecated Python API for GraphExecutor."""
import warnings
from . import graph_executor
def create(*args, **kwargs):
    """Deprecated alias for ``tvm.contrib.graph_executor.create``.

    Forwards all positional and keyword arguments unchanged.
    """
    warnings.warn(
        "This function has been moved to tvm.contrib.graph_executor and will be removed "
        "in the next TVM release",
        # Emit as a DeprecationWarning (not the default UserWarning) and point
        # the warning at the caller's line rather than at this shim.
        DeprecationWarning,
        stacklevel=2,
    )
    return graph_executor.create(*args, **kwargs)
|
#!/usr/bin/env python
import os
from os.path import basename
import click
import logging
from datetime import datetime
import gzip
from typing import List
import pyopenms as oms
from mzqc import MZQCFile as qc
from qccalculator import utils, basicqc, idqc, idqcmq, enzymeqc, masstraceqc
# Module-level accumulators shared across chained click sub-commands.
rqs: List[qc.RunQuality] = list()  # per-run quality metrics collected so far
sqs: List[qc.SetQuality] = list()  # per-set quality metrics collected so far
out = str()  # output path, set by the `start` group callback
zp = False  # whether to gzip (and pretty-print) the output
# @click.pass_context
def finale():
    """Serialize all collected run/set qualities to the configured output.

    Writes gzip (with pretty-printed JSON) when `zp` is set, plain JSON
    otherwise. Does nothing unless something was collected or `out` is set.
    """
    # logging.warn() has been a deprecated alias of warning() since Python 3.3.
    logging.warning("Calculated metrics from {} different input peak files".format(len(rqs)))
    logging.warning("Attempting to write results to {}{}".format(out, ".gz" if zp else ""))
    if any(rqs) or any(sqs) or out:
        if zp:
            with gzip.GzipFile(out + '.gz', 'w') as fh:
                # readability=1 pretty-prints the gzipped variant (see --zip help).
                fh.write(qc.JsonSerialisable.ToJson(mzqc_assembly(rqs, sqs, out), readability=1).encode('utf-8'))
        else:
            with open(out, 'w') as fh:
                fh.write(qc.JsonSerialisable.ToJson(mzqc_assembly(rqs, sqs, out), readability=0))
        logging.warning("Done. Thank you for choosing QCCalculator!")
def mzqc_assembly(rqs, sqs, out):
    """Assemble an MzQcFile from the collected run and set qualities."""
    # TODO check all the metrics to see which ontologies were used
    vocabularies = [
        qc.ControlledVocabulary(
            ref="QC",
            name="Proteomics Standards Initiative Quality Control Ontology",
            version="0.1.0",
            uri="https://github.com/HUPO-PSI/qcML-development/blob/master/cv/v0_1_0/qc-cv.obo",
        ),
        qc.ControlledVocabulary(
            ref="MS",
            name="Proteomics Standards Initiative Mass Spectrometry Ontology",
            version="4.1.7",
            uri="https://github.com/HUPO-PSI/psi-ms-CV/blob/master/psi-ms.obo",
        ),
    ]
    return qc.MzQcFile(
        version="0.1.0",
        creationDate=datetime.now().isoformat(),
        runQualities=rqs,
        setQualities=sqs,
        controlledVocabularies=vocabularies,
    )
@click.group(chain=True)
@click.option('--output', required=True, type=click.Path(), default="/tmp/out.mzQC",
              help="The path and name of the desired output file.")
@click.option('--zip/--no-zip', default=False,
              help="Apply gzip to the output. Appends '.gz' to the target filename and pretty formatting.")
def start(output, zip):
    """Calculate quality metrics for given files.
    Multiple files input is possible (each after a "full/basic" COMMAND).
    All metrics of one QCCalculator execution will be stored in one output file.
    If you need separate mzQC files, please execute separately.
    For more information on the different COMMAND types, try QCCalculator COMMAND --help"""
    # Fixed typo ("Recieved") and the deprecated logging.warn alias.
    logging.warning("Received output destination {}".format(output))
    # Stash destination/compression choice in module globals for finale().
    # NOTE: parameter name `zip` shadows the builtin but is fixed by the
    # click option name (--zip/--no-zip), so it cannot be renamed freely.
    global out
    out = output
    if zip:
        global zp
        zp = True
@start.command()
@click.argument('filename', type=click.Path(exists=True))
@click.option('--mzid', type=click.Path(exists=True),
              help="If you have a corresponding mzid file you need to pass it, too. Mutually exclusive to idxml.")
@click.option('--idxml', type=click.Path(exists=True),
              help="If you have a corresponding idxml file you need to pass it, too. Mutually exclusive to mzid.")
@click.pass_context
def full(ctx, filename, mzid=None, idxml=None):
    """Calculate all possible metrics for these files. These data sources will be included in set metrics."""
    exp = oms.MSExperiment()
    oms.MzMLFile().load(click.format_filename(filename), exp)
    # Base (identification-free) metrics for this run.
    rq = basicqc.getBasicQuality(exp)
    # Exactly one identification file is required in this mode.
    if idxml and mzid:
        logging.warn("Sorry, you can only give one id file. Please choose one.")
        click.echo(ctx.get_help())
        return
    elif not idxml and not mzid:
        logging.warn("Sorry, you must give one id file in this mode.")
        click.echo(ctx.get_help())
        return
    # Pull the MS2 spectrum count out of the already-computed base metrics.
    ms2num = 0
    for x in rq.qualityMetrics:
        if x.name == "Number of MS2 spectra":
            ms2num = x.value
    if ms2num < 1:
        logging.warn("We seem to have found no MS2 spectra which is unlikely to be true since you have also given some identifications. \
We continue with symbolic value of 1 for the number of MS2 spectra, \
however this means some metrics will invariably be incorrect!\
Please make sure, we have the right inputs.")
        ms2num = 1
    pros = list()
    peps = list()
    # Choose the reader matching the id file format (the guards above
    # guarantee exactly one of mzid/idxml is set here).
    if mzid:
        oms_id = oms.MzIdentMLFile()
        idf = mzid
    if idxml:
        oms_id = oms.IdXMLFile()
        idf = idxml
    if idf:
        oms_id.load(click.format_filename(idf), pros, peps)
        rq.qualityMetrics.extend(idqc.getIDQuality(exp, pros, peps, ms2num))
    rqs.append(rq)
    finale()
@start.command()
@click.argument('filename', type=click.Path(exists=True))
@click.option('--zipurl', type=click.Path(exists=True), required=True,
              help="The URL to a max quant output zip file (must contain evidence.txt and parameters.txt).")
@click.option('--rawname', type=str, default="",
              help="The raw file name of interest (as in evidence.txt) without path or extension.")
@click.pass_context
def maxq(ctx, filename, zipurl, rawname):
    """Calculate all possible metrics for these files. These data sources will be included in set metrics."""
    # NOTE(review): --zipurl is declared click.Path(exists=True) although it is
    # fed to loadMQZippedResults as a URL — confirm whether local-path validation
    # is intended before changing the option type.
    exp = oms.MSExperiment()
    oms.MzMLFile().load(click.format_filename(filename), exp)
    rq = basicqc.getBasicQuality(exp)

    ms2num = 0
    for x in rq.qualityMetrics:
        if x.name == "Number of MS2 spectra":
            ms2num = x.value

    if ms2num < 1:
        logging.warning(
            "We seem to have found no MS2 spectra which is unlikely to be true since "
            "you have also given some identifications. We continue with symbolic value "
            "of 1 for the number of MS2 spectra, however this means some metrics will "
            "invariably be incorrect! Please make sure, we have the right inputs.")
        ms2num = 1

    try:
        mq, params = idqcmq.loadMQZippedResults(zipurl)
        if not rawname:
            logging.warning("Infering rawname from mzML")
            rawname = basename(
                exp.getExperimentalSettings().getSourceFiles()[0].getNameOfFile().decode())  # TODO split extensions
        rq.qualityMetrics.extend(idqcmq.getMQMetrics(rawname, params, mq, ms2num))
        rqs.append(rq)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
        # and SystemExit); log the traceback so failures are diagnosable.
        logging.exception("Retrieving any results from the URL failed.")
    finale()
@start.command()
@click.argument('filename', type=click.Path(exists=True))
def basic(filename):
    """Calculate the basic metrics available from virtually every mzML file."""
    # Load the experiment, compute the run quality, record it, then flush.
    experiment = oms.MSExperiment()
    oms.MzMLFile().load(click.format_filename(filename), experiment)
    run_quality = basicqc.getBasicQuality(experiment)
    rqs.append(run_quality)
    finale()
# Script entry point: hand control to the click command group.
if __name__ == "__main__":
    start()
# Example invocation:
# QCCalculator --output cli-test.mzqc full --mzid tests/CPTAC_CompRef_00_iTRAQ_01_2Feb12_Cougar_11-10-09.mzid tests/CPTAC_CompRef_00_iTRAQ_01_2Feb12_Cougar_11-10-09.trfr.t3.mzML
|
"""
$url rtp.pt/play
$type live, vod
$region Portugal
"""
import re
from base64 import b64decode
from urllib.parse import unquote
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream.hls import HLSStream
@pluginmatcher(re.compile(
    r"https?://www\.rtp\.pt/play/"
))
class RTPPlay(Plugin):
    """Streamlink plugin for RTP Play (https://www.rtp.pt/play/), live and VOD."""

    # Captures the right-hand side of the page's `hls: ...` JS assignment:
    #   "string"         - a plain quoted URL,
    #   "obfuscated"     - a JS array of URL-encoded characters joined at runtime,
    #   "obfuscated_b64" - the same array, additionally base64-encoded (atob).
    _m3u8_re = re.compile(r"""
        hls\s*:\s*(?:
            (["'])(?P<string>[^"']*)\1
            |
            decodeURIComponent\s*\((?P<obfuscated>\[.*?])\.join\(
            |
            atob\s*\(\s*decodeURIComponent\s*\((?P<obfuscated_b64>\[.*?])\.join\(
        )
    """, re.VERBOSE)

    # Picks the *last* `hls:` occurrence on the page, then resolves whichever
    # alternative matched into a playlist URL. An empty "string" match is
    # accepted (validate.length(0)) and later treated as "no stream".
    _schema_hls = validate.Schema(
        validate.transform(lambda text: next(reversed(list(RTPPlay._m3u8_re.finditer(text))), None)),
        validate.any(
            None,
            validate.all(
                validate.get("string"),
                str,
                validate.any(
                    validate.length(0),
                    validate.url()
                )
            ),
            validate.all(
                validate.get("obfuscated"),
                str,
                validate.parse_json(),
                validate.transform(lambda arr: unquote("".join(arr))),
                validate.url()
            ),
            validate.all(
                validate.get("obfuscated_b64"),
                str,
                validate.parse_json(),
                validate.transform(lambda arr: unquote("".join(arr))),
                validate.transform(lambda b64: b64decode(b64).decode("utf-8")),
                validate.url()
            )
        )
    )

    def _get_streams(self):
        """Fetch the page; if an HLS URL is embedded, return its variant streams."""
        # Browser-like UA plus Referer (presumably required by the site — confirm).
        self.session.http.headers.update({"User-Agent": useragents.CHROME,
                                          "Referer": self.url})
        hls_url = self.session.http.get(self.url, schema=self._schema_hls)
        if hls_url:
            return HLSStream.parse_variant_playlist(self.session, hls_url)


__plugin__ = RTPPlay
|
//Handles Login Submission
const handleLogin = (e) => {
    e.preventDefault();

    // BUG FIX: the selector was missing the '#' prefix ($("quizMessage")),
    // so the previous status message was never hidden.
    $("#quizMessage").animate({width:'hide'},350);

    if($("#user").val() == '' || $("#pass").val() == '') {
        handleError("Username or password is empty");
        return false;
    }

    // Removed a debug console.log that leaked the CSRF token to the console.
    sendAjax('POST', $("#loginForm").attr("action"), $("#loginForm").serialize(), redirect);
    return false;
};
//Handles Signup Submission
const handleSignup = (e) => {
    e.preventDefault();

    $("#quizMessage").animate({width:'hide'},350);

    const password = $("#pass").val();
    const retyped = $("#pass2").val();

    if($("#user").val() == '' || password == '' || retyped == ''){
        handleError("All fields are required");
        return false;
    }
    if(password !== retyped) {
        handleError("Passwords do not match");
        return false;
    }

    const $form = $("#signupForm");
    sendAjax('POST', $form.attr("action"), $form.serialize(), redirect);
    return false;
};
//Handles Password Change Submission
const handleChangePass = (e) => {
    e.preventDefault();

    $("#quizMessage").animate({width:'hide'},350);

    const newPass = $("#pass").val();
    const retyped = $("#pass2").val();

    if($("#user").val() == '' || newPass == '' || retyped == '' || $("#oldpass").val() == ''){
        handleError("All fields are required");
        return false;
    }
    if(newPass !== retyped) {
        handleError("Passwords do not match");
        return false;
    }

    const $form = $("#changePassForm");
    sendAjax('POST', $form.attr("action"), $form.serialize(), redirect);
    return false;
};
//Renders Login Window
const LoginWindow = (props) => {
return (
<form id="loginForm"
name=""
onSubmit={handleLogin}
action="/login"
method="POST"
className="mainForm"
>
<label htmlFor="username">Username: </label>
<input id="user" type="text" name="username" placeholder="username"/>
<label htmlFor="pass">Password: </label>
<input id="pass" type="password" name="pass" placeholder="password"/>
<input type="hidden" name="_csrf" value={props.csrf}/>
<input className="formSubmit" type="submit" value="Sign in" />
</form>
);
};
//Renders Signup Window
// NOTE(review): the pass2 label reads "Password:" like the first one —
// probably intended to say "Retype Password"; confirm with design.
const SignupWindow = (props) => {
    return (
        <form id="signupForm"
            name="signupForm"
            onSubmit={handleSignup}
            action="/signup"
            method="POST"
            className="mainForm"
        >
            <label htmlFor="username">Username: </label>
            <input id="user" type="text" name="username" placeholder="username"/><br />
            <label htmlFor="pass">Password: </label>
            <input id="pass" type="password" name="pass" placeholder="password"/><br />
            <label htmlFor="pass2">Password: </label>
            <input id="pass2" type="password" name="pass2" placeholder="retype password"/><br />
            <input type="hidden" name="_csrf" value={props.csrf}/>
            <input className="formSubmit" type="submit" value="Sign Up" /><br />
        </form>
    );
};
//Renders Password Change Window
const ChangePassWindow = (props) => {
return (
<form id="changePassForm"
name="changePassForm"
onSubmit={handleChangePass}
action="/changepass"
method="POST"
className="mainForm"
>
<label htmlFor="username">Username: </label>
<input id="user" type="text" name="username" placeholder="username"/><br />
<label htmlFor="pass">Password: </label>
<input id="pass" type="password" name="pass" placeholder="password"/><br />
<label htmlFor="pass2">Password: </label>
<input id="pass2" type="password" name="pass2" placeholder="retype password"/><br />
<label htmlFor="oldpass">Old Password: </label>
<input id="oldpass" type="password" name="oldPass" placeholder="old password"/><br />
<input type="hidden" name="_csrf" value={props.csrf}/>
<input className="formSubmit" type="submit" value="Sign Up" />
</form>
);
};
//Renders About Window
// (props — including csrf — are currently unused by this component)
const AboutWindow = (props) => {
    return (
        <div className="mainForm">This is a quiz game where you are presented with a song and must guess its associated artist.</div>
    );
};
//Sets Login Window up to be rendered
const createLoginWindow = (csrf) => {
ReactDOM.render(
<LoginWindow csrf={csrf} />,
document.querySelector("#content")
);
};
//Sets Signup Window up to be rendered
const createSignupWindow = (csrf) => {
ReactDOM.render(
<SignupWindow csrf={csrf} />,
document.querySelector("#content")
);
};
//Sets Password Change Window up to be rendered
const createChangePassWindow = (csrf) => {
ReactDOM.render(
<ChangePassWindow csrf={csrf} />,
document.querySelector("#content")
);
};
//Sets About Window up to be rendered
const createAboutWindow = (csrf) => {
ReactDOM.render(
<AboutWindow csrf={csrf} />,
document.querySelector("#content")
);
};
//Startup function: wire each nav button to its window, then show login.
const setup = (csrf) => {
    // Same attachment order as before: about, signup, changePass, login.
    const bindings = [
        ["#aboutButton", createAboutWindow],
        ["#signupButton", createSignupWindow],
        ["#changePassButton", createChangePassWindow],
        ["#loginButton", createLoginWindow],
    ];

    bindings.forEach(([selector, createWindow]) => {
        document.querySelector(selector).addEventListener("click", (e) => {
            e.preventDefault();
            createWindow(csrf);
            return false;
        });
    });

    createLoginWindow(csrf);
};
//Fetch a CSRF token from the server, then boot the UI with it.
const getToken = () => {
    sendAjax('GET', '/getToken', null, (result) => setup(result.csrfToken));
};

//Kick everything off once the DOM is ready.
$(document).ready(() => {
    getToken();
});
|
/*
var vid = new Whammy.Video();
vid.add(canvas or data url)
vid.compile()
*/
window.Whammy = (function(){
// in this case, frames has a very specific meaning, which will be
// detailed once i finish writing the code
//
// Builds a complete WebM (Matroska) byte stream from parsed webp frames.
// Each frame must carry {width, height, duration, data} (see parseWebP).
// Returns a Uint8Array when outputAsArray is truthy, otherwise a Blob.
function toWebM(frames, outputAsArray){
    var info = checkFrames(frames);

    //max duration by cluster in milliseconds
    var CLUSTER_MAX_DURATION = 30000;

    // EBML document skeleton: header + Segment (Info, Tracks).
    // Clusters are generated below and pushed into the Segment.
    var EBML = [
        {
            "id": 0x1a45dfa3, // EBML
            "data": [
                {
                    "data": 1,
                    "id": 0x4286 // EBMLVersion
                },
                {
                    "data": 1,
                    "id": 0x42f7 // EBMLReadVersion
                },
                {
                    "data": 4,
                    "id": 0x42f2 // EBMLMaxIDLength
                },
                {
                    "data": 8,
                    "id": 0x42f3 // EBMLMaxSizeLength
                },
                {
                    "data": "webm",
                    "id": 0x4282 // DocType
                },
                {
                    "data": 2,
                    "id": 0x4287 // DocTypeVersion
                },
                {
                    "data": 2,
                    "id": 0x4285 // DocTypeReadVersion
                }
            ]
        },
        {
            "id": 0x18538067, // Segment
            "data": [
                {
                    "id": 0x1549a966, // Info
                    "data": [
                        {
                            "data": 1e6, //do things in millisecs (num of nanosecs for duration scale)
                            "id": 0x2ad7b1 // TimecodeScale
                        },
                        {
                            "data": "whammy",
                            "id": 0x4d80 // MuxingApp
                        },
                        {
                            "data": "whammy",
                            "id": 0x5741 // WritingApp
                        },
                        {
                            "data": doubleToString(info.duration),
                            "id": 0x4489 // Duration
                        }
                    ]
                },
                {
                    "id": 0x1654ae6b, // Tracks
                    "data": [
                        {
                            "id": 0xae, // TrackEntry
                            "data": [
                                {
                                    "data": 1,
                                    "id": 0xd7 // TrackNumber
                                },
                                {
                                    "data": 1,
                                    "id": 0x63c5 // TrackUID
                                },
                                {
                                    "data": 0,
                                    "id": 0x9c // FlagLacing
                                },
                                {
                                    "data": "und",
                                    "id": 0x22b59c // Language
                                },
                                {
                                    "data": "V_VP8",
                                    "id": 0x86 // CodecID
                                },
                                {
                                    "data": "VP8",
                                    "id": 0x258688 // CodecName
                                },
                                {
                                    "data": 1,
                                    "id": 0x83 // TrackType
                                },
                                {
                                    "id": 0xe0, // Video
                                    "data": [
                                        {
                                            "data": info.width,
                                            "id": 0xb0 // PixelWidth
                                        },
                                        {
                                            "data": info.height,
                                            "id": 0xba // PixelHeight
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                },
                //cluster insertion point
            ]
        }
    ];

    //Generate clusters (max duration)
    var frameNumber = 0;
    var clusterTimecode = 0;
    while(frameNumber < frames.length){
        // Greedily pack frames until the cluster reaches CLUSTER_MAX_DURATION.
        var clusterFrames = [];
        var clusterDuration = 0;
        do {
            clusterFrames.push(frames[frameNumber]);
            clusterDuration += frames[frameNumber].duration;
            frameNumber++;
        }while(frameNumber < frames.length && clusterDuration < CLUSTER_MAX_DURATION);

        var clusterCounter = 0;
        var cluster = {
            "id": 0x1f43b675, // Cluster
            "data": [
                {
                    "data": Math.round(clusterTimecode),
                    "id": 0xe7 // Timecode
                }
            ].concat(clusterFrames.map(function(webp){
                // SimpleBlock timecodes are relative to the cluster timecode.
                var block = makeSimpleBlock({
                    discardable: 0,
                    frame: webp.data.slice(4),
                    invisible: 0,
                    keyframe: 1,
                    lacing: 0,
                    trackNum: 1,
                    timecode: Math.round(clusterCounter)
                });
                clusterCounter += webp.duration;
                return {
                    data: block,
                    id: 0xa3
                };
            }))
        }

        //Add cluster to segment
        EBML[1].data.push(cluster);
        clusterTimecode += clusterDuration;
    }

    return generateEBML(EBML, outputAsArray)
}
// Validates that every frame shares the first frame's dimensions and that
// per-frame durations fit in a signed 16-bit timecode; returns the summed
// duration plus the common width/height.
function checkFrames(frames){
    var first = frames[0];
    var totalDuration = first.duration;

    for(var i = 1; i < frames.length; i++){
        if(frames[i].width != first.width) throw "Frame " + (i + 1) + " has a different width";
        if(frames[i].height != first.height) throw "Frame " + (i + 1) + " has a different height";
        if(frames[i].duration < 0 || frames[i].duration > 0x7fff) throw "Frame " + (i + 1) + " has a weird duration (must be between 0 and 32767)";
        totalDuration += frames[i].duration;
    }

    return {
        duration: totalDuration,
        width: first.width,
        height: first.height
    };
}
// Encodes a non-negative integer as a big-endian byte buffer with no
// leading zeros (zero encodes as an empty buffer).
function numToBuffer(num){
    var bytes = [];
    while(num > 0){
        bytes.unshift(num & 0xff);
        num = num >> 8;
    }
    return new Uint8Array(bytes);
}
// Converts a "binary string" into a Uint8Array, one byte per char code.
function strToBuffer(str){
    return new Uint8Array(Array.prototype.map.call(str, function(ch){
        return ch.charCodeAt(0);
    }));
}
// Packs a string of '0'/'1' characters into bytes (MSB first), left-padding
// with zeros so the total bit length is a multiple of 8.
function bitsToBuffer(bits){
    var remainder = bits.length % 8;
    if(remainder){
        bits = new Array(8 - remainder + 1).join('0') + bits;
    }
    var bytes = [];
    for(var i = 0; i < bits.length; i += 8){
        bytes.push(parseInt(bits.substr(i, 8), 2));
    }
    return new Uint8Array(bytes);
}
// Serializes the nested EBML {id, data} tree into a WebM byte stream:
// for each element, emit the raw id bytes, a VINT-encoded payload size,
// then the payload. Returns a Uint8Array when outputAsArray is truthy,
// otherwise a Blob.
function generateEBML(json, outputAsArray){
    var ebml = [];
    for(var i = 0; i < json.length; i++){
        var data = json[i].data;
        if(typeof data == 'object') data = generateEBML(data, outputAsArray);
        if(typeof data == 'number') data = bitsToBuffer(data.toString(2));
        if(typeof data == 'string') data = strToBuffer(data);

        // (Removed a leftover no-op debug statement: `if(data.length){ var z = z; }`
        //  — a self-assignment of a hoisted var with no effect.)

        var len = data.size || data.byteLength || data.length;
        // VINT size field: `zeroes` extra length bytes, a marker '1' bit,
        // then the zero-padded binary length.
        var zeroes = Math.ceil(Math.ceil(Math.log(len)/Math.log(2))/8);
        var size_str = len.toString(2);
        var padded = (new Array((zeroes * 7 + 7 + 1) - size_str.length)).join('0') + size_str;
        var size = (new Array(zeroes)).join('0') + '1' + padded;

        ebml.push(numToBuffer(json[i].id));
        ebml.push(bitsToBuffer(size));
        ebml.push(data)
    }

    //output as blob or byteArray
    if(outputAsArray){
        //convert ebml to an array
        var buffer = toFlatArray(ebml)
        return new Uint8Array(buffer);
    }else{
        return new Blob(ebml, {type: "video/webm"});
    }
}
// Recursively flattens nested arrays (including typed arrays) into one
// plain array of scalar elements.
function toFlatArray(arr, outBuffer){
    if(outBuffer == null){
        outBuffer = [];
    }
    for(var idx = 0; idx < arr.length; idx++){
        var item = arr[idx];
        //objects are nested arrays; anything else is a simple element
        typeof item == 'object' ? toFlatArray(item, outBuffer) : outBuffer.push(item);
    }
    return outBuffer;
}
//OKAY, so the following two functions are the string-based old stuff, kept
//because they're actually faster than the new blob path in some browsers
//(getAsFile isn't widely implemented, and chrome is the only browser that
//supports get-as-webp). Converting between '0101...' strings and binary
//back and forth is probably inefficient.
//TODO: get rid of this function
// Packs a bit string into a "binary string": one char per 8 bits, MSB first,
// left-padded with zeros to a multiple of 8.
function toBinStr_old(bits){
    var remainder = bits.length % 8;
    if(remainder){
        bits = new Array(8 - remainder + 1).join('0') + bits;
    }
    var chars = [];
    for(var i = 0; i < bits.length; i += 8){
        chars.push(String.fromCharCode(parseInt(bits.substr(i, 8), 2)));
    }
    return chars.join('');
}
// Legacy string-based serializer: mirrors generateEBML but emits a
// "binary string" instead of buffers/Blobs.
function generateEBML_old(json){
    var ebml = '';
    for(var i = 0; i < json.length; i++){
        var data = json[i].data;
        if(typeof data == 'object') data = generateEBML_old(data);
        if(typeof data == 'number') data = toBinStr_old(data.toString(2));

        var len = data.length;
        // VINT size field: `zeroes` extra length bytes, marker bit, padded length.
        var zeroes = Math.ceil(Math.ceil(Math.log(len)/Math.log(2))/8);
        var size_str = len.toString(2);
        var padded = (new Array((zeroes * 7 + 7 + 1) - size_str.length)).join('0') + size_str;
        var size = (new Array(zeroes)).join('0') + '1' + padded;

        ebml += toBinStr_old(json[i].id.toString(2)) + toBinStr_old(size) + data;
    }
    return ebml;
}
//woot, a function that's actually written for this project!
// Builds a Matroska SimpleBlock as a binary string:
// [trackNum | 0x80, 16-bit relative timecode, flags byte] + raw frame payload.
function makeSimpleBlock(data){
    if (data.trackNum > 127) {
        throw "TrackNumber > 127 not supported";
    }

    var flags = 0;
    if (data.keyframe) flags |= 128;
    if (data.invisible) flags |= 8;
    if (data.lacing) flags |= (data.lacing << 1);
    if (data.discardable) flags |= 1;

    var header = [
        data.trackNum | 0x80,
        data.timecode >> 8,
        data.timecode & 0xff,
        flags
    ];
    return header.map(function(byte){
        return String.fromCharCode(byte);
    }).join('') + data.frame;
}
// here's something else taken verbatim from weppy, awesome rite?
// Extracts the VP8 bitstream and its dimensions from a parsed RIFF container.
function parseWebP(riff){
    var VP8 = riff.RIFF[0].WEBP[0];

    var frame_start = VP8.indexOf('\x9d\x01\x2a'); //A VP8 keyframe starts with the 0x9d012a header
    // The 4 bytes after the start code carry the 14-bit width/height plus
    // 2-bit scaling fields, little-endian within each 16-bit pair.
    for(var i = 0, c = []; i < 4; i++) c[i] = VP8.charCodeAt(frame_start + 3 + i);

    var width, horizontal_scale, height, vertical_scale, tmp;

    //the code below is literally copied verbatim from the bitstream spec
    tmp = (c[1] << 8) | c[0];
    width = tmp & 0x3FFF;
    horizontal_scale = tmp >> 14;

    tmp = (c[3] << 8) | c[2];
    height = tmp & 0x3FFF;
    vertical_scale = tmp >> 14;

    return {
        width: width,
        height: height,
        // NOTE(review): `data` is the whole WEBP chunk payload; toWebM later
        // strips the first 4 bytes (webp.data.slice(4)) — confirm that offset.
        data: VP8,
        riff: riff
    }
}
// Minimal RIFF parser (ripped wholesale from weppy): walks the chunk list in
// `string` and returns {chunkId: [payload, ...]}; RIFF/LIST payloads are
// parsed recursively, everything else is kept as a raw binary string.
function parseRIFF(string){
    var offset = 0;
    var chunks = {};

    while (offset < string.length) {
        var id = string.substr(offset, 4);
        // Reassembles the 4 length bytes MSB-first (a big-endian read).
        // NOTE(review): RIFF lengths are nominally little-endian — confirm
        // against upstream weppy/whammy behavior before changing.
        var len = parseInt(string.substr(offset + 4, 4).split('').map(function(i){
            var unpadded = i.charCodeAt(0).toString(2);
            return (new Array(8 - unpadded.length + 1)).join('0') + unpadded
        }).join(''),2);
        var data = string.substr(offset + 4 + 4, len);
        offset += 4 + 4 + len;
        chunks[id] = chunks[id] || [];
        if (id == 'RIFF' || id == 'LIST') {
            chunks[id].push(parseRIFF(data));
        } else {
            chunks[id].push(data);
        }
    }
    return chunks;
}
// Encodes a JS number as its 8-byte IEEE-754 double representation, returned
// big-endian as a binary string (used for the EBML Duration element).
// Assumes a little-endian host, as the original did.
function doubleToString(num){
    var bytes = new Uint8Array(new Float64Array([num]).buffer);
    var chars = [];
    // Walk backwards to flip the host's little-endian byte order.
    for(var i = bytes.length - 1; i >= 0; i--){
        chars.push(String.fromCharCode(bytes[i]));
    }
    return chars.join('');
}
// a more abstract-ish API
// `speed` is frames-per-second; `quality` is the webp encoder quality (0..1).
function WhammyVideo(speed, quality){
    this.frames = [];
    // Fixed per-frame duration in ms derived from fps. If `speed` is omitted
    // this is NaN (falsy), and add() then requires an explicit duration.
    this.duration = 1000 / speed;
    this.quality = quality || 0.8;
}
// Queues a frame. Accepts an HTMLCanvasElement, a CanvasRenderingContext2D,
// or a webp data-URI string. `duration` (ms) is required iff the video was
// constructed without an fps.
// (Fixed typos in the thrown error messages: "ned" -> "need", "a a" -> "an".)
WhammyVideo.prototype.add = function(frame, duration){
    if(typeof duration != 'undefined' && this.duration) throw "you can't pass a duration if the fps is set";
    if(typeof duration == 'undefined' && !this.duration) throw "if you don't have the fps set, you need to have durations here.";

    if(frame.canvas){ //CanvasRenderingContext2D
        frame = frame.canvas;
    }
    if(frame.toDataURL){
        frame = frame.toDataURL('image/webp', this.quality)
    }else if(typeof frame != "string"){
        throw "frame must be an HTMLCanvasElement, a CanvasRenderingContext2D or a DataURI formatted string"
    }
    if (!(/^data:image\/webp;base64,/ig).test(frame)) {
        throw "Input must be formatted properly as a base64 encoded DataURI of type image/webp";
    }
    this.frames.push({
        image: frame,
        duration: duration || this.duration
    })
}
// Encodes all queued frames into a WebM Blob (or byte array when
// outputAsArray is truthy).
WhammyVideo.prototype.compile = function(outputAsArray){
    // FIX: was `new toWebM(...)` — toWebM is a plain function; the call only
    // worked because a constructor returning an object yields that object.
    return toWebM(this.frames.map(function(frame){
        var webp = parseWebP(parseRIFF(atob(frame.image.slice(23))));
        webp.duration = frame.duration;
        return webp;
    }), outputAsArray)
}
    // Public API of the Whammy module.
    return {
        Video: WhammyVideo,
        // Convenience: build a WebM directly from an array of webp data-URIs
        // at a fixed frame rate.
        fromImageArray: function(images, fps, outputAsArray){
            return toWebM(images.map(function(image){
                var webp = parseWebP(parseRIFF(atob(image.slice(23))))
                webp.duration = 1000 / fps;
                return webp;
            }), outputAsArray)
        },
        toWebM: toWebM
        // expose methods of madness
    }
})()
|
import FWCore.ParameterSet.Config as cms
from RecoTauTag.RecoTau.TauDiscriminatorTools import requireLeadTrackCalo
caloRecoTauDiscriminationAgainstMuon = cms.EDProducer("CaloRecoTauDiscriminationAgainstMuon",
    # tau collection to discriminate
    CaloTauProducer = cms.InputTag('caloRecoTauProducer'),
    Prediscriminants = requireLeadTrackCalo,
    # algorithm parameters
    caloCompCoefficient = cms.double(0.5), ## user-defined 2D cut: reject tau if calo * caloCompCoeff + segm * segmCompCoeff > cut
    segmCompCoefficient = cms.double(0.5),
    muonCompCut = cms.double(0.0),
    discriminatorOption = cms.string('noSegMatch'), ## available options are: noSegMatch, twoDCut, merePresence, combined
    muonSource = cms.InputTag("muons"),
    # presumably the deltaR cone for tau-muon matching — confirm in the producer
    dRmatch = cms.double(0.5)
)
|
def add_native_methods(clazz):
    """Attach stub "native" methods to *clazz*.

    Each stub mirrors a JNI-style mangled name (method__ArgTypes__) and simply
    raises NotImplementedError; `a0` is the receiver (`this`) of the original
    Java call. The names (addItems, select, makeVisible, WComponentPeer, ...)
    suggest a sun.awt.windows list peer — confirm against the generator.
    """
    def addItems__java_lang_String____int__int__(a0, a1, a2, a3):
        raise NotImplementedError()

    def delItems__int__int__(a0, a1, a2):
        raise NotImplementedError()

    def select__int__(a0, a1):
        raise NotImplementedError()

    def deselect__int__(a0, a1):
        raise NotImplementedError()

    def makeVisible__int__(a0, a1):
        raise NotImplementedError()

    def setMultipleSelections__boolean__(a0, a1):
        raise NotImplementedError()

    def getMaxWidth____(a0):
        raise NotImplementedError()

    def create__sun_awt_windows_WComponentPeer__(a0, a1):
        raise NotImplementedError()

    def isSelected__int__(a0, a1):
        raise NotImplementedError()

    # Bind every stub onto the class under its mangled name.
    clazz.addItems__java_lang_String____int__int__ = addItems__java_lang_String____int__int__
    clazz.delItems__int__int__ = delItems__int__int__
    clazz.select__int__ = select__int__
    clazz.deselect__int__ = deselect__int__
    clazz.makeVisible__int__ = makeVisible__int__
    clazz.setMultipleSelections__boolean__ = setMultipleSelections__boolean__
    clazz.getMaxWidth____ = getMaxWidth____
    clazz.create__sun_awt_windows_WComponentPeer__ = create__sun_awt_windows_WComponentPeer__
    clazz.isSelected__int__ = isSelected__int__
|
import React from 'react';
import { NavLink } from 'react-router-dom';
import "../css/home.css";
// Landing page hero section with a call-to-action linking to registration.
// (props are currently unused)
const HomePage = (props) => {
    return (
        <div className="hero">
            <section className="container">
                <div className="overlay"></div>
                <div className="hero-content">
                    <div className="content-wrapper">
                        <h2>myColors</h2>
                        <h5>Developer's personal studio</h5>
                        <NavLink to="/register" className="btn btn-h mt-3">join now</NavLink>
                    </div>
                </div>
            </section>
        </div>
    );
}

export default HomePage;
|
import React, {useCallback, useRef, useState} from 'react';
import {useModelState} from "../misc/custom-hooks";
import {Alert, Button, ControlLabel, Form, FormControl, FormGroup, Icon, Modal, Schema} from "rsuite";
import firebase from "firebase/compat/app";
import {auth, database} from "../misc/firebase";
// Blank form state used to initialize and reset the create-room modal.
const initialForm={
    name:'',
    description:''
}

const {StringType}=Schema.Types;

// rsuite Schema validation model: both fields are mandatory.
const model=Schema.Model({
    name:StringType().isRequired('Chat name is required'),
    description:StringType().isRequired('Description is required')
})
// Button + modal for creating a new chat room. Validates the form against
// `model`, then writes the room (with server timestamp and the current user
// as admin) under 'rooms' in the Firebase realtime database.
function CreateRoomBtnModal(props) {
    const {isOpen,open,close}=useModelState();
    const [formValue,setFormValue]=useState(initialForm);
    const [isLoading,setIsLoading]=useState(false);
    const formRef=useRef();

    const onFormChange=useCallback((value)=>{
        setFormValue(value);
    },[])

    const onSubmit=async ()=>{
        // rsuite form check: bail out if the schema validation fails.
        if(!formRef.current.check()){
            return;
        }
        setIsLoading(true);
        const newRoomdata={
            ...formValue,
            createdAt:firebase.database.ServerValue.TIMESTAMP,
            admins:{
                [auth.currentUser.uid]:true,
            }
        }
        try {
            await database.ref('rooms').push(newRoomdata);
            Alert.info(`${formValue.name} has been created`,4000);
            setIsLoading(false);
            // Reset the form so the next open starts blank.
            setFormValue(initialForm);
            close();
        }
        catch (err){
            setIsLoading(false);
            Alert.error(err.message,4000);
        }
    }

    return (
        <div className='mt-3'>
            <Button block color='green' onClick={open}>
                <Icon icon='creative'/>Create new chat room
            </Button>
            <Modal show={isOpen} onHide={close}>
                <Modal.Header>
                    <Modal.Title>New Chat room</Modal.Title>
                </Modal.Header>
                <Modal.Body>
                    <Form fluid onChange={onFormChange} formValue={formValue} model={model} ref={formRef}>
                        <FormGroup>
                            <ControlLabel>Room name</ControlLabel>
                            <FormControl name='name' placeholder='Enter the name of the chat room...'/>
                        </FormGroup>
                        <FormGroup>
                            <ControlLabel>Description</ControlLabel>
                            <FormControl componentClass='textarea' rows={5} name='description' placeholder='Enter the room description...'/>
                        </FormGroup>
                    </Form>
                </Modal.Body>
                <Modal.Footer>
                    <Button block color='orange' onClick={onSubmit} disabled={isLoading}>
                        Create new chat room
                    </Button>
                </Modal.Footer>
            </Modal>
        </div>
    );
}

export default CreateRoomBtnModal;
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.iam_resource_permission_list import IamResourcePermissionList # noqa: E501
from intersight.rest import ApiException
class TestIamResourcePermissionList(unittest.TestCase):
    """IamResourcePermissionList unit test stubs (OpenAPI-generated)."""

    def setUp(self):
        # No fixtures needed yet; see the FIXME in the test below.
        pass

    def tearDown(self):
        pass

    def testIamResourcePermissionList(self):
        """Test IamResourcePermissionList"""
        # FIXME: construct object with mandatory attributes with example values
        # model = intersight.models.iam_resource_permission_list.IamResourcePermissionList() # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
// @flow
// Implementation of `sourcecred discourse`
// This is a (likely temporary command) to facilitate loading a single
// discourse server.
import dedent from "../util/dedent";
import {LoggingTaskReporter} from "../util/taskReporter";
import type {Command} from "./command";
import * as Common from "./common";
import * as Weights from "../core/weights";
import {load} from "../api/load";
import {declaration as discourseDeclaration} from "../plugins/discourse/declaration";
import {type Project, createProject} from "../core/project";
// Prints the command's help text to the given sink (std.out or std.err).
// The template string below is user-visible output — keep it verbatim.
function usage(print: (string) => void): void {
    print(
        dedent`\
        usage: sourcecred discourse DISCOURSE_URL
                                    [--weights WEIGHTS_FILE]
               sourcecred discourse --help

        Loads a target Discourse server, generating cred scores for it.

        Arguments:
            DISCOURSE_URL
                The url to the Discourse server in question, for example
                https://discourse.sourcecred.io

            --weights WEIGHTS_FILE
                Path to a json file which contains a weights configuration.
                This will be used instead of the default weights and persisted.

            --help
                Show this help message and exit, as 'sourcecred help discourse'.

        Environment variables:
            SOURCECRED_DIRECTORY
                Directory owned by SourceCred, in which data, caches,
                registries, etc. are stored. Optional: defaults to a
                directory 'sourcecred' under your OS's temporary directory;
                namely:
                    ${Common.defaultSourcecredDirectory()}
        `.trimRight()
    );
}
// Reports a fatal CLI error on stderr and returns the conventional
// non-zero exit code.
function die(std, message) {
    ["fatal: " + message, "fatal: run 'sourcecred help discourse' for help"]
        .forEach((line) => std.err(line));
    return 1;
}
const command: Command = async (args, std) => {
const positionalArgs = [];
let weightsPath: string | null = null;
for (let i = 0; i < args.length; i++) {
switch (args[i]) {
case "--help": {
usage(std.out);
return 0;
}
case "--weights": {
if (weightsPath != null)
return die(std, "'--weights' given multiple times");
if (++i >= args.length)
return die(std, "'--weights' given without value");
weightsPath = args[i];
break;
}
default: {
positionalArgs.push(args[i]);
break;
}
}
}
if (positionalArgs.length !== 1) {
return die(std, "Expected one positional arguments (or --help).");
}
const [serverUrl] = positionalArgs;
const httpRE = new RegExp(/^https?:\/\//);
if (!httpRE.test(serverUrl)) {
die(std, "expected server url to start with 'https://' or 'http://'");
}
const projectId = serverUrl.trim().replace(httpRE, "");
const project: Project = createProject({
id: projectId,
discourseServer: {serverUrl},
});
const taskReporter = new LoggingTaskReporter();
let weights = Weights.empty();
if (weightsPath) {
weights = await Common.loadWeights(weightsPath);
}
const plugins = [discourseDeclaration];
await load(
{
project,
params: null,
weightsOverrides: weights,
plugins,
sourcecredDirectory: Common.sourcecredDirectory(),
githubToken: null,
},
taskReporter
);
return 0;
};
export const help: Command = async (args, std) => {
if (args.length === 0) {
usage(std.out);
return 0;
} else {
usage(std.err);
return 1;
}
};
export default command;
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
from telethon import events
import os
import requests
import logging
from userbot import bot, OCR_SPACE_API_KEY, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
async def ocr_space_file(filename,
                         overlay=False,
                         api_key=OCR_SPACE_API_KEY,
                         language='eng'):
    """ OCR.space API request with local file.
    Python3.5 - not tested on 2.7
    :param filename: Your file path & name.
    :param overlay: Is OCR.space overlay required in your response.
                    Defaults to False.
    :param api_key: OCR.space API key.
                    Defaults to the configured OCR_SPACE_API_KEY.
    :param language: Language code to be used in OCR.
                    List of available language codes can be found on https://ocr.space/OCRAPI
                    Defaults to 'eng'.
    :return: Result in JSON format.
    """
    # NOTE(review): requests.post is a blocking call inside an async def, so it
    # stalls the event loop for the duration of the upload — consider
    # loop.run_in_executor or an async HTTP client; confirm before changing.
    payload = {
        'isOverlayRequired': overlay,
        'apikey': api_key,
        'language': language,
    }
    with open(filename, 'rb') as f:
        # The uploaded form field is named after the local file path.
        r = requests.post(
            'https://api.ocr.space/parse/image',
            files={filename: f},
            data=payload,
        )
    return r.json()
@register(pattern=r".ocr (.*)", outgoing=True)
async def ocr(event):
    """Handle ``.ocr <language>``: OCR the media of the replied-to message.

    Downloads the replied-to media into TEMP_DOWNLOAD_DIRECTORY, sends it to
    OCR.space via ocr_space_file(), edits the command message with the parsed
    text, and always deletes the downloaded file afterwards (the original
    leaked it whenever parsing failed).
    """
    await event.edit("`Reading...`")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    lang_code = event.pattern_match.group(1)
    downloaded_file_name = await bot.download_media(
        await event.get_reply_message(), TEMP_DOWNLOAD_DIRECTORY)
    # download_media returns None when the reply has no downloadable media
    if not downloaded_file_name:
        await event.edit("`Reply to an image or sticker to extract text from it.`")
        return
    try:
        test_file = await ocr_space_file(filename=downloaded_file_name,
                                         language=lang_code)
        parsed_text = test_file["ParsedResults"][0]["ParsedText"]
    except (KeyError, IndexError, TypeError, ValueError,
            requests.RequestException):
        # API error, malformed response, or network failure
        await event.edit("`Couldn't read it.`\n`I guess I need new glasses.`")
    else:
        await event.edit(f"`Here's what I could read from it:`\n\n{parsed_text}"
                         )
    finally:
        # clean up the temp download on both success and failure
        if os.path.exists(downloaded_file_name):
            os.remove(downloaded_file_name)
# Register the .ocr command in the userbot's help index.
CMD_HELP.update({
    'ocr':
    ".ocr <language>\nUsage: Reply to an image or sticker to extract text from it.\n\nGet language codes from [here](https://ocr.space/ocrapi)"
})
|
/* libuEv - Micro event loop library
*
* Copyright (c) 2012 Flemming Madsen <flemming!madsen()madsensoft!dk>
* Copyright (c) 2013-2017 Joachim Nilsson <troglobit()gmail!com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <errno.h>
#include "uev.h"
/**
 * Create an I/O watcher
 * @param ctx    A valid libuEv context
 * @param w      Pointer to an uev_t watcher
 * @param cb     I/O callback
 * @param arg    Optional callback argument
 * @param fd     File descriptor to watch; must be valid, a negative
 *               descriptor is rejected with @param errno set to EINVAL
 * @param events Events to watch for: %UEV_READ, %UEV_WRITE, %UEV_EDGE, %UEV_ONESHOT
 *
 * @return POSIX OK(0) or non-zero with @param errno set on error.
 */
int uev_io_init(uev_ctx_t *ctx, uev_t *w, uev_cb_t *cb, void *arg, int fd, int events)
{
	if (fd < 0) {
		errno = EINVAL;
		return -1;
	}
	if (_uev_watcher_init(ctx, w, UEV_IO_TYPE, cb, arg, fd, events))
		return -1;
	return _uev_watcher_start(w);
}
/**
 * Reset an I/O watcher
 * @param w      Pointer to an uev_t watcher
 * @param fd     New file descriptor to monitor
 * @param events Requested events to watch for, a mask of %UEV_READ and %UEV_WRITE
 *
 * @return POSIX OK(0) or non-zero with @param errno set on error.
 */
int uev_io_set(uev_t *w, int fd, int events)
{
	int rearm = (events & UEV_ONESHOT) && _uev_watcher_active(w);

	/* An active one-shot watcher only needs re-arming */
	if (rearm)
		return _uev_watcher_rearm(w);

	/* Ignore any errors, only to clean up anything lingering ... */
	uev_io_stop(w);

	return uev_io_init(w->ctx, w, (uev_cb_t *)w->cb, w->arg, fd, events);
}
/**
 * Start an I/O watcher
 * @param w Watcher to start (again)
 *
 * @return POSIX OK(0) or non-zero with @param errno set on error.
 */
int uev_io_start(uev_t *w)
{
	/* Re-register using the watcher's current descriptor and event mask */
	int fd = w->fd;
	int events = w->events;

	return uev_io_set(w, fd, events);
}
/**
 * Stop an I/O watcher
 * @param w Watcher to stop
 *
 * @return POSIX OK(0) or non-zero with @param errno set on error.
 */
int uev_io_stop(uev_t *w)
{
	/* Thin wrapper: deregistration is handled by the generic watcher layer */
	return _uev_watcher_stop(w);
}
/**
* Local Variables:
* indent-tabs-mode: t
* c-file-style: "linux"
* End:
*/
|
import { h } from 'omi';
import createSvgIcon from './utils/createSvgIcon';
// Material 'Vibration' icon wrapped as an omi component via createSvgIcon.
export default createSvgIcon(h("path", {
  d: "M0 15h2V9H0v6zm3 2h2V7H3v10zm19-8v6h2V9h-2zm-3 8h2V7h-2v10zM16.5 3h-9C6.67 3 6 3.67 6 4.5v15c0 .83.67 1.5 1.5 1.5h9c.83 0 1.5-.67 1.5-1.5v-15c0-.83-.67-1.5-1.5-1.5zM16 19H8V5h8v14z"
}), 'Vibration');
|
const express = require("express");
const server = express();
const nunjucks = require("nunjucks");
const mongoose = require("mongoose");
mongoose.connect('mongodb+srv://blood-donation:blood-donation@blood-donation-43xtg.mongodb.net/test?retryWrites=true&w=majority', {
    useNewUrlParser: true,
    useUnifiedTopology: true,
});
// NOTE(review): the credentials are hard-coded in the connection string above;
// they should be moved to an environment variable.
// Donor model. MongoDB/mongoose has no SQL interface — the original code built
// an `INSERT INTO donors ...` string and passed it to `mongoose.Query`, which
// is not a callable API, so no donor was ever persisted. A schema-backed model
// is the supported way to read and write documents.
const Donor = mongoose.model("Donor", new mongoose.Schema({
    name: String,
    email: String,
    blood: String,
}));
// css, images and js are static files
server.use(express.static('public'));
server.use(express.urlencoded({ extended: true }));
nunjucks.configure("./", {
    express: server,
    noCache: true,
});
// Home page: render the list of registered donors (the original always
// rendered an empty list, so saved donors were never shown).
server.get('/', function (req, res) {
    Donor.find({}, function (err, donors) {
        if (err) return res.send("Database error.");
        return res.render("index.html", { donors });
    });
});
// Form submission: validate the three fields and persist a new donor.
server.post('/', function (req, res) {
    const name = req.body.name;
    const email = req.body.email;
    const blood = req.body.blood;
    // `!x` also rejects missing (undefined) fields, not only empty strings
    if (!name || !email || !blood) {
        return res.send("All fields are required!");
    }
    Donor.create({ name: name, email: email, blood: blood }, function (err) {
        if (err) return res.send("Database error.");
        return res.redirect("/");
    });
});
server.listen(3000, function () {
    console.log("Server initialized.");
});
|
'use strict';
// Generated Font Awesome icon definition: 'broadcast-tower', solid style
// ('fas'), unicode f519, 640x512 viewBox. Consumed by the FA SVG core.
Object.defineProperty(exports, '__esModule', { value: true });
var prefix = 'fas';
var iconName = 'broadcast-tower';
var width = 640;
var height = 512;
var ligatures = [];
var unicode = 'f519';
var svgPathData = 'M150.94 192h33.73c11.01 0 18.61-10.83 14.86-21.18-4.93-13.58-7.55-27.98-7.55-42.82s2.62-29.24 7.55-42.82C203.29 74.83 195.68 64 184.67 64h-33.73c-7.01 0-13.46 4.49-15.41 11.23C130.64 92.21 128 109.88 128 128c0 18.12 2.64 35.79 7.54 52.76 1.94 6.74 8.39 11.24 15.4 11.24zM89.92 23.34C95.56 12.72 87.97 0 75.96 0H40.63c-6.27 0-12.14 3.59-14.74 9.31C9.4 45.54 0 85.65 0 128c0 24.75 3.12 68.33 26.69 118.86 2.62 5.63 8.42 9.14 14.61 9.14h34.84c12.02 0 19.61-12.74 13.95-23.37-49.78-93.32-16.71-178.15-.17-209.29zM614.06 9.29C611.46 3.58 605.6 0 599.33 0h-35.42c-11.98 0-19.66 12.66-14.02 23.25 18.27 34.29 48.42 119.42.28 209.23-5.72 10.68 1.8 23.52 13.91 23.52h35.23c6.27 0 12.13-3.58 14.73-9.29C630.57 210.48 640 170.36 640 128s-9.42-82.48-25.94-118.71zM489.06 64h-33.73c-11.01 0-18.61 10.83-14.86 21.18 4.93 13.58 7.55 27.98 7.55 42.82s-2.62 29.24-7.55 42.82c-3.76 10.35 3.85 21.18 14.86 21.18h33.73c7.02 0 13.46-4.49 15.41-11.24 4.9-16.97 7.53-34.64 7.53-52.76 0-18.12-2.64-35.79-7.54-52.76-1.94-6.75-8.39-11.24-15.4-11.24zm-116.3 100.12c7.05-10.29 11.2-22.71 11.2-36.12 0-35.35-28.63-64-63.96-64-35.32 0-63.96 28.65-63.96 64 0 13.41 4.15 25.83 11.2 36.12l-130.5 313.41c-3.4 8.15.46 17.52 8.61 20.92l29.51 12.31c8.15 3.4 17.52-.46 20.91-8.61L244.96 384h150.07l49.2 118.15c3.4 8.16 12.76 12.01 20.91 8.61l29.51-12.31c8.15-3.4 12-12.77 8.61-20.92l-130.5-313.41zM271.62 320L320 203.81 368.38 320h-96.76z';
exports.definition = {
  prefix: prefix,
  iconName: iconName,
  icon: [
    width,
    height,
    ligatures,
    unicode,
    svgPathData
  ]};
exports.faBroadcastTower = exports.definition;
exports.prefix = prefix;
exports.iconName = iconName;
exports.width = width;
exports.height = height;
exports.ligatures = ligatures;
exports.unicode = unicode;
exports.svgPathData = svgPathData;
|
#include "types.h"
#include "user.h"
#define NELEM(x) (sizeof(x)/sizeof((x)[0]))
/* True when the first argument starts with "-fea".
   NOTE(review): this macro dereferences argv[1], so it must only be
   evaluated after checking argc > 1 — argv[1] is NULL otherwise. */
#define FEA (argv[1][0]=='-' && argv[1][1]=='f' && argv[1][2]=='e' && argv[1][3]=='a')
/* Snapshot of the kernel process table, filled in by list_procs() */
struct proc proc[NPROC];
/* Printable name for each process state, indexed by the state enum */
static char *states[] = {
[UNUSED] "unused",
[EMBRYO] "embryo",
[SLEEPING] "sleep ",
[RUNNABLE] "runble",
[RUNNING] "run ",
[ZOMBIE] "zombie"
};
/* Iteration cursor and per-process scratch used by main() */
struct proc *p;
char *state;
uint time;
int main(int argc, char *argv[]){
  /* Evaluate the -fea flag exactly once. The FEA macro dereferences
     argv[1], so it must only be tested when an argument was actually
     given; the original evaluated FEA unconditionally and dereferenced
     a NULL argv[1] when run with no arguments. */
  int fea = (argc > 1) && FEA;

  list_procs(proc,sizeof(proc));
  /* Header row; -fea adds PPID in front and STIME at the end */
  if(fea){
    printf(1,"%s ", "PPID");
  }
  printf(1,"%s %s %s", "PID", "STATE", "CMD");
  if(fea){
    printf(1," %s", "STIME");
  }
  printf(1,"\n");
  for(p = proc; p < &proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    /* Map the state enum to a printable name, defensively */
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    if(fea){
      printf(1,"%d ", p->ppid);
    }
    printf(1,"%d %s %s ", p->pid, state, p->name);
    if(fea){ /* Conversion to time taking quantum as 20uS */
      time=(p->times);
      printf(1," %d.", time/50000);
      time=time%50000;
      time/=50;
      /* zero-pad the fractional part to three digits */
      if (time<100){
        printf(1,"0");
      }
      if (time<10){
        printf(1,"0");
      }
      printf(1,"%d", time);
    }
    printf(1,"\n");
  }
  exit();
}
|
/**
* DevExtreme (data/local_store.js)
* Version: 16.2.6
* Build date: Tue Mar 28 2017
*
* Copyright (c) 2012 - 2017 Developer Express Inc. ALL RIGHTS RESERVED
* EULA: https://www.devexpress.com/Support/EULAs/DevExtreme.xml
*/
"use strict";
var $ = require("jquery"),
Class = require("../core/class"),
abstract = Class.abstract,
errors = require("./errors").errors,
ArrayStore = require("./array_store");
// Base class for persistence backends: keeps the wrapped ArrayStore's array
// in sync with a storage medium, flushing either immediately on every change
// or periodically on a timer.
var LocalStoreBackend = Class.inherit({
    ctor: function(store, storeOptions) {
        this._store = store;
        // Data passed directly in the options has not been persisted yet
        this._dirty = !!storeOptions.data;
        this.save();
        var immediate = this._immediate = storeOptions.immediate;
        // Flush at least every 100 ms; default interval is 10 s
        var flushInterval = Math.max(100, storeOptions.flushInterval || 1e4);
        if (!immediate) {
            var saveProxy = $.proxy(this.save, this);
            setInterval(saveProxy, flushInterval);
            // Also flush when the page unloads, or a Cordova app is paused
            $(window).on("beforeunload", saveProxy);
            if (window.cordova) {
                document.addEventListener("pause", saveProxy, false)
            }
        }
    },
    // Called by the store after every mutation
    notifyChanged: function() {
        this._dirty = true;
        if (this._immediate) {
            this.save()
        }
    },
    // Replace the store's in-memory array with the persisted one
    load: function() {
        this._store._array = this._loadImpl();
        this._dirty = false
    },
    // Persist the store's array, but only when something changed
    save: function() {
        if (!this._dirty) {
            return
        }
        this._saveImpl(this._store._array);
        this._dirty = false
    },
    _loadImpl: abstract,
    _saveImpl: abstract
});
// Backend that persists the array as JSON under a namespaced
// window.localStorage key ("dx-data-localStore-<name>").
var DomLocalStoreBackend = LocalStoreBackend.inherit({
    ctor: function(store, storeOptions) {
        var name = storeOptions.name;
        if (!name) {
            throw errors.Error("E4013")
        }
        this._key = "dx-data-localStore-" + name;
        this.callBase(store, storeOptions)
    },
    _loadImpl: function() {
        var raw = localStorage.getItem(this._key);
        if (raw) {
            return JSON.parse(raw)
        }
        return []
    },
    _saveImpl: function(array) {
        // An empty array removes the key entirely instead of storing "[]"
        if (!array.length) {
            localStorage.removeItem(this._key)
        } else {
            localStorage.setItem(this._key, JSON.stringify(array))
        }
    }
});
// Registry of available backends, keyed by the "backend" option
var localStoreBackends = {
    dom: DomLocalStoreBackend
};
// ArrayStore persisted through a pluggable backend ("dom" = localStorage).
var LocalStore = ArrayStore.inherit({
    ctor: function(options) {
        // A bare string is shorthand for { name: <string> }
        if ("string" === typeof options) {
            options = {
                name: options
            }
        } else {
            options = options || {}
        }
        this.callBase(options);
        this._backend = new localStoreBackends[options.backend || "dom"](this, options);
        this._backend.load()
    },
    clear: function() {
        this.callBase();
        this._backend.notifyChanged()
    },
    // Each mutation marks the backend dirty once the base operation succeeds
    _insertImpl: function(values) {
        var b = this._backend;
        return this.callBase(values).done($.proxy(b.notifyChanged, b))
    },
    _updateImpl: function(key, values) {
        var b = this._backend;
        return this.callBase(key, values).done($.proxy(b.notifyChanged, b))
    },
    _removeImpl: function(key) {
        var b = this._backend;
        return this.callBase(key).done($.proxy(b.notifyChanged, b))
    }
}, "local");
module.exports = LocalStore;
module.exports.default = module.exports;
|
(function (app) {
    'use strict';
    // Placeholder controller for the forgot-password view; no scope state yet.
    function forgotPasswordController($scope) {
    }
    app.controller('forgotPasswordController', ['$scope', forgotPasswordController]);
})(angular.module('accountApp'));
|
// Copyright 2018-present the Material Components for iOS authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* IMPORTANT:
This file contains supplemental code used to populate the examples with dummy data and/or
instructions. It is not necessary to import this file to use Material Components for iOS.
*/
#import <UIKit/UIKit.h>
// Example view controller demonstrating dialog dismissal over an already
// presented controller; no additional API beyond UIViewController.
@interface DialogDismissalOverPresentedControllerViewController : UIViewController
@end
|
/*!
* jQuery Browser Plugin v0.0.5
* https://github.com/gabceb/jquery-browser-plugin
*
* Original jquery-browser code Copyright 2005, 2013 jQuery Foundation, Inc. and other contributors
* http://jquery.org/license
*
* Modifications Copyright 2013 Gabriel Cebrian
* https://github.com/gabceb
*
* Released under the MIT license
*
* Date: 2013-07-29T17:23:27-07:00
*/
(function( jQuery, window, undefined ) {
  "use strict";

  var matched, browser;

// Parse a user-agent string into { browser, version, platform }.
// The regex alternatives are ordered so that more specific tokens
// (Opera 15+ "opr", Chrome) win before generic ones (webkit, mozilla).
jQuery.uaMatch = function( ua ) {
  ua = ua.toLowerCase();

  var match = /(opr)[\/]([\w.]+)/.exec( ua ) ||
    /(chrome)[ \/]([\w.]+)/.exec( ua ) ||
    /(version)[ \/]([\w.]+).*(safari)[ \/]([\w.]+)/.exec(ua) ||
    /(webkit)[ \/]([\w.]+)/.exec( ua ) ||
    /(opera)(?:.*version|)[ \/]([\w.]+)/.exec( ua ) ||
    /(msie) ([\w.]+)/.exec( ua ) ||
    ua.indexOf("trident") >= 0 && /(rv)(?::| )([\w.]+)/.exec( ua ) ||
    ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec( ua ) ||
    [];

  var platform_match = /(ipad)/.exec( ua ) ||
    /(iphone)/.exec( ua ) ||
    /(android)/.exec( ua ) ||
    /(windows phone)/.exec(ua) ||
    /(win)/.exec( ua ) ||
    /(mac)/.exec( ua ) ||
    /(linux)/.exec( ua ) ||
    [];

  return {
    // match[3] only exists for the safari alternative ("version/x safari/y")
    browser: match[ 3 ] || match[ 1 ] || "",
    version: match[ 2 ] || "0",
    platform: platform_match[0] || ""
  };
};

matched = jQuery.uaMatch( window.navigator.userAgent );
browser = {};

if ( matched.browser ) {
  browser[ matched.browser ] = true;
  browser.version = matched.version;
  // NOTE(review): parseFloat takes a single argument — the 10 is ignored
  // (a radix only applies to parseInt).
  browser.versionNumber = parseFloat(matched.version, 10);
}

if ( matched.platform ) {
  browser[ matched.platform ] = true;
}

// Chrome, Opera 15+ and Safari are webkit based browsers
if ( browser.chrome || browser.opr || browser.safari ) {
  browser.webkit = true;
}

// IE11 has a new token so we will assign it msie to avoid breaking changes
if ( browser.rv )
{
  var ie = 'msie';

  matched.browser = ie;
  browser[ie] = true;
}

// Opera 15+ are identified as opr
if ( browser.opr )
{
  var opera = 'opera';

  matched.browser = opera;
  browser[opera] = true;
}

// Assign the name and platform variable
browser.name = matched.browser;
browser.platform = matched.platform;
jQuery.browser = browser;
})( jQuery, window );
|
# Problem source: https://time.geekbang.org/column/article/74788
# Several algorithms for the minimum triangle-path-sum problem
# Approach 1: brute-force backtracking
from typing import List
least = float("inf")  # best complete-path sum found so far by the backtracking search


def yh_triangle_backtrack(level, levelnum, num, yhlist):
    """Exhaustively walk every root-to-bottom path of the triangle.

    Renamed from ``yh_triangle``: the original name was silently shadowed by
    the dynamic-programming function of the same name defined below, which
    made this implementation unreachable dead code.

    :param level: index of the row currently being visited
    :param levelnum: index of the element within that row
    :param num: path sum accumulated so far (including the current cell)
    :param yhlist: the triangle, as a list of rows
    :return: minimum path sum reachable from the current cell; the module
        global ``least`` is still updated for backward compatibility
    """
    global least
    if level == len(yhlist) - 1:
        # reached the bottom row: record the best complete path
        if num < least:
            least = num
        return num
    # descend to the two reachable children: levelnum and levelnum + 1
    down_left = yh_triangle_backtrack(level + 1, levelnum,
                                      num + yhlist[level + 1][levelnum], yhlist)
    down_right = yh_triangle_backtrack(level + 1, levelnum + 1,
                                       num + yhlist[level + 1][levelnum + 1], yhlist)
    return min(down_left, down_right)
# Approach 2: dynamic programming, building the table from the top row down
def yh_triangle(yhlist):
    """Return the minimum top-to-bottom path sum of the triangle.

    Builds a table ``res`` where ``res[j][i]`` is the cheapest path sum ending
    at row ``j``, position ``i``; the answer is the minimum over the last row.
    The stray debug ``print(res)`` of the original has been removed.
    """
    n = len(yhlist)
    res = [[0] * i for i in range(1, n + 1)]
    # seed the table with the apex of the triangle
    res[0][0] = yhlist[0][0]
    for j in range(1, n):
        # the first and last element of each row have only a single parent
        for i in range(j + 1):
            if i == 0:
                res[j][i] = res[j - 1][i] + yhlist[j][i]
            elif i == j:
                res[j][i] = res[j - 1][i - 1] + yhlist[j][i]
            else:
                res[j][i] = min(res[j - 1][i - 1], res[j - 1][i]) + yhlist[j][i]
    return min(res[-1])
def yh_triangle_bottom_up(nums) -> int:
    """Minimum top-to-bottom path sum, folding the triangle upward row by row.

    Keeps a single row of partial minima (O(n) extra space): after processing
    row ``r``, ``best[j]`` is the cheapest path sum from ``nums[r][j]`` down
    to the bottom of the triangle.
    """
    assert len(nums) > 0
    best = list(nums[-1])
    for row in reversed(nums[:-1]):
        best = [cell + min(below) for cell, below in zip(row, zip(best, best[1:]))]
    return best[0]
if __name__ == '__main__':
    # sample triangle: nums[j][i] is the cost of row j, position i
    nums = [[3], [2, 6], [5, 4, 2], [6, 0, 3, 2]]
    # yh_triangle(0,0,3,nums)
    # print(least)
    print(yh_triangle(nums))  # the DP version defined last owns this name
    # print(yh_triangle_space_optimization(nums))
    print(yh_triangle_bottom_up(nums))
|
// Apply the saved view state on load and re-apply whenever the radio changes.
$(document).ready(function () {
    adjustFieldsView();
    $(document).on("change", "input[name=fields-display-view-type]", null, adjustFieldsView);
});
function adjustFieldsView() {
    // Highlight the toggle button of the selected view type and show only
    // the matching panel (tree / list / raw); the others are hidden.
    let viewType = $("input[name=fields-display-view-type]:checked").val();
    $("input[name=fields-display-view-type]").each(function () {
        $(this).closest("label.btn").toggleClass("active", $(this).val() === viewType);
    });
    let panels = {
        tree: $(".fields-tree"),
        list: $(".fields-list"),
        raw: $(".fields-raw")
    };
    $.each(panels, function (type, panel) {
        if (type === viewType) {
            panel.show();
        } else {
            panel.hide();
        }
    });
}
|
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/* Open a temporary file and cache it with io_cache. Delete it on close */
#include "my_global.h"
#include "mysys_priv.h"
#include "mysql/psi/mysql_file.h"
#include "my_sys.h"
#include <m_string.h>
#include "my_static.h"
#include "mysys_err.h"
/*
  Remove an open tempfile so that it doesn't survive
  if we crash.

  On platforms that support O_TEMPORARY the OS deletes the file on close,
  so the explicit unlink is compiled out there.
*/

static my_bool cache_remove_open_tmp(IO_CACHE *cache __attribute__((unused)),
                                     const char *name)
{
#if O_TEMPORARY == 0
  /* The following should always succeed */
  (void) my_delete(name,MYF(MY_WME));
#endif /* O_TEMPORARY == 0 */
  return 0;
}
/*
** Open tempfile cached by IO_CACHE
** Should be used when no seeks are done (only reinit_io_buff)
** Return 0 if cache is inited ok
** The actual file is created when the IO_CACHE buffer gets filled
** If dir is not given, use TMPDIR.
*/

my_bool open_cached_file(IO_CACHE *cache, const char* dir, const char *prefix,
			 size_t cache_size, myf cache_myflags)
{
  DBUG_ENTER("open_cached_file");
  /* Copy dir/prefix now; the file itself is created lazily on first flush */
  cache->dir=	 dir ? my_strdup(key_memory_IO_CACHE,
                                 dir,MYF(cache_myflags & MY_WME)) : (char*) 0;
  cache->prefix= (prefix ? my_strdup(key_memory_IO_CACHE,
                                     prefix,MYF(cache_myflags & MY_WME)) :
		 (char*) 0);
  cache->file_name=0;
  cache->buffer=0;				/* Mark that not open */
  if (!init_io_cache(cache,-1,cache_size,WRITE_CACHE,0L,0,
		     MYF(cache_myflags | MY_NABP)))
  {
    DBUG_RETURN(0);
  }
  /* init_io_cache failed: release the copies made above */
  my_free(cache->dir);
  my_free(cache->prefix);
  DBUG_RETURN(1);
}
/* Create the temporary file */

my_bool real_open_cached_file(IO_CACHE *cache)
{
  char name_buff[FN_REFLEN];
  int error=1;
  DBUG_ENTER("real_open_cached_file");
  if ((cache->file= mysql_file_create_temp(cache->file_key, name_buff,
                                           cache->dir, cache->prefix,
                                           (O_RDWR | O_BINARY | O_TRUNC |
                                            O_TEMPORARY | O_SHORT_LIVED),
                                           MYF(MY_WME))) >= 0)
  {
    error=0;
    /* Unlink at once (where needed) so the file cannot survive a crash */
    cache_remove_open_tmp(cache, name_buff);
  }
  DBUG_RETURN(error);
}
void close_cached_file(IO_CACHE *cache)
{
  DBUG_ENTER("close_cached_file");
  if (my_b_inited(cache))
  {
    File file=cache->file;
    cache->file= -1;				/* Don't flush data */
    (void) end_io_cache(cache);
    if (file >= 0)
    {
      (void) mysql_file_close(file, MYF(0));
      /* On O_TEMPORARY platforms this close deletes the file; elsewhere it
         was already unlinked in real_open_cached_file() */
    }
    my_free(cache->dir);
    my_free(cache->prefix);
  }
  DBUG_VOID_RETURN;
}
|
"""
slapdtest - module for spawning test instances of OpenLDAP's slapd server
See https://www.python-ldap.org/ for details.
"""
__version__ = '3.4.0'
from slapdtest._slapdtest import SlapdObject, SlapdTestCase, SysLogHandler
from slapdtest._slapdtest import requires_ldapi, requires_sasl, requires_tls
from slapdtest._slapdtest import requires_init_fd
from slapdtest._slapdtest import skip_unless_ci
|
/**
* @fileoverview added by tsickle
* @suppress {checkTypes,extraRequire,uselessCode} checked by tsc
*/
import { Injectable } from '@angular/core';
import * as i0 from "@angular/core";
// Empty injectable service (compiled tsickle output); registered in the
// root injector via both the decorator metadata and ngInjectableDef.
export class TextService {
    constructor() { }
}
TextService.decorators = [
    { type: Injectable, args: [{
                providedIn: 'root'
            },] },
];
/** @nocollapse */
TextService.ctorParameters = () => [];
/** @nocollapse */ TextService.ngInjectableDef = i0.defineInjectable({ factory: function TextService_Factory() { return new TextService(); }, token: TextService, providedIn: "root" });
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoidGV4dC5zZXJ2aWNlLmpzIiwic291cmNlUm9vdCI6Im5nOi8vYW5ndWxhci13ZWJsaW5laW5kaWEtdGV4dC1ib3gvIiwic291cmNlcyI6WyJsaWIvdGV4dC5zZXJ2aWNlLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiI7Ozs7QUFBQSxPQUFPLEVBQUUsVUFBVSxFQUFFLE1BQU0sZUFBZSxDQUFDOztBQUszQyxNQUFNO0lBRUosaUJBQWlCOzs7WUFMbEIsVUFBVSxTQUFDO2dCQUNWLFVBQVUsRUFBRSxNQUFNO2FBQ25CIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgSW5qZWN0YWJsZSB9IGZyb20gJ0Bhbmd1bGFyL2NvcmUnO1xuXG5ASW5qZWN0YWJsZSh7XG4gIHByb3ZpZGVkSW46ICdyb290J1xufSlcbmV4cG9ydCBjbGFzcyBUZXh0U2VydmljZSB7XG5cbiAgY29uc3RydWN0b3IoKSB7IH1cbn1cbiJdfQ==
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DiskAttachment(object):
    """Describes one attachment relation between a cloud disk and an instance."""

    def __init__(self, attachmentId=None, diskId=None, instanceType=None, instanceId=None, status=None, attachTime=None):
        """
        :param attachmentId: (Optional) attachment id
        :param diskId: (Optional) cloud-disk id
        :param instanceType: (Optional) type of the attached instance: vm or nc
        :param instanceId: (Optional) id of the attached instance
        :param status: (Optional) attachment status, one of "attaching", "attached", "detaching", "detached"
        :param attachTime: (Optional) time of attachment
        """
        # Assign every field verbatim; all parameters default to None.
        for field, value in (
            ("attachmentId", attachmentId),
            ("diskId", diskId),
            ("instanceType", instanceType),
            ("instanceId", instanceId),
            ("status", status),
            ("attachTime", attachTime),
        ):
            setattr(self, field, value)
|
import pandas as pd
import numpy as np
# Summarise missing data per column of a dataframe.
def missing_values_table(df):
    """Return a per-column missing-value summary.

    The result is a dataframe indexed by column name with the columns
    'Missing Values' (count) and '% of Total Values', restricted to columns
    that actually have missing values and sorted by percentage descending.
    A short summary is also printed.
    """
    null_counts = df.isnull().sum()
    null_pcts = 100 * null_counts / len(df)
    summary = pd.concat([null_counts, null_pcts], axis=1).rename(
        columns={0: 'Missing Values', 1: '% of Total Values'})
    summary = summary[summary.iloc[:, 1] != 0].sort_values(
        '% of Total Values', ascending=False).round(1)
    print("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
          "There are " + str(summary.shape[0]) +
          " columns that have missing values.")
    return summary
def format_dtfm(dtfm, convert_cols=None, nan=None, logger=False):
    """
    Format dataframe columns to machine-learnable types.

    Replaces every placeholder listed in `nan` with np.nan, then label-encodes
    each column in `convert_cols` by mapping every distinct value (NaN
    included) to an integer in order of first appearance.

    USAGE: format_dtfm(dtfm, convert_cols=None, nan=None, logger=False)
    INPUTS:
        dtfm: pandas dataframe
        (optional)
        convert_cols=None: columns whose values are label-encoded to ints
            (defaults to no columns)
        nan=None: placeholder values converted to np.nan (defaults to ['.'])
        logger=False: whether to log info to console
    OUTPUT:
        formatted dataframe
    """
    # Avoid mutable default arguments (the original used convert_cols=[] and
    # nan=['.']): a shared list default would leak state between calls.
    if convert_cols is None:
        convert_cols = []
    if nan is None:
        nan = ['.']
    if logger:
        print('Data Info:\n{}'.format(dtfm.info()))
        print('\nData Head:\n{}'.format(dtfm.head()))
    # convert every placeholder in nan to np.nan
    for placeholder in nan:
        dtfm = dtfm.replace(placeholder, np.nan)
    # label-encode the requested columns
    for col in convert_cols:
        # collect labels in order of first appearance (NaN can be a label too)
        labels = []
        for _, row in dtfm.iterrows():
            if row[col] not in labels:
                labels.append(row[col])
        # iterate through collected labels replacing them with integers
        for idx in range(len(labels)):
            # replace item with index
            dtfm[[col]] = dtfm[[col]].replace(labels[idx], idx)
    if logger:
        print("Converted cols info:\n{}".format(dtfm[convert_cols].info()))
        print("Converted cols head:\n{}".format(dtfm[convert_cols].head()))
    return dtfm
def remove_missing(dtfm, p_missing=50, logger=False):
    """
    Drop every column whose percentage of missing values exceeds p_missing.

    USAGE: remove_missing(dtfm, p_missing=50, logger=False)
    INPUTS:
        dtfm: pandas dataframe
        (optional)
        p_missing=50: percentage threshold above which a column is removed
        logger=False: whether the func should log progress to terminal
    OUTPUT:
        dtfm
    """
    missing_summary = missing_values_table(dtfm)
    if logger:
        print("Missing Values:\n{}".format(missing_summary))
    # columns whose missing percentage exceeds the threshold
    over_threshold = missing_summary['% of Total Values'] > p_missing
    doomed_cols = list(missing_summary[over_threshold].index)
    if logger:
        print('We will remove {} columns'.format(len(doomed_cols)))
    # drop the offending columns and return the slimmer dataframe
    return dtfm.drop(columns=doomed_cols)
def find_outliers(dtfm, cols, remove=False, out_file='outliers.xlsx', logger=False):
    """
    Find (and optionally remove) outliers from a dataset.

    A value is an outlier when it lies more than 3 IQRs below the first
    quartile or above the third quartile of its column. All outlier rows are
    written to `out_file` for inspection.

    USAGE: find_outliers(dtfm, cols, remove=False, out_file='outliers.xlsx', logger=False)
    INPUTS:
        dtfm: pandas dataframe
        cols: colum titles of cols to check
        (optional)
        remove=False: whether to drop the outlier rows from the dataframe
        out_file='outliers.xlsx': file to pipe the removed anomalies to
        logger=False: wheather to print progress or not
    OUTPUT:
        dataframe
    """
    outlier_frames = []
    # collect outliers from the measured values, column by column
    for col in cols:
        first_q = dtfm[col].describe()['25%']
        third_q = dtfm[col].describe()['75%']
        q_range = third_q - first_q
        # rows more than 3 IQRs outside the quartiles
        outliers = dtfm[(dtfm[col] < (first_q - 3 * q_range)) | (dtfm[col] > (third_q + 3 * q_range))]
        outlier_frames.append(outliers)
        # BUG FIX: the original tested `logger == True & outliers.shape[0] > 0`,
        # which bitwise-ANDs True with the row count before comparing; use a
        # plain boolean `and`. A stray debug print was also removed here.
        if logger and outliers.shape[0] > 0:
            print('Outliers {}:\n{}'.format(col, outliers))
    # pipe outliers to out file
    outlier_frames = pd.concat(outlier_frames)
    outlier_frames.to_excel(out_file)
    if logger:
        print("All Outiers:\n{}".format(outlier_frames))
    # remove outliers from dataframe
    if remove:
        dtfm = dtfm.drop(outlier_frames.index)
    return dtfm
if __name__ == '__main__':
    # Pipeline toggles
    format_data = True      # label-encode convert_cols and normalise '.' to NaN
    missing_cols = True     # drop columns missing more than p_missing percent
    p_missing = 50
    outliers = True         # detect (and optionally drop) 3-IQR outliers
    remove_outliers = False
    write_new = True        # write the cleaned frame back out to Excel
    logger = True
    # inputs
    convert_cols=["AMOSTRA", "REPLICATA", "ANIMAL", "PARTIDA"]
    outlier_cols=["AI","PI","ALTO","FRAG_CRO","MOT_PRE","MOT_POS","CONC_CAMARA","VF","AD","VAP", "VSL","VCL","ALH","BCF","STR","LIN","MOTILE_PCT","PROGRESSIVE_PCT","RAPID_PCT","MEDIUM_PCT","SLOW_PCT","STATIC_PCT"]
    dtfm=pd.read_excel('initial_data.xlsx', sheet_name='BD_Research_Fapesp_final', header=1)
    if format_data == True:
        nan = ['.']
        dtfm = format_dtfm(dtfm,nan=nan,convert_cols=convert_cols,logger=logger)
    if missing_cols:
        dtfm = remove_missing(dtfm, p_missing=p_missing, logger=logger)
    if outliers:
        dtfm = find_outliers(dtfm, outlier_cols, logger=logger, remove=remove_outliers)
    if write_new:
        dtfm.to_excel('cleaned_data.xlsx')
|
"""
WSGI config for depictions project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "depictions.settings")
application = get_wsgi_application()
|
# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from omegaconf import DictConfig
from torch import Tensor
from typing import Dict
from collections import OrderedDict
from openspeech.models import register_model, OpenspeechEncoderDecoderModel
from openspeech.decoders import LSTMAttentionDecoder
from openspeech.encoders import ConvolutionalLSTMEncoder
from openspeech.models.deep_cnn_with_joint_ctc_listen_attend_spell.configurations import \
DeepCNNWithJointCTCListenAttendSpellConfigs
from openspeech.vocabs.vocab import Vocabulary
@register_model('deep_cnn_with_joint_ctc_listen_attend_spell', dataclass=DeepCNNWithJointCTCListenAttendSpellConfigs)
class DeepCNNWithJointCTCListenAttendSpellModel(OpenspeechEncoderDecoderModel):
r"""
Listen, Attend and Spell model with configurable encoder and decoder.
Paper: https://arxiv.org/abs/1508.01211
Args:
configs (DictConfig): configuration set.
vocab (Vocabulary): the class of vocabulary
Inputs:
- **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be
a padded `FloatTensor` of size ``(batch, seq_length, dimension)``.
- **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* outputs (dict): Result of model predictions.
"""
def __init__(self, configs: DictConfig, vocab: Vocabulary, ) -> None:
super(DeepCNNWithJointCTCListenAttendSpellModel, self).__init__(configs, vocab)
def build_model(self):
self.encoder = ConvolutionalLSTMEncoder(
input_dim=self.configs.audio.num_mels,
num_layers=self.configs.model.num_encoder_layers,
num_classes=self.num_classes,
hidden_state_dim=self.configs.model.hidden_state_dim,
dropout_p=self.configs.model.encoder_dropout_p,
bidirectional=self.configs.model.encoder_bidirectional,
rnn_type=self.configs.model.rnn_type,
joint_ctc_attention=self.configs.model.joint_ctc_attention,
)
decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \
if self.configs.model.encoder_bidirectional \
else self.configs.model.hidden_state_dim
self.decoder = LSTMAttentionDecoder(
num_classes=self.num_classes,
max_length=self.configs.model.max_length,
hidden_state_dim=decoder_hidden_state_dim,
pad_id=self.vocab.pad_id,
sos_id=self.vocab.sos_id,
eos_id=self.vocab.eos_id,
num_heads=self.configs.model.num_attention_heads,
dropout_p=self.configs.model.decoder_dropout_p,
num_layers=self.configs.model.num_decoder_layers,
attn_mechanism=self.configs.model.decoder_attn_mechanism,
rnn_type=self.configs.model.rnn_type,
)
def set_beam_decoder(self, beam_size: int = 3):
""" Setting beam search decoder """
from openspeech.search import BeamSearchLSTM
self.decoder = BeamSearchLSTM(
decoder=self.decoder,
beam_size=beam_size,
)
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]:
r"""
Forward propagate a `inputs` and `targets` pair for inference.
Inputs:
inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* outputs (dict): Result of model predictions.
"""
return super(DeepCNNWithJointCTCListenAttendSpellModel, self).forward(inputs, input_lengths)
def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
r"""
Forward propagate a `inputs` and `targets` pair for training.
Inputs:
batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths`
batch_idx (int): The index of batch
Returns:
loss (torch.Tensor): loss for training
"""
return super(DeepCNNWithJointCTCListenAttendSpellModel, self).training_step(batch, batch_idx)
def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
    r"""
    Forward one validation batch through the parent model.

    Args:
        batch (tuple): ``(inputs, targets, input_lengths, target_lengths)``.
        batch_idx (int): Index of this batch within the epoch.

    Returns:
        OrderedDict: Validation outputs including the loss.
    """
    return super().validation_step(batch, batch_idx)
def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
    r"""
    Forward one test batch through the parent model.

    Args:
        batch (tuple): ``(inputs, targets, input_lengths, target_lengths)``.
        batch_idx (int): Index of this batch within the epoch.

    Returns:
        OrderedDict: Test outputs including the loss.
    """
    return super().test_step(batch, batch_idx)
|
// Data back Explorer
// import Freezeframe from 'freezeframe';
// prepare docs — JSON collections consumed by loadData()
var DA_doc = "./assets/dist/js/DA_collection.json";         // DA navigator data
var card_doc = "./assets/dist/js/AIE_card_collection.json"; // card deck data
var DP_doc = "./assets/dist/js/DP_collection.json";         // DP filter data
// Entry point: build the panel from the JSON docs, then wire interactions.
$(document).ready(function() {
    loadData(DA_doc, DP_doc, card_doc);
    setupInteraction();
});
// load two json documents and update the panel
// Orchestrates the initial build: DA navigator, DP filter buttons, card decks.
function loadData(DA_doc, DP_doc, card_doc) {
    // layout class suffix for the current screen width
    let layoutClasses = panelLayout();
    createDA(DA_doc, layoutClasses);   // NS navigator
    createDP(DP_doc, layoutClasses);   // EL filter button group
    createDisplay(card_doc);           // card decks
}
// activate all the interactive components
// Binds: responsive layout switching, header toggle, search box, navigator
// scrolling, scroll spy, card zoom modal and modal↔card carousel sync.
// (Large blocks of long-dead commented-out code removed in review.)
function setupInteraction() {
    console.log("Now binding interactions");
    // Responsive header + filter panel: switch between the large (sidebar)
    // and small (horizontal) button-group variants on resize.
    $(window).resize(function() {
        var classStr = panelLayout();
        if(classStr.length < 5){
            $("div.btn-primary-group").removeClass("btn-primary-group list-group").addClass("btn-primary-group-sm");
            $("div.btn-secondary-group").removeClass("btn-secondary-group").addClass("btn-secondary-group-sm");
            $("div.btn-secondary-group-sm > .btn").removeClass("btn-block text-left").addClass("text-center");
        } else {
            $("div.btn-primary-group-sm").removeClass("btn-primary-group-sm").addClass("btn-primary-group" + classStr);
            $("div.btn-secondary-group-sm").removeClass("btn-secondary-group-sm").addClass("btn-secondary-group" + classStr.replace(" list-group", ""));
            $("div.btn-secondary-group > .btn").removeClass("text-center").addClass("btn-block text-left");
        }
    });
    // On narrow screens the header title toggles the filter panel;
    // below 576px also disable the image-overlay tooltips.
    $("header .title-bold").click(function () {
        if($(window).outerWidth() < 768) {
            $("#filter-panel").slideToggle(180);
        }
        if($(window).outerWidth() < 576) {
            $(".img-overlay").off("hover", "**" );
            $(".img-overlay").tooltip("disable");
        }
    });
    // Search box: clear the "Search" placeholder on focus, restore on blur.
    $("input.form-control").focus( function() {
        $(".search-result").text("");
        if($(this).val().trim() == "Search")
            $(this).val("");
    });
    $("input.form-control").blur( function() {
        if($(this).val().trim() == "")
            $(this).val("Search");
    });
    $(".nav-button").click(searchFunc);
    // Run the search when Enter (13) is pressed.
    // BUGFIX: previously fell back to `arguments.callee.caller.arguments[0]`,
    // which is deprecated and throws in strict mode; use the standard
    // keyCode fallback instead.
    $(".form-control").bind('keydown', function(eve){
        var keyCode = eve.which || eve.keyCode;
        if (keyCode == 13) { searchFunc(); $(".form-control").blur();}
    });
    // activate NS navigator: buttons scroll to their section
    $(".btn-primary-group > .btn").click(DA_scroller);
    $(".btn-primary-group-sm > .btn").click(DA_scroller);
    // activate top info reminders — disabled:
    // $(window).scroll(reminderSpy);
    // activate scroll spy (highlights the section currently in view)
    $(window).scroll(displaySpy);
    // activate the first navigator entry by default
    $(".btn-primary-group > .btn").first().addClass("active");
    $(".btn-primary-group-sm > .btn").first().addClass("active");
    // hover full-screen button on card image
    $(".card-img-box").hover(fullScreenOver, fullScreenOut);
    // Clicking the overlay fills the zoom modal, then opens it.
    $(".img-overlay")
        .click(modalInfo)
        .click(function () {
            $("#zooming-modal").modal({
                backdrop: false,
                keyboard: false,
                focus: true,
                show: true
            });
        });
    $("a.modal-title").tooltip({title: "Click to watch full video in a new window", placement: "top"});
    $("a.modal-title").click(function(){
        window.open($(this).attr("href"));
        $("a.modal-title").tooltip("hide");
    });
    // Keep the originating card's carousel in sync with the modal carousel:
    // remember which card opened the modal, then forward slide events to it.
    let data_provider = "";
    $('#zooming-modal').on('shown.bs.modal', function() {
        let modalWindowCarousel = $("#carouselModal").get(0);
        data_provider = $(modalWindowCarousel).attr("data-provider");
    });
    $(".modal .carousel").on("slide.bs.carousel", function(event) {
        let aimCard = $(`[name="${data_provider}"]`).get(0);
        let aimCarousel = $(aimCard).find(".carousel").get(0);
        if(event.direction === "right") {
            $(aimCarousel).find("a.carousel-control-prev").click();
        } else if(event.direction === "left") {
            $(aimCarousel).find("a.carousel-control-next").click();
        }
    })
}
// create NS components & display NS frame
// Loads the DA collection and builds, per approach: a navigator button in
// the filter panel plus a display section (#<joint_tag>) holding the section
// header and an (initially empty) card deck.
function createDA(DA_doc, classStr) {
    // layout class for the current screen width (computed by caller)
    classStr = "btn-primary-group" + classStr;
    let stickyTopElem = new StickyTop();
    stickyTopElem.appendToDisplay();
    // create NS part (navigator button group)
    let DA_Group = $("<div></div>").addClass(classStr)
        .attr("id", "display-scroll");
    // NOTE(review): globally forces synchronous AJAX and never restores it;
    // the append at the bottom relies on the JSON having been processed
    // before execution continues. Sync XHR is deprecated — confirm before
    // converting to a callback/promise.
    $.ajaxSettings.async = false;
    $.getJSON(DA_doc, function (json) {
        $.each(json, function (i, item){
            let DA_single = new DA_Nav(item);
            let DA_nav_btn = DA_single.drawDANav(); // create spy btn
            let DA_top = DA_single.drawDATop(); // create display part
            let DA_joint_tag = DA_single.getJointTag();
            let currentDisplayPart = $("<div></div>").attr("id", DA_joint_tag); // create spy panel
            DA_Group.append(DA_nav_btn);
            currentDisplayPart
                .append(DA_top)
                .append($("<div></div>").addClass("row row-cols-1 row-cols-sm-2 row-cols-lg-3").addClass("card-deck")); // create card deck
            currentDisplayPart.appendTo("#card-display");
            DA_single.DACreatingComplete(stickyTopElem);
        });
    });
    $("#filter-panel > .btn-panel").last().append(DA_Group);
}
// construct DA_Nav class
// "DA_id": 100,
// "DA_num": 10,
// "DA_nav_tag": "biology",
// "DA_nav_color": "#8180DF",
// "DA_desc": "",
// "DA_class_object": [
// {
// "DA_class_id": "101",
// "DA_class_tag": "whole body movement",
// "DA_class_color": "#EB63BD"
// }, ......
// Dynamic-Approach navigator model: holds one approach's id, tag, count,
// description, sub classes and color, and registers its color in the
// shared ColorHash table.
function DA_Nav(spec) {
    // shared id → color accessor used by navigator and cards
    this._color_hash = DA_Nav.ColorHash.init();
    // creation timestamp; stays 0 until DACreatingComplete() runs
    this._created = 0;
    this._DA_id = spec.DA_id || 500;
    this._DA_num = spec.DA_num || 0;
    this._DA_nav_tag = spec.DA_nav_tag || "navigator";
    this._DA_desc = spec.DA_desc || "Interpretation for Dynamic Approaches. Interpretation for Dynamic Approaches. Interpretation for Dynamic Approaches. Interpretation for Dynamic Approaches. Interpretation for Dynamic Approaches.";
    this._DA_sub_arr = spec.DA_class_object || [{"DA_class_id": "501", "DA_class_tag": "navigator sub class example", "DA_class_color": "#EBEDF6"}];
    // DOM-safe anchor id: spaces replaced by underscores
    this._DA_joint_tag = this._DA_nav_tag.split(" ").join("_");
    this._DA_nav_color = spec.DA_nav_color || "#EBEDF6";
    this._color_hash.set_color(this._DA_id, this._DA_nav_color);
}
// public color hash data sharing
DA_Nav.ColorHash = {
_data: { 500: "#EBEDF6" },
init: function(){
let color = {};
color.set_color = function(key_id, color_str) {
key_id = key_id || 500;
color_str = color_str || "";
if(color_str) {
if(color_str.indexOf("#") < 0)
color_str = "#" + color_str;
if([3,4,7].indexOf(color_str.length) < 0)
return false;
DA_Nav.ColorHash._data[key_id] = color_str;
return true;
}
return false;
};
color.get_color = function(key_id) {
key_id = key_id || 500;
if(DA_Nav.ColorHash._data.hasOwnProperty(key_id))
return DA_Nav.ColorHash._data[key_id];
else
return undefined;
};
return color;
}
}
// Accessor: the DOM-safe section id (nav tag with spaces → underscores).
DA_Nav.prototype.getJointTag = function() {
    return this._DA_joint_tag;
}
// Build the navigator <a> button: capitalised tag plus count, a colored id
// dot, a colored sign, and an href pointing at this section's anchor.
DA_Nav.prototype.drawDANav= function() {
    let baseClasses = "btn btn-block text-left";
    let label = this._DA_nav_tag.replace(this._DA_nav_tag[0], this._DA_nav_tag[0].toUpperCase()) + ` (${this._DA_num})`;
    let idDot = $("<span></span>").addClass("btn-id").css("background-color", this._DA_nav_color);
    let sign = $("<span></span>").addClass("btn-sign").css("background", this._DA_nav_color);
    let navButton = $("<a></a>")
        .addClass([baseClasses, this._DA_joint_tag].join(" "))
        .text(label)
        .attr({type: "button", href: "#" + this._DA_joint_tag})
        .prepend(idDot)
        .append(sign);
    return navButton;
}
// Build the header shown above each card deck: colored title with count,
// approach description, and a color legend of the sub classes. Also
// registers every sub-class color in the shared ColorHash.
DA_Nav.prototype.drawDATop = function() {
    let thisDA_Nav = this;
    // display color reminder (legend)
    let sub_label = $("<ul></ul>").addClass("display-sub-label");
    // BUGFIX: `index` was read in the fallback id below but never declared
    // as a forEach parameter, so a missing/falsy DA_class_id threw a
    // ReferenceError.
    thisDA_Nav._DA_sub_arr.forEach((eg_object, index) => {
        // add to color hash list
        let DA_class_id = eg_object["DA_class_id"] || (500 + index + 1);
        let DA_class_color = eg_object["DA_class_color"] || "#EBEDF6";
        thisDA_Nav._color_hash.set_color(DA_class_id, DA_class_color);
        // add sub label to display
        let DA_class_tag = eg_object.DA_class_tag;
        let DA_class_label = $("<li></li>")
            .text(DA_class_tag)
            .prepend($("<span></span>").css("background-color", DA_class_color));
        DA_class_label.appendTo(sub_label);
    });
    // display title
    let DA_display_tag = thisDA_Nav._DA_nav_tag ? "approaches: " + thisDA_Nav._DA_nav_tag.toLowerCase() : "approaches: dynamic approaches";
    let display_title = $("<h2></h2>").addClass("display-title")
        .text(DA_display_tag + " (" + thisDA_Nav._DA_num + ")")
        .prepend($("<span></span>").css("background-color", thisDA_Nav._DA_nav_color));
    // integrated display top, positioned at the card display's top edge
    let display_top = $("<div></div>").addClass("deck-reminder")
        .css({
            "top": document.querySelector("#card-display").getBoundingClientRect().top
        })
        .append(display_title)
        .append($("<p></p>").addClass("display-desc").text(thisDA_Nav._DA_desc))
        .append(sub_label);
    return display_top;
}
// Mark this DA section as created (records the creation time), keep a
// reference to the shared StickyTop and wire up interactions. Refuses to
// run twice or without a StickyTop instance.
DA_Nav.prototype.DACreatingComplete = function(stickyTopElem) {
    // De Morgan of the original success condition: fail fast instead.
    if(this._created > 0 || stickyTopElem === undefined) {
        console.warn(`DA tab & sticky top for "${this._DA_nav_tag}" have already been created before.`);
        return false;
    }
    this._created = new Date().getTime();
    this._sticky_top_object = stickyTopElem;
    this._interactionInit();
    return true;
}
// Cache the DOM nodes for this section (navigator button, sticky header,
// description) and bind the scroll listeners. No-op until
// DACreatingComplete() has set _created.
DA_Nav.prototype._interactionInit = function () {
    if(this._created) {
        this._DA_btn = document.querySelector(`.btn-primary-group > .${this.getJointTag()}`);
        this._sticky_top = document.querySelector(`#${this._DA_joint_tag} > .deck-reminder`);
        this._display_desc = this._sticky_top.querySelector(".display-desc");
        // record in/out of scroll Y position
        this._in_sticky = false;
        // bind event listeners
        this._topEventBinding();
        // this._scrollEventBinding();
    }
}
// Set / clear / toggle the "sticky header in view" flag.
// option === true → set; option === undefined → toggle; anything else → clear.
// Returns false when _interactionInit() has not run yet.
DA_Nav.prototype._stickyToggle = function(option) {
    // BUGFIX: the former `option = option || true` coerced every falsy
    // argument — including an explicit `false` — to true, so the flag could
    // never be cleared and the undefined→toggle branch was unreachable.
    if(!this.hasOwnProperty("_in_sticky")) {
        console.log("Either sticky top or DA button has been deployed yet.");
        return false;
    } else if(option === undefined) {
        this._in_sticky = !this._in_sticky;
    } else if(option === true) {
        this._in_sticky = true;
    } else {
        this._in_sticky = false;
    }
    return true;
}
// Whether this section's sticky header is currently considered in view.
DA_Nav.prototype._isSticky = function() {
    return this._in_sticky;
}
// listen to sticky top
// Tracks whether this section's header has reached its pinned scroll
// position and flips the _in_sticky flag accordingly.
// NOTE(review): stickyOnAnimation/stickyOffAnimation are defined but never
// called here — presumably intended for the commented-out showDisplay call.
// NOTE(review): the exact-pixel equality against offsetTop + 5 looks fragile
// (fast scrolls can skip that position) — confirm before relying on it.
DA_Nav.prototype._topEventBinding = function () {
    let thisDA_Nav = this;
    let stickyOnAnimation = function() {
        $(thisDA_Nav._sticky_top).fadeTo("normal", 0, () => $(thisDA_Nav._display_desc).css("display", "none")).fadeTo("normal", 1);
    };
    let stickyOffAnimation = function() {
        $(thisDA_Nav._sticky_top).fadeTo("normal", 0, () => $(thisDA_Nav._display_desc).css("display", "block")).fadeTo("normal", 1);
    }
    $(window).scroll(function(){
        let isSticky = thisDA_Nav._isSticky();
        // debug logging left over from development
        if(isSticky) {
            console.log("True", thisDA_Nav._DA_nav_tag);
        }
        // mark DA state
        if(parseInt(Math.round(window.scrollY)) === thisDA_Nav._sticky_top.offsetTop + 5) {
            console.log("yes!", thisDA_Nav._DA_nav_tag);
        }
        if(parseInt(Math.round(window.scrollY)) === thisDA_Nav._sticky_top.offsetTop + 5 && !thisDA_Nav._isSticky()) {
            // console.log("yes!", thisDA_Nav._DA_nav_tag);
            thisDA_Nav._stickyToggle(true);
            // thisDA_Nav._sticky_top_object.showDisplay(thisDA_Nav._sticky_top);
        } else if(parseInt(Math.round(window.scrollY)) !== thisDA_Nav._sticky_top.offsetTop && thisDA_Nav._isSticky()) {
            thisDA_Nav._stickyToggle(false);
        }
    });
}
// listen to scroll action
// Placeholder — intentionally empty; the call site in _interactionInit()
// is commented out.
DA_Nav.prototype._scrollEventBinding = function () {
    return;
}
// construct a sticky top class
// A fixed-position clone of the current section header; optionally seeded
// from an existing header element.
function StickyTop(DA_sticky_top) {
    this._DA_sticky_top_elem = DA_sticky_top || undefined;
    let self = this;
    // Build the floating container and, when a source header was supplied,
    // populate it immediately (showDisplay no-ops on undefined).
    this._sticky_top = (function(sourceElem) {
        let container = document.createElement("div");
        self.showDisplay(sourceElem, container);
        container.classList.add("deck-reminder", "active-sticky");
        return container;
    })(this._DA_sticky_top_elem);
}
// Attach the floating header to `container` (default: #card-display),
// pinned `top` px from the viewport top (default: the page header's height).
StickyTop.prototype.appendToDisplay = function(container, top) {
    if(!container)
        container = document.getElementById("card-display");
    if(!top)
        top = document.querySelector("header").getBoundingClientRect()["height"];
    this._sticky_top.style.cssText = ` position: fixed; z-index: 500; top: ${top}px; right: 0; `;
    // append to container
    container.appendChild(this._sticky_top);
}
// Clone the given section header's title and sub-label legend into the
// floating container (defaults to this instance's own container).
// Returns false when no source element is provided, true otherwise.
StickyTop.prototype.showDisplay = function(DA_sticky_top_elem, sticky_top_container) {
    DA_sticky_top_elem = DA_sticky_top_elem || undefined;
    sticky_top_container = sticky_top_container || this._sticky_top;
    if(DA_sticky_top_elem === undefined)
        return false;
    let display_title = DA_sticky_top_elem.querySelector(".display-title").cloneNode(true);
    let sub_label = DA_sticky_top_elem.querySelector(".display-sub-label").cloneNode(true);
    // replace any previous contents with the fresh clones
    sticky_top_container.innerHTML = "";
    sticky_top_container.appendChild(display_title);
    sticky_top_container.appendChild(sub_label);
    // BUGFIX: removed stray `console.log("holo")` debug leftover.
    return true;
}
// create EL components
// Filter-counter class convention:
// x > 0 .active · x < 0 :not(.active) · x == 0 .disabled
// Loads the DP collection and builds one primary filter button plus its
// sub-tag list per entry.
function createDP(DP_doc, classStr) {
    classStr = "btn-secondary-group" + classStr.replace(" list-group", "");
    // small-screen variant centers the button text
    let btnClassStr = classStr.indexOf("sm") > 0 ? "text-center" : "text-left btn-block";
    let DP_Group = $("<div></div>").addClass(classStr);
    $("#filter-panel > .btn-panel").first().append(DP_Group);
    $.getJSON(DP_doc, function(json) {
        // create EL components
        $.each(json, function(i, item) {
            let DP_single = new DP_Tab(item);
            let { DP_primary_btn, DP_sub_ul } = DP_single.drawDP(btnClassStr);
            DP_Group.append(DP_primary_btn).append(DP_sub_ul);
            DP_single.DPCreatingComplete();
        });
    });
}
// construct DP_filter class
// "DP_id": 1,
// "DP_tag": "illustrate characteristic",
// "DP_sub_tag": "Depict Reality, Exaggerate Reality"
// Dynamic-Purpose filter model.
// Expects: { DP_id, DP_tag, DP_sub_tag: "tag a, tag b, ..." }.
function DP_Tab(DP_object) {
    // creation timestamp; stays 0 until drawn on screen
    this._created = 0;
    this._DP_id = DP_object["DP_id"];
    this._DP_tag = DP_object["DP_tag"];
    // Split the comma-separated sub-tag string into trimmed tags.
    // (Cleaned up from `tag => tag = tag.trim()` plus a stray `;;`.)
    this._DP_sub_tags = DP_object["DP_sub_tag"].split(",").map(tag => tag.trim());
}
// Public method
// Build this filter's primary <button> plus a <ul> of its sub-tag items,
// all initially active. Returns { DP_primary_btn, DP_sub_ul }.
DP_Tab.prototype.drawDP = function(btnClassStr) {
    if(!btnClassStr)
        btnClassStr = "text-left btn-block";
    let DP_primary_btn = $("<button></button>")
        .addClass("btn " + btnClassStr)
        .addClass("active")
        .text(this._DP_tag)
        .prepend($("<span></span>"));
    let DP_sub_ul = $("<ul></ul>").addClass("btn-sub-list");
    for(let tag of this._DP_sub_tags) {
        // each <li> is identified by the tag's abbreviation code
        $("<li></li>")
            .addClass("active")
            .attr("id", DP_Tab.DP_abr(tag))
            .text(tag)
            .appendTo(DP_sub_ul);
    }
    return { DP_primary_btn, DP_sub_ul };
}
// Public method
// Mark this filter tab as drawn (records the creation time) and bind its
// events; guarded so it only ever runs once.
DP_Tab.prototype.DPCreatingComplete = function() {
    if(this._created > 0) {
        console.warn(`DP tab ${this._DP_id} has already been created before.`);
        return false;
    }
    this._created = new Date().getTime();
    this._interactionInit();
    return true;
}
// Private method
// Abbreviation codes for every sub tag: first two chars + word count +
// last two chars (lower-cased).
DP_Tab.prototype._DP_abr_list = function() {
    let DP_sub_tags = this._DP_sub_tags || [];
    return DP_sub_tags.map(tag => {
        tag = tag.toLowerCase() || "dynamic purpose";
        return tag.substr(0, 2) + (tag.split(/-|\s/)).length + tag.substr(tag.length-2);
    });
}
// Private method
// Locate this tab's primary button and sub-tag list in the DOM and bind
// their click handlers.
// NOTE(review): indexes the DOM by DP_id - 1, which assumes DP_id values
// are 1-based and match document order — confirm against the JSON.
DP_Tab.prototype._interactionInit = function() {
    if(this._created) {
        this._DP_primary_btn = document.querySelectorAll(".btn-secondary-group > .btn")[this._DP_id-1];
        this._DP_sub_ul = document.querySelectorAll(".btn-secondary-group > .btn-sub-list")[this._DP_id-1];
        this._DP_sub_li = this._DP_sub_ul.querySelectorAll("li"); // NodeList object
        // bind event listener
        this._eventBinding();
    }
}
// Private method
// Wire the show/hide filtering. Each sub-tag <li> toggles the visibility of
// cards carrying its abbreviation class; the primary button toggles every
// sub tag at once and collapses/expands the sub-tag list.
DP_Tab.prototype._eventBinding = function() {
    let thisDPTag = this;
    let DP_abr_list = thisDPTag._DP_abr_list();
    let DP_sub_li = thisDPTag._DP_sub_li;
    // bind hide/visible event to sub buttons
    thisDPTag._DP_sub_li.forEach(li => {
        let this_DP_abr = li.getAttribute("id");
        li.addEventListener("click", function() {
            let targetCards;
            if(this.classList.contains("active")) {
                this.classList.toggle("active", false);
                targetCards = document.querySelectorAll(`.${this_DP_abr}:not(.screened-out)`);
                targetCards.forEach(node => {
                    node.classList.add("screened-out");
                    // BUGFIX: previously animated $(targetCards) — the whole
                    // selection — on every iteration of this per-node loop,
                    // restarting the fade N times; animate only the current
                    // node, matching the primary-button branch below.
                    $(node).fadeTo(400, 0).hide(1, () => {
                        if(node.querySelector(".card-inner").classList.contains("trans-3d"))
                            node.querySelector(".card-inner").classList.remove("trans-3d");
                    });
                });
            } else {
                this.classList.toggle("active", true);
                targetCards = document.querySelectorAll(`.${this_DP_abr}.screened-out`);
                targetCards.forEach(node => node.classList.remove("screened-out"));
                $(targetCards).show(1).fadeTo(600, 1);
            }
        });
    });
    // bind hide/visible event to primary buttons
    thisDPTag._DP_primary_btn.addEventListener("click", function () {
        let targetCards, this_joint_DP_abr;
        if(this.classList.contains("active")){
            this.classList.toggle("active", false);
            DP_sub_li.forEach(li => li.classList.toggle("active", false));
            this_joint_DP_abr = DP_abr_list.map(DP_abr => "." + DP_abr + ":not(.screened-out)").join(",");
            targetCards = document.querySelectorAll(this_joint_DP_abr);
            // collapse the sub-tag list (duration scales with item count)
            $(DP_sub_li).slideToggle(160 + 120 * (DP_sub_li.length/1.75), "easeInOutSine");
            targetCards.forEach(node => {
                node.classList.add("screened-out");
                $(node).fadeTo(400, 0).hide(1, () => {
                    if(node.querySelector(".card-inner").classList.contains("trans-3d"))
                        node.querySelector(".card-inner").classList.remove("trans-3d");
                });
            });
        } else {
            this.classList.toggle("active", true);
            DP_sub_li.forEach(li => li.classList.toggle("active", true));
            this_joint_DP_abr = DP_abr_list.map(DP_abr => "." + DP_abr + ".screened-out").join(",");
            targetCards = document.querySelectorAll(this_joint_DP_abr);
            $(DP_sub_li).slideToggle(160 + 160 * (DP_sub_li.length/1.75), "easeInOutSine");
            targetCards.forEach(node => node.classList.remove("screened-out"));
            $(targetCards).show(1).fadeTo(600, 1);
        }
    })
}
// Static method
// Abbreviate a tag: first two chars + word count + last two chars,
// all lower-cased.
DP_Tab.DP_abr = function(str) {
    let tag = str.toLowerCase() || "dynamic purpose";
    let wordCount = tag.split(/-|\s/).length;
    return tag.substr(0, 2) + wordCount + tag.substr(tag.length-2);
}
//create card display
// Load the card collection JSON and append each card to its DA section's
// deck; logs once the final card has been processed. void return.
function createDisplay(cards_doc) {
    console.log('start loading cards');
    $.getJSON(cards_doc, function(json) {
        // BUGFIX: the completion check compared the index against the length
        // of the JSON *URL string* (cards_doc.length) instead of the number
        // of cards, and omitted the -1, so the message fired at the wrong
        // time (if ever).
        let doc_length = json.length;
        $.each(json, function(id, card_doc) {
            let card_DA = card_doc.DA_nav_tag.toLowerCase();
            let card_DA_joint = $.trim(card_DA).split(" ").join("_");
            let card = new AIE_Card(card_doc);
            $(`#${card_DA_joint} > .card-deck`).append(card.drawCard());
            card.cardCreatingComplete();
            if(id == doc_length - 1)
                console.log("All cards are loaded.");
        });
    });
    // deckDisplay();
    scrollToTop();
}
// construct card class
// input card_object:Object()
// card_id card_title DA_nav_tag DA_class_id DA_class_tag DA_desc DP_tag DP_sub_tag DP_desc eg_arr
function AIE_Card(card_object) {
    this._created = 0; // if displayed on screen: > 0, if not: 0
    this._current_eg_id = 0; // value range: 0, 1, 2
    this._card_id = card_object.card_id || 0;
    this._card_title = card_object.card_title || "Design Space";
    // NOTE(review): reads card_id rather than a DA_nav_id field — looks like
    // a copy/paste slip; confirm against the JSON schema before changing.
    this._DA_nav_id = card_object.card_id || 0;
    this._DA_nav_tag = card_object.DA_nav_tag || "dynamic approaches tag";
    this._DA_class_id = card_object.DA_class_id || 500;
    this._DA_class_tag = card_object.DA_class_tag || "dynamic approaches sub tag";
    this._DA_desc = card_object.DA_desc || "Approach Interpretation";
    // this.DA_nav_color = this._colorSync(this.DA_class_id, DA_color_hash);
    this._DP_tag = card_object.DP_tag || "dynamic purposes";
    // DP_sub_tag arrives as "NN tag text": chars 0-1 are the numeric id,
    // the remainder (from char 3) is the display tag.
    this._DP_sub_id = card_object.DP_sub_tag.substr(0, 2).trim() || "00";
    this._DP_sub_tag = card_object.DP_sub_tag.substr(3).trim() || "Dynamic Purposes Sub Tag";
    this._DP_desc = card_object.DP_desc || "Purpose Interpretation";
    // this.DP_code = this._DP_abr(DP_sub_tag);
    this._eg_arr = card_object.eg_arr || [{"eg_id":"1000", "eg_source":"Video.com", "eg_year":"2020", "eg_designer":"Mr. Designer", "eg_url":"https://www.dribbble.com"},{"eg_id":"1001", "eg_source":"Video.com", "eg_year":"2020", "eg_designer":"Miss Designer", "eg_url":"https://www.dribbble.com"},{"eg_id":"1002", "eg_source":"Video.com", "eg_year":"2020", "eg_designer":"Ms. Designer", "eg_url":"https://www.dribbble.com"}];
    // header color resolved from the shared DA color table by sub-class id
    this._color_hash = DA_Nav.ColorHash.init();
    this._card_color = this._color_hash.get_color(this._DA_class_id);
}
// Private method
// calc card header bg-color
// AIE_Card.prototype._colorSync = function() {
// let DA_class_id = this._DA_class_id || 500;
// let get_color = DA_Nav._color_hash.get_color;
// let card_color = get_color(DA_class_id)
// console.log(card_color);
// return card_color || "#999999";
// }
// AIE_Card.prototype._colorSync = function(hash_list) {
// let DA_class_id = this._DA_class_id || 500;
// hash_list = hash_list || { 500 : "#999999" };
// return hash_list[DA_class_id] || "#999999";
// }
// Private method
// Abbreviation code of this card's DP sub tag: first two chars + word
// count + last two chars, lower-cased. Used as the card's filter class.
AIE_Card.prototype._DP_abr = function() {
    let tag = this._DP_sub_tag.toLowerCase() || "dynamic purpose";
    let wordCount = tag.split(/-|\s/).length;
    return tag.substr(0, 2) + wordCount + tag.substr(tag.length-2);
}
// Private method
// Number of examples attached to this card.
AIE_Card.prototype._getEgLength = function() {
    return this._eg_arr.length || 0;
}
// Private method
// record a card as 'created' after being put on screen, then wire up its
// interactions; guarded so it only ever runs once.
AIE_Card.prototype.cardCreatingComplete = function() {
    if(this._created > 0) {
        console.warn(`Card No.${this._card_id} has already been created before.`);
        return false;
    }
    this._created = new Date().getTime();
    this._interactionInit();
    return true;
}
// Private method
// initiate interactive parts
// Cache the card's DOM nodes (front gif stack, back carousel + controls,
// caption, overlay, flip buttons) once it is in the document, then bind
// all event handlers. No-op until cardCreatingComplete() has run.
AIE_Card.prototype._interactionInit = function() {
    if(this._created) {
        this._Card = $(`[name='card_${this._card_id}']`).get(0);
        let CardFront = $(this._Card).find(".front").get(0);
        let CardBack = $(this._Card).find(".back").get(0);
        this._FrontGif = $(CardFront).find(".card-frontImg").get(0);
        // this._FrontTurningBtn = $(CardFront).find(".card-footer > .btn").get(0);
        this._BackCarousel = $(CardBack).find(".carousel").get(0);
        this._BackCaption = $(CardBack).find(".caption").get(0);
        // this._BackTurningBtn = $(CardBack).find(".card-footer > .btn").get(0);
        this._CarouselControlPrev = $(this._BackCarousel).find(".carousel-control-prev").get(0);
        this._CarouselControlNext = $(this._BackCarousel).find(".carousel-control-next").get(0);
        this._CarouselFullScreen = $(CardBack).find(".img-overlay").get(0);
        this._CardTurningBtns = $(this._Card).find(".card-footer > .btn");
        // bind event listener
        this._eventBinding();
    }
}
// Bind this card's interactions: flip buttons, back-side carousel caption
// updates, the zoom-modal opener, and the front gif hover preview.
// NOTE(review): relies on __appendCaption, which is defined elsewhere in
// the file — presumably builds a labeled caption fragment; verify there.
AIE_Card.prototype._eventBinding = function() {
    let thisCard = this; // data object
    let Card = thisCard._Card; // DOM object
    let CardInner = $(thisCard._Card).find(".card-inner").get(0);
    let modalWindowCarousel = $("#carouselModal").get(0); // carousel in modal frame
    let frontImg = $(thisCard._FrontGif).get(0);
    // bind with footer buttons: toggle the 3D flip between front and back
    $(thisCard._CardTurningBtns).click(function() {
        $(CardInner).toggleClass("trans-3d");
    });
    // bind with carousel: on slide, advance the example id and rebuild the
    // designer/year/URL caption with a fade transition
    $(thisCard._BackCarousel).on("slide.bs.carousel", function(event) {
        // event.direction = "left" / "right"
        let aim_eg_id = thisCard._carouselChangeId(event.direction);
        let aim_eg_info = thisCard._eg_arr[aim_eg_id];
        let aim_eg_designer = thisCard.__appendCaption("Designer", aim_eg_info["eg_designer"]);
        let aim_eg_year = thisCard.__appendCaption("Year", aim_eg_info["eg_year"]);
        let aim_eg_url = $("<a></a>").attr({"href": aim_eg_info["eg_url"], "target": "_blank"}).addClass("text-decoration-none").text("URL");
        let caption = thisCard._BackCaption;
        $(caption).fadeOut("fast", function() {
            $(caption).empty();
            $(caption)
                .append(aim_eg_designer)
                .append(aim_eg_year)
                .append($("<div></div>").append(aim_eg_url));
            $(caption).fadeIn("normal");
        });
    })
    // bind with modal window: record which card opened the modal and fill
    // the modal carousel with this card's example gifs
    $(thisCard._CarouselFullScreen).on("click", function(event) {
        $(modalWindowCarousel).attr("data-provider", $(Card).attr("name"));
        let eg_info = thisCard._eg_arr;
        let current_eg_id = thisCard._current_eg_id;
        let carouselInner = $(modalWindowCarousel).find(".carousel-inner").get(0);
        $(carouselInner).empty();
        eg_info.forEach(function(eg, index, arr) {
            let gif_ori_path = `./assets/back_gif_compressed/back_${eg["eg_id"]}.gif`;
            let carouselImg = $("<img />")
                .addClass("d-block")
                .attr("src", gif_ori_path);
            let carouselItem = $("<div></div>").addClass("carousel-item").append(carouselImg);
            // start the modal carousel on the example currently shown
            if(index === current_eg_id)
                carouselItem.addClass("active");
            carouselItem.appendTo(carouselInner);
        });
    });
    // bind with gif hover listener (disabled Freezeframe integration)
    // const ffGif = new Freezeframe($(frontImg), {
    // trigger: "hover",
    // overlay: false,
    // responsive: true,
    // warnings: false
    // });
    // bind front img preview: hide the static preview on hover; on leave,
    // fade it back and reset the gif src to restart the animation
    $(frontImg).hover(
        // hover in
        function() {
            $(frontImg).children(".front-prev").css("opacity", 0);
            $(frontImg).removeClass("inactive");
        },
        // hover out
        function() {
            let gif_path = $($(frontImg).children(".front-gif").get(0)).attr("src");
            $(frontImg).children(".front-prev").fadeTo("fast", 1, function() {
                $(frontImg).addClass("inactive");
                $($(frontImg).children(".front-gif").get(0)).attr( "src", gif_path );
            });
        }
    );
}
// Public method
// get carousel gif doc name array
// AIE_Card.prototype.getEgGifArray = function() {
// let eg_gif_array = this._eg_arr.map(eg => {
// let eg_id = eg["eg_id"] || 0;
// return `back_${eg_id}.gif`;
// });
// return eg_gif_array;
// }
// Private method
// carousel backward/forward button response
// direction: "right" → previous example, "left" → next example (wrapping)
AIE_Card.prototype._carouselChangeId = function(direction) {
    // NOTE(review): the numeric default never equals "right"/"left", so a
    // missing direction leaves the current id unchanged — confirm intended.
    direction = direction || 1;
    // get current example ID
    let current_eg_id = this._current_eg_id;
    let eg_length = this._getEgLength();
    let aim_eg_id = current_eg_id;
    if(direction === "right")
        // prev_eg_id
        aim_eg_id = parseInt(((current_eg_id + eg_length - 1) % eg_length))
    else if(direction === "left")
        // next_eg_id
        aim_eg_id = parseInt((current_eg_id + 1) % eg_length);
    this._current_eg_id = aim_eg_id;
    //return a 'Map type' example
    return aim_eg_id;
    // let aim_eg = this._eg_arr[aim_eg_id];
    // change caption info
    // ... ...
}
// *** CARD DRAWING PROCESS ***
// Public method
// Assemble the full flip-card: front and back faces inside .card-inner,
// wrapped in a positioned column classed with the card's DP abbreviation
// (used by the filters) and named after the card id.
AIE_Card.prototype.drawCard = function() {
    let flipContainer = $("<div></div>").addClass("card-inner")
        .append(this._cardFront())
        .append(this._cardBack());
    let column = $("<div></div>").addClass("col mb-5 position-relative")
        .addClass(this._DP_abr())
        .attr("name", "card_" + this._card_id)
        .append(flipContainer);
    return column;
}
// Private method
// Build the card's front face: header, a stacked gif + static preview
// image pair (preview hidden on hover — see _eventBinding), body, footer.
AIE_Card.prototype._cardFront = function() {
    let front_elem = $("<div></div>").addClass("card shadow front");
    let card_header = this.__cardHeader();
    // animated gif, revealed while hovering
    let front_gif = $("<img />").addClass("card-img front-gif")
        .attr({
            // src: "assets/media/loading_light.svg",
            // "data-echo": "assets/front_gif/front_" + card_id + ".gif",
            // "onerror": "assets/media/fail_loading_light.svg"
            src: `./assets/front_gif_preview/front_${this._card_id}.gif`
        });
    // static preview shown while idle
    let front_gif_prev = $("<img />").addClass("card-img front-prev")
        .attr({
            // src: "assets/media/loading_light.svg",
            // "data-echo": "assets/front_gif/front_" + card_id + ".gif",
            // "onerror": "assets/media/fail_loading_light.svg"
            src: `./assets/front_prev/static_${this._card_id}.jpg`
        });
    // let prevImg = $("<img />").addClass("card-frontPrev")
    // .attr({
    // src: "assets/media/loading_light.svg",
    // "data-target": "assets/front_gif_preview/front_" + card_id + ".png",
    // "onerror": "assets/media/fail_loading_light.svg"
    // });
    let front_card_img = $("<div></div>")
        .addClass("card-frontImg inactive")
        .append(front_gif)
        .append(front_gif_prev);
    let front_card_body = this.__cardFrontBody();
    let card_footer = this.__cardFooter(1);
    // return card front part
    // frontElem.append(card_header).append(prevImg).append(frontGif).append(card_body).append(card_footer);
    return front_elem.append(card_header).append(front_card_img).append(front_card_body).append(card_footer);
}
// Private method
AIE_Card.prototype._cardBack = function() {
    // Assemble the back face: header, example-gif carousel for the
    // currently selected example, caption body, and the return footer.
    const face = $("<div></div>").addClass("card shadow back");
    const parts = [
        this.__cardHeader(),
        this.__cardBackCarousel(this._current_eg_id),
        this.__cardBackBody(this._current_eg_id),
        this.__cardFooter(-1)
    ];
    for (const part of parts) {
        face.append(part);
    }
    return face;
}
// Private method
AIE_Card.prototype.__cardHeader = function() {
    // Colored header strip shared by both faces: card title plus the
    // DP sub-tag line, tinted with this card's color.
    const header = $("<div></div>")
        .addClass("card-header")
        .css("background", this._card_color);
    header.append($("<h4></h4>").text(this._card_title));
    header.append($("<p></p>").text(this._DP_sub_tag));
    return header;
}
// Private method
// x: >0 -> front, <=0 -> back
AIE_Card.prototype.__cardFooter = function(x) {
    // BUG FIX: the original default `x = x || 1` coerced an explicit 0 to 1,
    // so x = 0 rendered the *front* footer although the documented contract
    // says <= 0 means the back face. Default only when x is truly missing.
    x = (x === undefined) ? 1 : x;
    const footer = $("<div></div>").addClass("card-footer");
    const button = $("<button></button>").addClass("btn btn-sm rounded-pill");
    // Front face flips to the examples view; back face flips home again.
    button.text(x > 0 ? "Examples" : "Back to Front");
    return footer.append(button);
}
// Private method
AIE_Card.prototype.__cardFrontBody = function() {
    // Front body: two subtitle/description pairs — the design Approach
    // followed by the design Purpose, both with zero-padded ids.
    const body = $("<div></div>").addClass("card-body");
    const sections = [
        [`Approach : ${_prefixZero(this._DA_nav_id, 2)} ${this._card_title}`, this._DA_desc],
        [`Purpose : ${_prefixZero(this._DP_sub_id, 2)} ${this._DP_sub_tag}`, this._DP_desc]
    ];
    for (const [subtitle, description] of sections) {
        body.append($("<div></div>").addClass("card-subtitle").text(subtitle));
        body.append($("<p></p>").addClass("card-text").text(description));
    }
    return body;
}
// Private method
AIE_Card.prototype.__cardBackBody = function(current_eg_id) {
    // Back body: designer / year caption rows plus an external link for
    // the currently displayed example (with placeholder fallbacks).
    current_eg_id = current_eg_id || 0;
    const example = this._eg_arr[current_eg_id];
    const designer = example["eg_designer"] || "Mr. Designer";
    const year = example["eg_year"] || "2020";
    const url = example["eg_url"] || "https://www.dribbble.com";
    const link = $("<a></a>")
        .attr({"href": url, "target": "_blank"})
        .addClass("text-decoration-none")
        .text("URL");
    const caption = $("<div></div>").addClass("caption");
    caption.append(this.__appendCaption("Designer", designer));
    caption.append(this.__appendCaption("Year", year));
    caption.append($("<div></div>").append(link));
    return $("<div></div>").addClass("card-body").append(caption);
}
// *** CARD BACK DRAWING ***
// Private method
// current_eg_id -> start index : 0, 1, 2 ... ...
AIE_Card.prototype.__cardBackCarousel = function(current_eg_id) {
    // Build the Bootstrap carousel holding one slide per example gif,
    // with the slide at current_eg_id active, plus prev/next controls
    // and a hover cover carrying the full-screen trigger.
    current_eg_id = current_eg_id || 0;
    let back_img = $("<div></div>").addClass("card-img-box position-relative");
    // data-interval "false" stops the carousel from auto-advancing.
    let carousel = $("<div></div>")
        .addClass("card-img carousel slide")
        .attr({
            "id": "eg-carousel-" + this._card_id,
            "data-ride": "carousel",
            "data-interval": "false"
        });
    let cover = $("<div></div>")
        .addClass("img-cover")
        .append(
            $("<div></div>").addClass("mask position-absolute")
        ).append(
            $("<span></span>").addClass("img-overlay").attr("type", "button")
        );
    let carousel_inner = $("<div></div>").addClass("carousel-inner");
    // One carousel item per example; only the current one gets .active.
    this._eg_arr.forEach(function(eg, index, arr) {
        let eg_id = eg["eg_id"];
        // let eg_gif_path = `./assets/back_gif/back_${eg_id}.gif`;
        let eg_gif_path = `./assets/back_gif_compressed/back_${eg_id}.gif`;
        let carousel_item = $("<div></div>")
            .addClass("carousel-item")
            .append($("<img />").addClass("d-block").attr("src", eg_gif_path));
        if(index === current_eg_id)
            carousel_item.addClass("active");
        carousel_item.appendTo(carousel_inner);
    });
    carousel.append(carousel_inner);
    // direction: previous / next;
    // Factory for the arrow anchors. Bootstrap's class/data names use the
    // first four letters of the direction word ("prev" / "next").
    let carousel_control = function(direction, card_id) {
        direction = direction.toLowerCase() || "next";
        let direc = direction.substr(0, 4);
        return $("<a></a>")
            .addClass("carousel-control-" + direc)
            .attr({
                "href": "#eg-carousel-" + card_id,
                "role": "button",
                "data-slide": direc
            }).append(
                $("<span></span>").addClass(`carousel-control-${direc}-icon`).attr("aria-hidden", "true")
            ).append(
                $("<span></span>").addClass("sr-only").text(direction)
            );
    }
    let carousel_control_prev = carousel_control("previous", this._card_id);
    let carousel_control_next = carousel_control("next", this._card_id);
    // return all gif within one carousel
    return back_img.append(
        carousel.append(carousel_control_prev).append(carousel_control_next)
    ).append(cover);
}
// Private method
AIE_Card.prototype.__appendCaption = function(key, content) {
    // Render a single "key: content" caption row as an HTML string,
    // falling back to placeholder text for missing arguments.
    const label = key || "Caption keyword";
    const body = content || "Caption content.";
    return "<div><span>" + label + ": </span>" + body + "</div>";
}
// make 9 to 09
function _prefixZero(num, n) {
    // Left-pad `num` with zeros to a width of `n` digits (e.g. 9 -> "09").
    // Array(n).join(0) produces a string of n-1 zeros; prepending it and
    // keeping the last n characters yields the padded value.
    num = num || 0;
    n = n || 2;
    const padded = Array(n).join(0) + num;
    return padded.slice(-n);
}
// activate / inactivate DP primary filter
function DP_filter() {
    // Toggle a DP primary-filter button: slide its sub-filter list open
    // or closed, then re-click the affected sub-buttons so their cards
    // follow. `this` is the clicked primary button.
    const btn = this;
    const wasActive = $(btn).hasClass("active");
    // When collapsing, only currently-active sub-buttons need a click;
    // when expanding, every child gets one.
    const subSelector = wasActive ? ".active" : "";
    const subList = $(btn).next().get(0);
    const subButtons = $(subList).children(subSelector);
    // Slide duration grows with the number of affected sub-buttons.
    const duration = 140 + 120 * (subButtons.length / 1.75);
    $(subList).slideToggle(duration, function() {
        $(btn).toggleClass("active");
    });
    subButtons.each(function(index, sub) {
        $(sub).trigger("click");
    });
}
//activate / inactivate DP sub filter
//activate / inactivate DP sub filter
function DP_sub_filter() {
    // Toggle one DP sub-filter button (`this`) and show/hide its cards
    // through scrollCheck: +1 reveals, -1 conceals.
    console.log("sub_filter:", new Date().getTime());
    let DP_sub_tag = $(this).attr("id");
    if($(this).hasClass("active")){
        // Deactivating: conceal every card tagged with this id.
        $(this).removeClass("active");
        //check scroll panel
        if(DP_sub_tag) {
            console.log(-1);
            scrollCheck(DP_sub_tag, -1);
        }
    } else {
        // need rectification
        // NOTE(review): with ||, this branch fires whenever EITHER nav
        // group has no active button, which may be effectively always if
        // only one of the two groups exists in the current layout. If the
        // intent is "no active button anywhere", && would be the fix —
        // left unchanged pending confirmation.
        if($(".btn-primary-group > .btn.active").length == 0 || $(".btn-primary-group-sm > .btn.active").length == 0) {
            $(".btn-primary-group > .btn:first-child").addClass("active");
            $(".btn-primary-group-sm > .btn:first-child").addClass("active");
        }
        $(this).addClass("active");
        //check scroll panel
        if(DP_sub_tag) {
            console.log(1);
            scrollCheck(DP_sub_tag, 1);
        }
    }
    // deckDisplay();
}
// check scroll panel and para descriptions
function scrollCheck(DP_sub_tag, x) {
    // Show (x > 0) or hide (x < 0) every card tagged DP_sub_tag, keeping
    // deck containers and their nav buttons consistent: decks left with
    // no visible card are faded out and their buttons disabled; decks
    // regaining cards are re-enabled and faded in.
    DP_sub_tag = DP_sub_tag || "";
    x = x || 1;
    if(x < 0) {
        // Mark the soon-to-be-hidden cards so empty decks can be detected.
        $(`#card-display .${DP_sub_tag}:visible`).addClass("to-fade");
        $(".card-deck").each(function(index, elem){
            // elem: a single card deck
            let DA_tag = $($(elem).parent().get(0)).attr("id");
            if($(elem).children(':visible:not(.to-fade)').length === 0) {
                // Deck would become empty: hide the whole deck container
                // and disable its nav button.
                console.log("Here for ", DA_tag);
                $("#" + DA_tag).fadeOut("normal", () => {
                    DP_fitting();
                    // Arrow function: `this` is lexical, i.e. the deck
                    // element (elem) bound by the enclosing .each callback,
                    // so the un-flip targets cards inside this deck.
                    $(this).find(".card-inner.trans-3d").removeClass("trans-3d");
                });
                $("." + DA_tag).addClass("disabled");
            } else {
                // Deck keeps other visible cards: fade only the tagged ones
                // and un-flip any card still showing its back.
                $("#card-display ." + DP_sub_tag).fadeOut(400, function() {
                    $(this).find(".card-inner.trans-3d").removeClass("trans-3d");
                });
            }
        });
        $(".to-fade").removeClass("to-fade");
    } else {
        $("#card-display ." + DP_sub_tag).each(function(index, elem){
            // elem: a single card; walk up to the deck container to find
            // which nav section (NS) it belongs to.
            let targetSet = $(elem).parentsUntil("#card-display");
            let NS_tag = $(targetSet[targetSet.length-1]).attr("id");
            $(".disabled." + NS_tag).removeClass("disabled");
            $(`#${NS_tag}:hidden:not(.to-show)`).addClass("to-show");
            $(elem).fadeIn("slow");
        });
        DP_fitting();
        // Reveal every deck container that regained at least one card.
        $(".to-show").fadeIn("normal", function(){
            $("#card-display > .to-show").removeClass("to-show");
        });
    }
}
// make DA fitting to display pattern
function DP_fitting() {
    // When no deck remains visible, clear all primary-filter highlights
    // and show the "search failed" panel; otherwise hide that panel.
    const anyVisible = $("#card-display > div:visible").length > 0;
    if (anyVisible) {
        $(".search-fail").css("display", "none");
    } else {
        $(".btn-primary-group > .btn.active").removeClass("active");
        $(".btn-primary-group-sm > .btn.active").removeClass("active");
        $(".search-fail").fadeIn("normal");
    }
}
// avoid NS .disabled.active
function DA_active_fitting() {
    // A nav button can end up both .disabled and .active after filtering
    // (at most one such button exists). Move the active state to the
    // nearest enabled neighbour — preferring the previous sibling — or
    // clear all active states when no enabled button remains.
    //
    // BUG FIX: the original used `$(A).find(...) || $(B).find(...)`, but a
    // jQuery object is always truthy even when empty, so the "-sm" group
    // was never consulted. Query both groups with one selector instead.
    var targetSet = $(".btn-primary-group, .btn-primary-group-sm").find(".disabled.active");
    // length only equals 1 / 0
    if (targetSet.length > 0) {
        $(targetSet[0]).removeClass("active");
        var nextSet = $(targetSet[0]).nextAll(".btn:not(.disabled)");
        var preSet = $(targetSet[0]).prevAll(".btn:not(.disabled)");
        if (preSet.length > 0) {
            $(preSet[0]).trigger("click");
            return ;
        } else if (nextSet.length > 0) {
            console.log("next");
            $(nextSet[0]).trigger("click");
            return ;
        } else {
            // No enabled sibling remains: deactivate everything.
            $(".btn-primary-group > .btn").removeClass("active");
            $(".btn-primary-group-sm > .btn").removeClass("active");
        }
    }
}
// NS buttons control #card-display
function DA_scroller() {
    // Smooth-scroll the page so the deck referenced by this nav button's
    // href lands at the top of the card display area.
    const targetId = $(this).attr("href");
    const display = $("#card-display");
    // position().top is relative to the display; the height/outerHeight
    // difference compensates for the display's own padding.
    const scrollTarget = $(targetId).position().top
        + display.height() - display.outerHeight();
    $('html, body').animate({scrollTop: scrollTarget}, 800, "easeInOutQuart");
}
// spy on display scrolling action
function displaySpy() {
    // Scroll-spy: while the user scrolls #card-display, highlight the nav
    // button of whichever deck currently straddles the middle of the
    // visible area.
    let screenH = $(window).height() - $("#card-display").offset().top; // if screen height is very limited - > bug $("#card-display").outerHeight() + $("#card-display").height();
    // Prefer the wide nav group; fall back to the small-screen variant.
    let DA_class = ".btn-primary-group";
    if($(DA_class).length <= 0)
        DA_class = ".btn-primary-group-sm";
    $("#card-display").children(":not(.search-fail)").each(function(i, item){
        let currentPosition = $(item).position().top - $(window).scrollTop();
        // Activate this deck's button when the deck crosses mid-screen
        // (top above the midpoint, bottom at or below it) and its button
        // is not already active.
        if($("." + $(item).attr("id")).is(":not(.active)") && (currentPosition < 0.5*screenH) && (($(item).height() + currentPosition) >= 0.5*screenH)) {
            $(`${DA_class} > .btn.active`).removeClass("active");
            $(`${DA_class} > .btn:not(.disabled).` + $(item).attr("id")).addClass("active");
        }
    });
}
// listen to reminder div beneath each card-deck
// listen to reminder div beneath each card-deck
function reminderSpy() {
    // Sticky-header behaviour for the per-deck reminder bars: when a
    // reminder reaches the page header it becomes the "active sticky" and
    // its description collapses; scrolling past or back releases it and
    // re-expands the description.
    let nav = document.querySelector("header");
    let current_active_sticky =document.querySelector(".deck-reminder.active-sticky");
    let allReminders = Array.from(document.querySelectorAll(".deck-reminder"));
    // .some() lets the scan stop early once a new sticky is activated.
    allReminders.some(function(sticky, index, nodeList) {
        // Distance from this reminder's top edge to the header's bottom.
        let reminderToHeader = parseInt(Math.round(sticky.getBoundingClientRect().top)) - nav.offsetHeight;
        if(sticky.classList.contains("active-sticky")) {
            // Release the sticky once the next sibling has caught up with
            // its bottom edge (5px tolerance).
            if(sticky.getBoundingClientRect().bottom <= sticky.nextElementSibling.getBoundingClientRect().top + 5) {
                console.log("A");
                sticky.classList.remove("active-sticky");
                $($(sticky).find(".display-desc").get(0)).slideDown(360);
            }
            return false;
        }
        // A reminder pushed back below the header gets its description
        // re-expanded while some other sticky is active.
        if(current_active_sticky && (reminderToHeader >= 1)) {
            $($(sticky).find(".display-desc").get(0)).slideDown(360);
        }
        // This reminder has just reached the header: collapse its
        // description and promote it to the active sticky.
        if(Math.abs(reminderToHeader) < 1) {
            $($(sticky).find(".display-desc").get(0)).slideUp(360);
            sticky.classList.add("active-sticky");
            if(current_active_sticky) {
                current_active_sticky.classList.remove("active-sticky");
            }
            return true;
        }
    });
}
function searchFunc() {
    // Full-text search over the card JSON document (card_doc): collect the
    // names of matching cards, then show only those (an empty query
    // restores everything).
    var show_list = [];
    console.log("Ready to search.");
    var read = $("input.form-control").val().toString() || "";
    if(read.toLowerCase() == "search") read = "";
    // Normalize punctuation to spaces and collapse whitespace runs.
    readRegOrigin = read.replace(/[.,:;·'"\(\)\[\]\{\}\\\/\|]/g, " ").replace(/\s+/g, " ");
    // NOTE(review): $.each returns its first argument unchanged and its
    // callback receives (index, value), so this line does NOT trim the
    // terms — it is effectively readRegOrigin.split(" "). Harmless here
    // because the terms contain no whitespace after the normalization
    // above; left unchanged.
    readRegOrigin = $.each((readRegOrigin.split(" ")), function(item){return $.trim(item);});
    // De-duplicate the search terms.
    var readReg = readRegOrigin.filter(function(item, index, arr) {
        return arr.indexOf(item, 0) === index;
    });
    console.log("Search for:", readReg);
    // NOTE(review): ("" | " ") is bitwise OR on strings and evaluates to 0,
    // so this guard loosely tests readReg[0] != 0, which excludes "".
    if(readReg.length > 0 && (readReg[0] != ("" | " "))) {
        //transform string to regexExp
        var rex = new RegExp(readReg.join("|"), "ig");
        $.getJSON(card_doc, function(json) {
            const doc_length = json.length;
            let flag = false;
            //get to-be-hidden number array
            $.each(json, function(i, item) {
                // Strip fields that must not participate in matching.
                delete item.card_id;
                delete item.eg_arr;
                delete item.DA_class_id;
                let itemDoc = (Object.values(item)).join(" ");
                if(itemDoc.search(rex) > -1) {
                    show_list.push(`card_${i+1}`);
                }
                if(i === (doc_length-1)) {
                    flag = true;
                    console.log("Search finished");
                }
            });
            if(flag && (show_list.length > 0)) {
                // Tag matches, re-open the sub-filter lists, then fade the
                // display out once before showing only the results.
                console.log(`${show_list.length} results were found: `, show_list);
                show_list.forEach(card_name => $(`[name="${card_name}"]`).addClass("as-result"));
                $(".btn-sub-list:hidden").slideDown(function() {
                    $(".btn-secondary-group > .btn").addClass("active"); //activate DP
                    $(".btn-sub-list > li").addClass("active"); //activate DP
                });
                $("#card-display > div").fadeOut("normal", function() {
                    if($(this).is($("#card-display > div").last())) {
                        searchResultDisplay();
                    }
                    $(".search-result").text(`${show_list.length} result${show_list.length > 1 ? "s" : ""}`);
                });
            } else {
                // No match: hide everything and disable the nav buttons.
                console.log("Nothing found.");
                $("#card-display > div").fadeOut("normal", function() {
                    $(".search-fail").fadeIn("fast");
                    $(".card-deck > div").fadeOut("normal");
                })
                $(".search-result").text("0 result");
                $(".btn-primary-group > .btn").removeClass("active").addClass("disabled");
                $(".btn-primary-group-sm > .btn").removeClass("active").addClass("disabled");
            }
        });
    } else {
        // Empty query: restore the full display and re-enable all filters.
        $(".search-fail").fadeOut("normal");
        if($(".card-deck > div:visible").length == $(".card-deck > div").length) return ;
        $("#card-display > div").fadeOut("normal", function() {
            $(".card-deck > div").css("display", "block");
            $("#card-display > div").fadeIn("normal");
        });
        $(".btn-primary-group > .btn").removeClass("disabled");
        $(".btn-primary-group-sm > .btn").removeClass("disabled");
        $(".btn-secondary-group > .btn").addClass("active");
        $(".btn-secondary-group-sm > .btn").addClass("active");
        $(".search-result").text("");
    }
    scrollToTop();
}
// layout after searching
function searchResultDisplay() {
    // Show only cards flagged .as-result, re-enable the nav buttons of
    // their decks, then disable the buttons of decks left hidden.
    $(".card-deck > div").css("display", "none");
    $(".card-deck > .as-result").each(function(index, card) {
        const ancestors = $(card).parentsUntil("#card-display");
        const deckId = $(ancestors[ancestors.length - 1]).attr("id");
        $(".disabled." + deckId).removeClass("disabled");
        // Propagate the result flag up to the deck container itself.
        $(`#${deckId}:not(.as-result)`).addClass("as-result");
        $(card).css("display", "block");
    });
    $("#card-display > .as-result").fadeIn("normal", function() {
        $("#card-display .as-result").removeClass("as-result");
        $("#card-display > div:hidden").each(function(index, deck) {
            const deckId = $(deck).attr("id");
            $("." + deckId).removeClass("active").addClass("disabled");
        });
    });
}
// set filter panel
function panelLayout() {
    // Pin the filter panel just below the banner and size it to the
    // viewport. The return value is the suffix used elsewhere to pick the
    // wide (" list-group") or small-screen ("-sm") button-group variant.
    const bannerHeight = $("header").outerHeight();
    const panel = $("#filter-panel");
    panel.css({
        "top": bannerHeight + 1
    });
    const wideLayout = $(window).outerWidth() >= 768;
    if (!wideLayout) {
        panel.css("height", "100%");
        return "-sm";
    }
    panel.css("height", ($(window).outerHeight() - bannerHeight -1));
    return " list-group";
}
// check NS - Card display relationship
function deckDisplay(list, idString) {
idString = idString || "";
list = list || [];
$("#card-display > div").slideDown(1);
// $(".trans-3d").hide(1);
// $.map(list, function(num) {
// $(idString + " [name=\'card_" + num + "\']").show("fast");
// });
// $("#card-display > div").each(function(i, part) {
// if($(part).find(".trans-3d:visible").length == 0) {
// $(part).slideUp("fast");
// $("." + $(part).attr("id") + ":not(disabled)").addClass("disabled");
// } else {
// $("." + $(part).attr("id")).removeClass("disabled");
// }
// });
// $(".btn-primary-group a").removeClass("active");
// $(".btn-primary-group a:not(.disabled):first-child").addClass("active");
}
// fade in full-screen button
function fullScreenOver(){
    // Reveal the full-screen overlay when the pointer enters the image box.
    const cover = $(this).children(".img-cover")[0];
    $(cover).fadeIn(180);
}
// fade out full-screen button
function fullScreenOut() {
    // Hide the full-screen overlay when the pointer leaves the image box.
    const cover = $(this).children(".img-cover")[0];
    $(cover).fadeOut(240);
}
function scrollToTop() {
    // Re-trigger the first primary filter button, whose click handler
    // scrolls the page back to the first deck.
    const firstPrimaryButton = $(".btn-primary-group > .btn").first();
    firstPrimaryButton.trigger("click");
}
// fill modal window info
function modalInfo() {
var untilMain = $(this).parentsUntil(".card-deck");
var thisCard = $(untilMain[untilMain.length - 1]);
// var bgColor = $(thisCard.find(".card-header")[0]).css("background");
// var modalTitle = $(thisCard.find("h6")[0]).text();
// var modalURL = $(thisCard.find("a")[0]).attr("href");
var modalNum = $(thisCard).attr("name").substr(5);
// var modalNum = $(thisCard.find(".card-num")[0]).text().substr(4);
// var modalSource = $($(thisCard.find(".caption")[0]).children()[0]).text().replace("Source:", " - ");
// $(".modal-content").css("background", bgColor);
// $(".modal-title").text(modalTitle).attr("href", modalURL);
// $(".modal-header > span").text(modalSource);
// $(".modal-body > img").attr({
// src: "assets/media/loading_light.svg",
// "data-echo": "assets/back_gif/back_" + modalNum + ".gif",
// "onerror": "assets/media/fail_loading_light.svg"
// src: "./assets/back_gif/" + modalNum + ".gif"
// });
}
// action easing for scrolling
jQuery.extend( jQuery.easing,
{
    // Penner-style easing curves for the scroll animations. Signature
    // follows the classic jQuery easing plugin: x = fraction (unused),
    // t = elapsed time, b = start value, c = total change, d = duration.
    easeInSine: function (x, t, b, c, d) {
        return -c * Math.cos(t/d * (Math.PI/2)) + c + b;
    },
    easeOutSine: function (x, t, b, c, d) {
        return c * Math.sin(t/d * (Math.PI/2)) + b;
    },
    easeInOutSine: function (x, t, b, c, d) {
        return -c/2 * (Math.cos(Math.PI*t/d) - 1) + b;
    },
    easeInQuart: function (x, t, b, c, d) {
        return c*(t/=d)*t*t*t + b;
    },
    easeOutQuart: function (x, t, b, c, d) {
        return -c * ((t=t/d-1)*t*t*t - 1) + b;
    },
    // Used by DA_scroller for the deck scroll animation.
    easeInOutQuart: function (x, t, b, c, d) {
        if ((t/=d/2) < 1) return c/2*t*t*t*t + b;
        return -c/2 * ((t-=2)*t*t*t - 2) + b;
    },
});
|
// Created by Adam Kaplan on 8/2/15.
// Copyright 2015 Yahoo.
// Licensed under the terms of the MIT License. See LICENSE file in the project root.
#import <Mantle/Mantle.h>
// Immutable value object describing one stock quote; serializable
// from/to JSON through Mantle's MTLJSONSerializing conformance.
@interface Stock : MTLModel <MTLJSONSerializing>
// Ticker symbol (e.g. "AAPL").
@property (nonatomic, readonly) NSString *symbol;
// Display name — presumably the company name; confirm against the feed.
@property (nonatomic, readonly) NSString *name;
// Last price value. NOTE(review): units/currency not shown here — confirm.
@property (nonatomic, readonly) NSNumber *last;
// Designated initializer populating all three read-only properties.
- (instancetype)initWithSymbol:(NSString *)symbol name:(NSString *)name last:(NSNumber *)last;
@end
|
// ____ ______ __
// / __ \ / ____// /
// / /_/ // / / /
// / ____// /___ / /___ PixInsight Class Library
// /_/ \____//_____/ PCL 2.4.9
// ----------------------------------------------------------------------------
// pcl/MercatorProjection.h - Released 2021-04-09T19:40:59Z
// ----------------------------------------------------------------------------
// This file is part of the PixInsight Class Library (PCL).
// PCL is a multiplatform C++ framework for development of PixInsight modules.
//
// Copyright (c) 2003-2021 Pleiades Astrophoto S.L. All Rights Reserved.
//
// Redistribution and use in both source and binary forms, with or without
// modification, is permitted provided that the following conditions are met:
//
// 1. All redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. All redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the names "PixInsight" and "Pleiades Astrophoto", nor the names
// of their contributors, may be used to endorse or promote products derived
// from this software without specific prior written permission. For written
// permission, please contact info@pixinsight.com.
//
// 4. All products derived from this software, in any form whatsoever, must
// reproduce the following acknowledgment in the end-user documentation
// and/or other materials provided with the product:
//
// "This product is based on software from the PixInsight project, developed
// by Pleiades Astrophoto and its contributors (https://pixinsight.com/)."
//
// Alternatively, if that is where third-party acknowledgments normally
// appear, this acknowledgment must be reproduced in the product itself.
//
// THIS SOFTWARE IS PROVIDED BY PLEIADES ASTROPHOTO AND ITS CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PLEIADES ASTROPHOTO OR ITS
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, BUSINESS
// INTERRUPTION; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; AND LOSS OF USE,
// DATA OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// ----------------------------------------------------------------------------
#ifndef __PCL_MercatorProjection_h
#define __PCL_MercatorProjection_h
/// \file pcl/MercatorProjection.h
#include <pcl/Defs.h>
#include <pcl/ProjectionBase.h>
/*
* Based on original work contributed by Andrés del Pozo.
*/
namespace pcl
{
// ----------------------------------------------------------------------------
/*!
* \class MercatorProjection
* \brief Mercator projection system
*
* \ingroup astrometry_support
*/
class PCL_CLASS MercatorProjection : public ProjectionBase
{
public:

   /*!
    * Default constructor.
    */
   MercatorProjection() = default;

   /*!
    * Copy constructor.
    */
   MercatorProjection( const MercatorProjection& ) = default;

   /*!
    * Returns a dynamically allocated duplicate of this object.
    */
   ProjectionBase* Clone() const override
   {
      return new MercatorProjection( *this );
   }

   /*!
    * Returns the WCS projection identifier for this projection system.
    */
   IsoString ProjCode() const override
   {
      return "MER";
   }

   /*!
    * Returns the readable name of this projection system.
    */
   IsoString Name() const override
   {
      return "Mercator";
   }

protected:

   /*
    * Forward transformation: native spherical coordinates pN to projection
    * plane coordinates pW (implemented in the corresponding .cpp file).
    */
   bool Project( DPoint& pW, const DPoint& pN ) const noexcept override;

   /*
    * Inverse transformation: projection plane coordinates pW back to
    * native spherical coordinates pN.
    */
   bool Unproject( DPoint& pN, const DPoint& pW ) const noexcept override;

private:

   /*
    * NOTE(review): member semantics inferred from PCL projection naming
    * conventions — m_r0 appears to be a radius/scale factor initialized to
    * degrees-per-radian, and m_x0/m_y0 plane origin offsets; confirm
    * against the implementation file.
    */
   double m_r0 = Const<double>::deg();
   double m_x0 = 0;
   double m_y0 = 0;
};
// ----------------------------------------------------------------------------
} // pcl
#endif // __PCL_MercatorProjection_h
// ----------------------------------------------------------------------------
// EOF pcl/MercatorProjection.h - Released 2021-04-09T19:40:59Z
|
//
// TKReachability+Private.h
// TravelKit
//
// Created by Michal Zelinka on 23/05/17.
// Copyright © 2017 Tripomatic. All rights reserved.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// Reachability state of the network connection.
typedef NS_ENUM(NSUInteger, TKNetworkStatus) {
	TKNetworkStatusNotReachable = 0,   // no usable connection
	TKNetworkStatusReachableViaWiFi,   // reachable over Wi-Fi
	TKNetworkStatusReachableViaWWAN,   // reachable over cellular (WWAN)
};

// Generation of the cellular connection, when one is in use.
typedef NS_ENUM(NSUInteger, TKConnectionCellularType) {
	TKConnectionCellularTypeUnknown = 0,
	TKConnectionCellularType2G,
	TKConnectionCellularType3G,
	TKConnectionCellularTypeLTE,
};

// Private reachability helper exposing class-level connection queries.
@interface TKReachability : NSObject

// Whether any network connection is currently reachable.
+ (BOOL)isConnected;
// Whether the current connection is cellular.
+ (BOOL)isCellular;
// Whether the current connection is Wi-Fi.
+ (BOOL)isWifi;

#if TARGET_OS_IOS
// Cellular technology generation of the current connection (iOS only).
+ (TKConnectionCellularType)cellularType;
#endif

@end
NS_ASSUME_NONNULL_END
|
"use strict";
function expand(element) {
    // Click the element only when its aria-expanded attribute reports a
    // collapsed state; an already-expanded element is left untouched.
    const isExpanded = JSON.parse(element.getAttribute("aria-expanded"));
    if (!isExpanded) {
        element.click();
    }
}
function get_all_users() {
    // Live collection of all participant name elements in the user list.
    return document.getElementsByClassName("userName--6aS3s");
}
function get_current_user() {
    // Locate the entry representing the local user: the UI marks it with
    // "You" (or German "Sie") inside the aria-label.
    const users = get_all_users();
    for (const user of users) {
        const labelWords = user.getAttribute("aria-label").split(" ");
        if (labelWords.includes("You") || labelWords.includes("Sie")) {
            return user;
        }
    }
    // Fall back to the first entry when no self-marker is found.
    console.log("can't find current user; defaulting to first user.");
    return users[0];
}
function get_status(user) {
    // Derive a user's status number from the icon inside their avatar.
    const avatarStatus = user.parentElement
        .getElementsByClassName("avatar--Z2lyL8K")[0]
        .children[1];
    // An empty status node means "no status set" (status 1).
    if (avatarStatus.childElementCount == 0) {
        return 1;
    }
    // Otherwise map the displayed icon class to its status number.
    const displayedIcon = avatarStatus.children[0];
    for (const iconClass in status_icons) {
        if (displayedIcon.classList.contains(iconClass)) {
            return status_icons[iconClass];
        }
    }
    throw new Error("unable to find status icon");
}
// get most popular status under all users
function get_best_status(forbidden_statuses) {
    // Tally how often each of the 10 possible statuses occurs among all
    // users and return the most common one. Statuses flagged truthy in
    // `forbidden_statuses` are excluded from the tally.
    const users = get_all_users();
    // [status, count] pairs for statuses 1..10 (replaces the original
    // hard-coded 10-element literal).
    const scores = Array.from({ length: 10 }, (_, i) => [i + 1, 0]);
    for (const user of users) {
        const status = get_status(user);
        // Reuse `status` instead of calling get_status(user) a second
        // time, as the original did.
        if (!forbidden_statuses[status]) {
            scores[status - 1][1]++;
        }
    }
    // Stable sort keeps the lower status number on a tie, matching the
    // original ordering of the literal array.
    scores.sort((a, b) => b[1] - a[1]);
    return scores[0][0];
}
function update_status(status) {
    // Set the local user's own status by driving the BBB UI: expand the
    // user's dropdown, then click the matching entry in the status pallet.
    const current_user = get_current_user();
    // already satisfied?
    let current_status = get_status(current_user);
    if (status == current_status)
        return;
    expand(current_user);
    // get pallet
    const tertiary_parent = current_user.parentElement
        .parentElement.parentElement;
    const pallet = tertiary_parent.getElementsByClassName("verticalList--Ghtxj")[0];
    const pallet_options = pallet.children;
    // The pallet shows 11 entries while a status is set (the extra entry
    // clears the status); pallet_options[0] toggles between the collapsed
    // and expanded pallet variants. NOTE(review): entry counts inferred
    // from these length checks — confirm against the live BBB DOM.
    if (
        // should be collapsed?
        (status == 1 && pallet_options.length == 11) ||
        // should be expanded?
        (status != 1 && pallet_options.length != 11))
        pallet_options[0].click();
    // click status icon or remove status button
    pallet_options[status].click();
}
// Map from avatar icon class name to the status number used by
// update_status / get_status. Status 1 ("none") has no icon and is
// therefore absent from this table.
const status_icons = {
    "icon-bbb-time": 2,
    "icon-bbb-hand": 3,
    "icon-bbb-undecided": 4,
    "icon-bbb-confused": 5,
    "icon-bbb-sad": 6,
    "icon-bbb-happy": 7,
    "icon-bbb-applause": 8,
    "icon-bbb-thumbs_up": 9,
    "icon-bbb-thumbs_down": 10,
};
// todo: debug
// Visible marker showing the content script was injected and is running.
document.body.style.border = "5px solid green";
// handle content message
browser.runtime.onMessage.addListener((msg) => {
    // Dispatch commands sent by the extension's popup/background script.
    switch (msg.command) {
        case "update_status":
            update_status(msg.status);
            break;
        case "blend_in": {
            // Adopt the most common non-forbidden status in the room.
            const best_status = get_best_status(msg.forbidden_statuses);
            update_status(best_status);
            console.log("best status is: " + best_status);
            break;
        }
        case "toggle_raise": {
            // Toggle between "raised hand" (3) and no status (1).
            const current_user = get_current_user();
            const current_status = get_status(current_user);
            update_status(current_status == 3 ? 1 : 3);
        }
    }
});
//# sourceMappingURL=content.js.map
|
# This first line is provided for you
hours = input("Enter Hours:")
pay_rate = input("Enter Pay Rate:")
# Parse hours with float() as well: the original used int(), which raised
# ValueError on fractional input such as "10.5". Since int * float already
# produced a float, whole-hour inputs print exactly the same result.
print('Pay: ' + str(float(hours) * float(pay_rate)))
|
const db = require('../db');
async function getAllUser() {
    // Fetch every row from the users table.
    return db('users').select();
}
async function getUser(criteria) {
    // First users row matching `criteria`, or undefined when none match.
    return db('users').select().where(criteria).first();
}
async function createUser(user) {
    // Insert a new users row and return knex's insert result (the id).
    const insertedId = await db('users').insert(user);
    return insertedId;
}
async function updateUser({ criteria, data }) {
    // Apply `data` to every users row matching `criteria`.
    const matching = db('users').where(criteria);
    return matching.update(data);
}
async function destroyUser(criteria) {
    // Remove every users row matching `criteria`.
    const matching = db('users').where(criteria);
    return matching.delete();
}
// Public CRUD API for the users table.
module.exports = { getAllUser, getUser, createUser, updateUser, destroyUser };
|
/*
* V4L2 video capture example, modified by Derek Molloy for the Logitech C920 camera
* Modifications, added the -F mode for H264 capture and associated help detail
* www.derekmolloy.ie
*
* V4L2 video capture example
*
* This program can be used and distributed without restrictions.
*
* This program is provided with the V4L2 API
* see http://linuxtv.org/docs.php for more information
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <getopt.h> /* getopt_long() */
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
/* Zero a struct in place before handing it to an ioctl. */
#define CLEAR(x) memset(&(x), 0, sizeof(x))
/* Strategy used to move frames out of the driver. */
enum io_method {
	IO_METHOD_READ,    /* plain read() on the device node */
	IO_METHOD_MMAP,    /* driver buffers mapped into our address space */
	IO_METHOD_USERPTR, /* our own buffers handed to the driver */
};
/* One capture buffer: base address and byte length. */
struct buffer {
	void *start;
	size_t length;
};
static char *dev_name;                    /* capture device path (e.g. /dev/video0) */
static enum io_method io = IO_METHOD_MMAP; /* selected I/O method */
static int fd = -1;                       /* open device file descriptor */
struct buffer *buffers;                   /* capture buffer table */
static unsigned int n_buffers;            /* number of entries in buffers */
static int out_buf;                       /* nonzero: write frames to stdout */
static int force_format = 0;              /* nonzero: force a capture format (set by option parsing — confirm in usage) */
static int frame_count = 100;             /* frames to grab; 0 = capture forever */
/* Report the failed call `s` with the current errno, then terminate. */
static void errno_exit(const char *s)
{
	const int code = errno;
	fprintf(stderr, "%s error %d, %s\n", s, code, strerror(code));
	exit(EXIT_FAILURE);
}
/* ioctl() wrapper that transparently retries when a signal interrupts
 * the call (EINTR); returns the final ioctl() result. */
static int xioctl(int fh, int request, void *arg)
{
	for (;;) {
		int rc = ioctl(fh, request, arg);
		if (rc != -1 || errno != EINTR)
			return rc;
	}
}
/*
 * Consume one captured frame: write the raw frame bytes to stdout when
 * out_buf is set, and emit a "." progress marker on stderr.
 */
static void process_image(const void *p, int size)
{
	if (out_buf)
		fwrite(p, size, 1, stdout);
	/* Flush choreography follows the upstream V4L2 capture example:
	 * drain stderr, print the progress dot, then flush stdout so a
	 * consumer piping frame data receives it promptly. */
	fflush(stderr);
	fprintf(stderr, ".");
	fflush(stdout);
}
/*
 * Dequeue and process a single frame using the configured I/O method.
 * Returns 1 when a frame was processed, 0 when the device would block
 * (EAGAIN) and the caller should select() again. Fatal errors terminate
 * the process via errno_exit().
 */
static int read_frame(void)
{
	struct v4l2_buffer buf;
	unsigned int i;
	switch (io) {
	case IO_METHOD_READ:
		/* Plain read(): the driver copies one frame into our buffer. */
		if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
			switch (errno) {
			case EAGAIN:
				return 0;
			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through */
			default:
				errno_exit("read");
			}
		}
		process_image(buffers[0].start, buffers[0].length);
		break;
	case IO_METHOD_MMAP:
		/* Dequeue a filled driver buffer, process it, requeue it. */
		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
			switch (errno) {
			case EAGAIN:
				return 0;
			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through */
			default:
				errno_exit("VIDIOC_DQBUF");
			}
		}
		assert(buf.index < n_buffers);
		process_image(buffers[buf.index].start, buf.bytesused);
		if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
			errno_exit("VIDIOC_QBUF");
		break;
	case IO_METHOD_USERPTR:
		/* Same as MMAP, but buffers are user-allocated: match the
		 * returned userptr back to our buffer table before use. */
		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_USERPTR;
		if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
			switch (errno) {
			case EAGAIN:
				return 0;
			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through */
			default:
				errno_exit("VIDIOC_DQBUF");
			}
		}
		for (i = 0; i < n_buffers; ++i)
			if (buf.m.userptr == (unsigned long)buffers[i].start
			    && buf.length == buffers[i].length)
				break;
		assert(i < n_buffers);
		process_image((void *)buf.m.userptr, buf.bytesused);
		if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
			errno_exit("VIDIOC_QBUF");
		break;
	}
	return 1;
}
/*
 * Capture loop: grab frame_count frames, or run forever when frame_count
 * is 0. Each iteration select()s on the device with a 2 s timeout
 * (retrying on EINTR), then attempts to read one frame; read_frame()
 * returning 0 (EAGAIN) re-enters the select loop.
 */
static void mainloop(void)
{
	unsigned int count;
	unsigned int loopIsInfinite = 0;
	if (frame_count == 0) loopIsInfinite = 1; //infinite loop
	count = frame_count;
	while ((count-- > 0) || loopIsInfinite) {
		for (;;) {
			fd_set fds;
			struct timeval tv;
			int r;
			FD_ZERO(&fds);
			FD_SET(fd, &fds);
			/* Timeout. */
			tv.tv_sec = 2;
			tv.tv_usec = 0;
			r = select(fd + 1, &fds, NULL, NULL, &tv);
			if (-1 == r) {
				/* Interrupted by a signal: just retry. */
				if (EINTR == errno)
					continue;
				errno_exit("select");
			}
			if (0 == r) {
				/* Device produced nothing within 2 s: give up. */
				fprintf(stderr, "select timeout\n");
				exit(EXIT_FAILURE);
			}
			if (read_frame())
				break;
			/* EAGAIN - continue select loop. */
		}
	}
}
static void stop_capturing(void)
{
enum v4l2_buf_type type;
switch (io) {
case IO_METHOD_READ:
/* Nothing to do. */
break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &type))
errno_exit("VIDIOC_STREAMOFF");
break;
}
}
/* Queue every buffer with the driver and start streaming.
 * For read() I/O there is nothing to prepare. */
static void start_capturing(void)
{
	unsigned int i;
	enum v4l2_buf_type type;

	switch (io) {
	case IO_METHOD_READ:
		/* Nothing to do. */
		break;
	case IO_METHOD_MMAP:
		/* Hand each mmap'ed buffer to the driver by index. */
		for (i = 0; i < n_buffers; ++i) {
			struct v4l2_buffer buf;

			CLEAR(buf);
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory = V4L2_MEMORY_MMAP;
			buf.index = i;
			if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
				errno_exit("VIDIOC_QBUF");
		}
		type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
			errno_exit("VIDIOC_STREAMON");
		break;
	case IO_METHOD_USERPTR:
		/* Hand each malloc'd buffer to the driver by pointer/length. */
		for (i = 0; i < n_buffers; ++i) {
			struct v4l2_buffer buf;

			CLEAR(buf);
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory = V4L2_MEMORY_USERPTR;
			buf.index = i;
			buf.m.userptr = (unsigned long)buffers[i].start;
			buf.length = buffers[i].length;
			if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
				errno_exit("VIDIOC_QBUF");
		}
		type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
			errno_exit("VIDIOC_STREAMON");
		break;
	}
}
/* Release whatever buffers the matching init_* routine allocated. */
static void uninit_device(void)
{
	unsigned int i;

	if (io == IO_METHOD_READ) {
		free(buffers[0].start);
	} else if (io == IO_METHOD_MMAP) {
		for (i = 0; i < n_buffers; ++i)
			if (-1 == munmap(buffers[i].start, buffers[i].length))
				errno_exit("munmap");
	} else { /* IO_METHOD_USERPTR */
		for (i = 0; i < n_buffers; ++i)
			free(buffers[i].start);
	}

	free(buffers);
}
/* Allocate the single buffer used by read() I/O.
 *
 * buffer_size: bytes needed for one frame (sizeimage from the driver).
 */
static void init_read(unsigned int buffer_size)
{
	buffers = calloc(1, sizeof(*buffers));
	if (buffers == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(EXIT_FAILURE);
	}

	buffers[0].length = buffer_size;
	buffers[0].start = malloc(buffer_size);
	if (buffers[0].start == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(EXIT_FAILURE);
	}
}
/* Request memory-mapped capture buffers from the driver and mmap each
 * one into our address space.  Exits on any failure. */
static void init_mmap(void)
{
	struct v4l2_requestbuffers req;

	CLEAR(req);
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	/* EINVAL here specifically means the device lacks mmap streaming. */
	if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
		if (EINVAL == errno) {
			fprintf(stderr, "%s does not support "
				 "memory mapping\n", dev_name);
			exit(EXIT_FAILURE);
		} else {
			errno_exit("VIDIOC_REQBUFS");
		}
	}
	/* The driver may grant fewer buffers than requested; we need >= 2
	 * so one can be processed while another fills. */
	if (req.count < 2) {
		fprintf(stderr, "Insufficient buffer memory on %s\n",
			 dev_name);
		exit(EXIT_FAILURE);
	}
	buffers = calloc(req.count, sizeof(*buffers));
	if (!buffers) {
		fprintf(stderr, "Out of memory\n");
		exit(EXIT_FAILURE);
	}
	/* Query each buffer's offset/length and map it. */
	for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
		struct v4l2_buffer buf;

		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = n_buffers;
		if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
			errno_exit("VIDIOC_QUERYBUF");
		buffers[n_buffers].length = buf.length;
		buffers[n_buffers].start =
			mmap(NULL /* start anywhere */,
			     buf.length,
			     PROT_READ | PROT_WRITE /* required */,
			     MAP_SHARED /* recommended */,
			     fd, buf.m.offset);
		if (MAP_FAILED == buffers[n_buffers].start)
			errno_exit("mmap");
	}
}
/* Request user-pointer streaming and allocate the frame buffers ourselves.
 *
 * buffer_size: bytes needed for one frame (sizeimage from the driver).
 *
 * Fix: VIDIOC_REQBUFS may adjust req.count, but the old code ignored the
 * returned value and hard-coded 4 buffers; it also never checked for a
 * usable minimum.  Honour req.count and apply the same >= 2 policy as
 * init_mmap() for consistency.
 */
static void init_userp(unsigned int buffer_size)
{
	struct v4l2_requestbuffers req;

	CLEAR(req);
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_USERPTR;
	/* EINVAL here specifically means no user-pointer streaming support. */
	if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
		if (EINVAL == errno) {
			fprintf(stderr, "%s does not support "
				 "user pointer i/o\n", dev_name);
			exit(EXIT_FAILURE);
		} else {
			errno_exit("VIDIOC_REQBUFS");
		}
	}
	if (req.count < 2) {
		fprintf(stderr, "Insufficient buffer memory on %s\n",
			 dev_name);
		exit(EXIT_FAILURE);
	}
	buffers = calloc(req.count, sizeof(*buffers));
	if (!buffers) {
		fprintf(stderr, "Out of memory\n");
		exit(EXIT_FAILURE);
	}
	for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
		buffers[n_buffers].length = buffer_size;
		buffers[n_buffers].start = malloc(buffer_size);
		if (!buffers[n_buffers].start) {
			fprintf(stderr, "Out of memory\n");
			exit(EXIT_FAILURE);
		}
	}
}
/* Verify capture capability, reset cropping, negotiate the pixel format
 * (optionally forced by -f/-F), and allocate buffers for the chosen
 * I/O method.  Exits on any unrecoverable failure. */
static void init_device(void)
{
	struct v4l2_capability cap;
	struct v4l2_cropcap cropcap;
	struct v4l2_crop crop;
	struct v4l2_format fmt;
	unsigned int min;

	/* EINVAL from QUERYCAP means this is not a V4L2 device at all. */
	if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {
		if (EINVAL == errno) {
			fprintf(stderr, "%s is no V4L2 device\n",
				 dev_name);
			exit(EXIT_FAILURE);
		} else {
			errno_exit("VIDIOC_QUERYCAP");
		}
	}
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
		fprintf(stderr, "%s is no video capture device\n",
			 dev_name);
		exit(EXIT_FAILURE);
	}
	/* The selected I/O method must match the device's capabilities. */
	switch (io) {
	case IO_METHOD_READ:
		if (!(cap.capabilities & V4L2_CAP_READWRITE)) {
			fprintf(stderr, "%s does not support read i/o\n",
				 dev_name);
			exit(EXIT_FAILURE);
		}
		break;
	case IO_METHOD_MMAP:
	case IO_METHOD_USERPTR:
		if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
			fprintf(stderr, "%s does not support streaming i/o\n",
				 dev_name);
			exit(EXIT_FAILURE);
		}
		break;
	}
	/* Select video input, video standard and tune here. */
	CLEAR(cropcap);
	cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
		crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		crop.c = cropcap.defrect; /* reset to default */
		if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {
			switch (errno) {
			case EINVAL:
				/* Cropping not supported. */
				break;
			default:
				/* Errors ignored. */
				break;
			}
		}
	} else {
		/* Errors ignored. */
	}
	CLEAR(fmt);
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fprintf(stderr, "Force Format %d\n", force_format);
	if (force_format) {
		/* -F: 1080p H264; -f: VGA YUYV (see usage()). */
		if (force_format==2){
			fmt.fmt.pix.width       = 1920;
			fmt.fmt.pix.height      = 1080;
			fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
			fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
		}
		else if(force_format==1){
			fmt.fmt.pix.width       = 640;
			fmt.fmt.pix.height      = 480;
			fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
			fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
		}
		if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
			errno_exit("VIDIOC_S_FMT");
		/* Note VIDIOC_S_FMT may change width and height. */
	} else {
		/* Preserve original settings as set by v4l2-ctl for example */
		if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))
			errno_exit("VIDIOC_G_FMT");
	}
	/* Buggy driver paranoia: enforce sane minimum stride/size.
	 * (The width * 2 assumes a 2-byte-per-pixel format like YUYV.) */
	min = fmt.fmt.pix.width * 2;
	if (fmt.fmt.pix.bytesperline < min)
		fmt.fmt.pix.bytesperline = min;
	min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
	if (fmt.fmt.pix.sizeimage < min)
		fmt.fmt.pix.sizeimage = min;
	switch (io) {
	case IO_METHOD_READ:
		init_read(fmt.fmt.pix.sizeimage);
		break;
	case IO_METHOD_MMAP:
		init_mmap();
		break;
	case IO_METHOD_USERPTR:
		init_userp(fmt.fmt.pix.sizeimage);
		break;
	}
}
/* Close the capture device and invalidate the global descriptor. */
static void close_device(void)
{
	if (close(fd) == -1)
		errno_exit("close");

	fd = -1;
}
/* Open dev_name non-blocking after checking it is a character device. */
static void open_device(void)
{
	struct stat st;

	if (stat(dev_name, &st) == -1) {
		fprintf(stderr, "Cannot identify '%s': %d, %s\n",
			 dev_name, errno, strerror(errno));
		exit(EXIT_FAILURE);
	}

	/* V4L2 devices are character special files. */
	if (!S_ISCHR(st.st_mode)) {
		fprintf(stderr, "%s is no device\n", dev_name);
		exit(EXIT_FAILURE);
	}

	fd = open(dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);
	if (fd == -1) {
		fprintf(stderr, "Cannot open '%s': %d, %s\n",
			 dev_name, errno, strerror(errno));
		exit(EXIT_FAILURE);
	}
}
/* Print the command-line help text to fp (stdout for -h, stderr on
 * bad options).  argv[0] and the current defaults are interpolated. */
static void usage(FILE *fp, int argc, char **argv)
{
	fprintf(fp,
		 "Usage: %s [options]\n\n"
		 "Version 1.3\n"
		 "Options:\n"
		 "-d | --device name   Video device name [%s]\n"
		 "-h | --help          Print this message\n"
		 "-m | --mmap          Use memory mapped buffers [default]\n"
		 "-r | --read          Use read() calls\n"
		 "-u | --userp         Use application allocated buffers\n"
		 "-o | --output        Outputs stream to stdout\n"
		 "-f | --format        Force format to 640x480 YUYV\n"
		 "-F | --formatH264    Force format to 1920x1080 H264\n"
		 "-c | --count         Number of frames to grab [%i] - use 0 for infinite\n"
		 "\n"
		 "Example usage: capture -F -o -c 300 > output.raw\n"
		 "Captures 300 frames of H264 at 1920x1080 - use raw2mpg4 script to convert to mpg4\n",
		 argv[0], dev_name, frame_count);
}
/* getopt_long() option string; a trailing ':' marks options that take
 * an argument (-d device, -c count). */
static const char short_options[] = "d:hmruofFc:";

/* Long-option table; each entry maps to the matching short option. */
static const struct option
long_options[] = {
	{ "device", required_argument, NULL, 'd' },
	{ "help",   no_argument,       NULL, 'h' },
	{ "mmap",   no_argument,       NULL, 'm' },
	{ "read",   no_argument,       NULL, 'r' },
	{ "userp",  no_argument,       NULL, 'u' },
	{ "output", no_argument,       NULL, 'o' },
	{ "format", no_argument,       NULL, 'f' },
	{ "formatH264", no_argument,   NULL, 'F' },
	{ "count",  required_argument, NULL, 'c' },
	{ 0, 0, 0, 0 }
};
/* Parse command-line options, then run the capture pipeline:
 * open -> init -> stream -> mainloop -> teardown. */
int main(int argc, char **argv)
{
	dev_name = "/dev/video0";

	for (;;) {
		int idx;
		int c;

		c = getopt_long(argc, argv,
				short_options, long_options, &idx);
		if (-1 == c)
			break;

		switch (c) {
		case 0: /* getopt_long() flag */
			break;
		case 'd':
			dev_name = optarg;
			break;
		case 'h':
			usage(stdout, argc, argv);
			exit(EXIT_SUCCESS);
		case 'm':
			io = IO_METHOD_MMAP;
			break;
		case 'r':
			io = IO_METHOD_READ;
			break;
		case 'u':
			io = IO_METHOD_USERPTR;
			break;
		case 'o':
			out_buf++;
			break;
		case 'f':
			force_format = 1;
			break;
		case 'F':
			force_format = 2;
			break;
		case 'c': {
			/* Fix: strtol() with a NULL end pointer silently
			 * accepted garbage ("-c abc" became 0) and negative
			 * values, which wrap to a huge unsigned count in
			 * mainloop().  Validate the whole argument. */
			char *end = NULL;
			long v;

			errno = 0;
			v = strtol(optarg, &end, 0);
			if (errno)
				errno_exit(optarg);
			if (end == optarg || *end != '\0' || v < 0) {
				fprintf(stderr,
					"invalid frame count '%s'\n", optarg);
				exit(EXIT_FAILURE);
			}
			frame_count = (int)v;
			break;
		}
		default:
			usage(stderr, argc, argv);
			exit(EXIT_FAILURE);
		}
	}

	open_device();
	init_device();
	start_capturing();
	mainloop();
	stop_capturing();
	uninit_device();
	close_device();
	fprintf(stderr, "\n");
	return 0;
}
|
""""""
import tensorflow as tf
import numpy as np
from pprint import pprint
import os
import utils
import random
# from conv_ca import ConvCA
# from StringIO import StringIO
# def conv_ca_model(run_path, args=None):
# """Links inputs and starts a training session and
# performs logging at certain steps."""
# tf.reset_default_graph()
# sess = tf.Session()
# # INPUTS
# # ------
# with tf.name_scope("inputs"):
# # inputs, labels = input_pipeline(
# # DATADIR, FLAGS.batch_size,
# # shape=FLAGS.grid_shape,
# # num_threads=FLAGS.num_threads, istrain=True, name="train_pipe")
# # inputs_valid, labels_valid = input_pipeline(
# # DATADIR, FLAGS.batch_size,
# # shape=FLAGS.grid_shape,
# # num_threads=FLAGS.num_threads, istrain=False, name="valid_pipe")
# # Keep probability for dropout layer
# keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# inputs_pl = tf.placeholder(
# tf.float32, shape=[None, WIDTH, HEIGHT, DEPTH], name="inputs")
# labels_pl = tf.placeholder(tf.int32, shape=[None], name="labels")
# # GRAPH
# # -----
# # Make a template out of model to create two models that share the same graph
# # and variables
# shared_model = tf.make_template("model", ConvCA)
# with tf.name_scope("train"):
# train = shared_model(inputs_pl, labels_pl, keep_prob)
# with tf.name_scope("valid"):
# valid = shared_model(inputs_pl, labels_pl, keep_prob, istrain=False)
# # Create writer to write summaries to file
# writer = tf.summary.FileWriter(run_path, sess.graph)
# writer.add_graph(sess.graph)
# # EMBEDDING
# # ---------
# # This embedding attemps to reduce a batch of dimensions of the grid into a
# # single point so that a single point represent a single grid that can be
# # plotted into a 2/3 dimensinoal space with t-SNE.
# if FLAGS.embedding:
# embedding = tf.Variable(tf.zeros([FLAGS.num_embeddings,
# valid.embedding_size]),
# name="embedding")
# assignment = embedding.assign(valid.embedding_input)
# x_embedding, y_embedding = embedding_metadata(
# DATADIR, FLAGS.grid_shape, FLAGS.num_embeddings)
# setup_embedding_projector(embedding, writer)
# # REST OF GRAPH
# # -------------
# # Create op to merge all summaries into one for writing to disk
# merged_summary = tf.summary.merge_all()
# # Save checkpoints for evaluations
# saver = tf.train.Saver()
# filename = os.path.join(run_path, "train.ckpt")
# sess.run(tf.global_variables_initializer())
# # RUN
# # ---
# # Create coordinator and start all threads from input_pipeline
# # The queue will feed our model with data, so no placeholders are necessary
# coord = tf.train.Coordinator()
# step = 0
# epoch = 0
# start_time = time()
# tot_running_time = start_time
# try:
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# # fetch embedding images for t-SNE visualization
# # x_embedding_batch, y_embedding_batch = sess.run([x_embedding,
# # y_embedding])
# # Training loop
# # -------------
# # Training loop runs until coordinator have got a requested to stop
# tot_accuracy = 0.0
# tot_valid_accuracy = 0.0
# accuracies = []
# accuracies_valid = []
# while not coord.should_stop():
# # training
# x_batch, y_batch = sess.run([inputs, labels])
# feed_dict = {inputs_pl: x_batch,
# labels_pl: y_batch, keep_prob: FLAGS.dropout}
# _, loss, accuracy = sess.run(
# [train.optimizer, train.loss, train.prediction], feed_dict)
# # validation
# x_batch, y_batch = sess.run([inputs_valid, labels_valid])
# feed_dict = {inputs_pl: x_batch,
# labels_pl: y_batch, keep_prob: 1.0}
# valid_accuracy, snap = sess.run(
# [valid.prediction, valid.activation_snapshot], feed_dict)
# tot_accuracy += accuracy
# tot_valid_accuracy += valid_accuracy
# step += 1
# # logging
# # -------
# if step % 10 == 0:
# summary = sess.run(merged_summary, feed_dict)
# writer.add_summary(summary, step)
# elif step % 500 == 499:
# if FLAGS.run_metadata:
# # Optionally write run metadata into the checkoint file,
# # such as resource usage, memory consumption runtimes etc.
# run_options = tf.RunOptions(
# trace_level=tf.RunOptions.FULL_TRACE)
# run_metadata = tf.RunMetadata()
# summary = sess.run(
# merged_summary, feed_dict, run_options, run_metadata)
# writer.add_run_metadata(run_metadata, "step%d" % step)
# writer.add_summary(summary, step)
# # Create the Timeline object, and write it to a json
# # tl = tf.python.client.timeline.Timeline(run_metadata.step_stats)
# # ctf = tl.generate_chrome_trace_format()
# # with open("timeline.json", "w") as f:
# # f.write(ctf)
# # Request to close threads and stop at max_steps
# if FLAGS.max_steps == step:
# coord.request_stop()
# if step % FLAGS.log_frequency == 0:
# save_activation_snapshot(snap, step, args[0])
# current_time = time()
# duration = current_time - start_time
# start_time = current_time
# avg_accuracy = tot_accuracy / FLAGS.log_frequency
# avg_accuracy_valid = tot_valid_accuracy / FLAGS.log_frequency
# tot_accuracy = 0.0
# tot_valid_accuracy = 0.0
# # save logs of [[wall time, step, avg_accuracy]]
# accuracies.append([time(), step, avg_accuracy])
# accuracies_valid.append([time(), step, avg_accuracy])
# if avg_accuracy_valid > FLAGS.max_valid_accuracy:
# coord.request_stop()
# tag = "avg_accuracy/train"
# value = avg_accuracy
# s = tf.Summary(
# value=[tf.Summary.Value(tag=tag, simple_value=value)])
# writer.add_summary(s, step)
# tag = "avg_accuracy/valid"
# value = avg_accuracy_valid
# s = tf.Summary(
# value=[tf.Summary.Value(tag=tag, simple_value=value)])
# writer.add_summary(s, step)
# epoch += FLAGS.batch_size * FLAGS.log_frequency / FLAGS.num_examples
# examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
# sec_per_batch = float(duration / FLAGS.log_frequency)
# format_str = ("%s step %d/%d, epoch %.2f, loss: %.4f, avg. accuracy: %.4f (%.4f) "
# "(%.1fex/s; %.3fs/batch)")
# print(format_str % (datetime.now().strftime("%m/%d %H:%M:%S"),
# step, FLAGS.max_steps, epoch, loss, avg_accuracy, avg_accuracy_valid,
# examples_per_sec, sec_per_batch))
# progress = float(step / FLAGS.max_steps)
# estimated_duration = (
# FLAGS.max_steps * FLAGS.batch_size) * (1 - progress) / examples_per_sec
# t = timedelta(seconds=int(estimated_duration))
# format_str = "Estimated duration: %s (%.1f%%)"
# print(format_str % (str(t), progress * 100))
# if step % 500 == 0:
# if FLAGS.embedding:
# # Assign embeddings to variable
# feed_dict = {inputs_pl: x_embedding,
# labels_pl: y_embedding, keep_prob: 1.0}
# sess.run(assignment, feed_dict)
# # Save the model once on a while
# saver.save(sess, filename, global_step=step)
# except Exception as e:
# coord.request_stop(e)
# finally:
# # SAVE AND REPORT
# # ---------------
# save_path = saver.save(sess, filename, global_step=step)
# print("Model saved in file: %s" % save_path)
# coord.request_stop()
# # Wait for threads to finish
# coord.join(threads)
# # Print some last stats
# tot_duration = time() - tot_running_time
# t = timedelta(seconds=int(tot_duration))
# print("Total running time: %s" % t)
# print("Layers: %d State dims: %d" %
# (FLAGS.num_layers, FLAGS.state_size))
# writer.close()
# sess.close()
# # report some stats so gridsearch can save them
# if FLAGS.save_data:
# dump = [{
# "timestamp": "%s" % datetime.now(),
# "runtime": "%s" % t,
# "training": accuracies,
# "validation": accuracies_valid,
# "batch_size": FLAGS.batch_size,
# "learning_rate": FLAGS.learning_rate,
# "layers": FLAGS.num_layers,
# "state_size": FLAGS.state_size,
# "num_examples": FLAGS.num_examples,
# "epochs": epoch,
# "dropout": FLAGS.dropout
# }]
# save_json(dump)
class InputTests(tf.test.TestCase):
    """Scratch tests for input-pipeline experiments (TF1-style graph API).

    Only ``testSlice`` is active; the remaining experiments are kept as
    commented-out reference snippets below the class body.
    """

    def testSlice(self):
        """Pick one element per (batch, row) of a 3-D tensor via flat gather.

        Converts the per-row column index ``idx`` into offsets into the
        flattened tensor so a single ``tf.gather`` can fetch the values.
        """
        sess = tf.InteractiveSession()
        x = tf.constant(np.arange(18), tf.float32, shape=[2, 3, 3])
        print(x.eval())
        # s = tf.slice(i, [0, 0, 0, 0], [-1, 1, 3, 1])
        # s = tf.gather(i, [0, 0, 0, 0])
        idx = tf.constant([[[2]]])
        # Flat offset of element (i, j, idx) is (i * rows + j) * cols + idx.
        idx_flattened = tf.range(0, x.shape[0] * x.shape[1]) * x.shape[2] + idx
        y = tf.gather(tf.reshape(x, [-1]),  # flatten input
                      idx_flattened)  # use flattened indices
        print("*****")
        print(y.eval())
        sess.close()
# def testGenerator(self):
# x, y = utils.generate_constrained_dataset((20, 20), 8, stone_probability=0.4)
# # pprint(x)
# pprint(y)
# def blobs(self):
# width = 5
# height = 5
# k = 1
# # def index(i, j)
# grid = np.zeros((width * height), dtype=np.uint8)
# for i in range(k):
# r = srandom.randint(len(grid))
# grid[r] = 1
# print(grid.shape)
# filled = set()
# ----------------------------
# def testGenerator(self):
# utils.save_emdedding_metadata("tmp/data/20x20", (20, 20, 1), 1024)
# ----------------------------
# def testPng(self):
# # import struct
# def write_png(buf, width, height):
# """ buf: must be bytes or a bytearray in Python3.x,
# a regular string in Python2.x.
# """
# import zlib
# import struct
# # reverse the vertical line order and add null bytes at the start
# width_byte_4 = width * 4
# raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
# for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
# def png_pack(png_tag, data):
# chunk_head = png_tag + data
# return (struct.pack("!I", len(data)) +
# chunk_head +
# struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
# return b''.join([
# b'\x89PNG\r\n\x1a\n',
# png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
# png_pack(b'IDAT', zlib.compress(raw_data, 9)),
# png_pack(b'IEND', b'')])
# def saveAsPNG(array, filename):
# import struct
# if any([len(row) != len(array[0]) for row in array]):
# raise ValueError ("Array should have elements of equal size")
# #First row becomes top row of image.
# flat = []
# map(flat.extend, reversed(array))
# #Big-endian, unsigned 32-byte integer.
# buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) )
# for i32 in flat]) #Rotate from ARGB to RGBA.
# data = write_png(buf, len(array[0]), len(array))
# with open(filename, 'wb') as f:
# f.write(data)
# a = np.random.randint(0, 4, [29, 29, 3, 1], np.uint32)
# saveAsPNG(a, "asdf.png")
# ----------------------------
# def testEmbeddings(self):
# """test projector and word embeddings"""
# slim = tf.contrib.slim
# data = np.random.randint(0, 2, size=(50, 1, 3, 3, 1))
# labels = np.random.randint(0, 2, size=(50, 1))
# x = tf.placeholder(tf.float32, [None, 3, 3, 1])
# y = tf.placeholder(tf.int32, [None])
# ----------------------------
# # net = slim.conv2d(x, 10, [3, 3])
# with tf.variable_scope("conv", initializer=tf.contrib.layers.xavier_initializer()):
# w = tf.Variable(tf.random_normal([3, 3, 1, 10], stddev=1.0), name="weights")
# b = tf.Variable(tf.zeros([10]), name="biases")
# conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
# net = tf.nn.relu(conv + b)
# net = slim.conv2d(net, 1, [1, 1])
# y_ = slim.fully_connected(tf.reshape(net, [1, 9]), 2)
# loss = slim.losses.sparse_softmax_cross_entropy(y_, y)
# train_op = tf.train.AdamOptimizer(0.01).minimize(loss)
# pred = tf.reduce_mean(tf.cast(tf.nn.in_top_k(y_, y, 1), tf.float32))
# logdir = "tmp/test"
# path = os.path.join(logdir, "model.ckpt")
# projector = tf.contrib.tensorboard.plugins.projector
# config = projector.ProjectorConfig()
# embedding = config.embeddings.add()
# embedding.tensor_name = w.name
# # Link this tensor to its metadata file (e.g. labels).
# # embedding.metadata_path = os.path.join(logdir, 'metadata.tsv')
# writer = tf.summary.FileWriter(logdir)
# projector.visualize_embeddings(writer, config)
# saver = tf.train.Saver()
# with self.test_session() as sess:
# sess.run(tf.global_variables_initializer())
# step = 0
# acc = 0
# for i in range(20):
# feed_dict={x: data[i], y: labels[i]}
# sess.run(train_op, feed_dict=feed_dict)
# acc += sess.run(pred, feed_dict=feed_dict)
# step += 1
# print (acc / step)
# s = saver.save(sess, path, step)
# print ("saved in ", s, i)
# ----------------------------
# def testCkpts(self):
# """Testing saving and loading of checkoints"""
# path = os.path.join(self.get_temp_dir(), "train.ckpt")
# a = tf.constant([1])
# b = tf.constant([1])
# c = tf.Variable(a + b)
# init = tf.global_variables_initializer()
# saver = tf.train.Saver()
# with self.test_session() as sess:
# sess.run(init)
# for i in range(10):
# cc = sess.run(c)
# if i % 5 == 0:
# s = saver.save(sess, path, i)
# print ("saved in ", s, i)
# print ("var: ", cc)
# # ckpt_path = tf.train.latest_checkpoint(self.get_temp_dir())
# ckpt = tf.train.get_checkpoint_state(self.get_temp_dir())
# # tf.train.get_checkpoint_path()
# print (ckpt.model_checkpoint_path)
# # print (ckpt.get_checkpoint_path)
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(sess, ckpt.model_checkpoint_path)
# print("Model restored from ", ckpt.model_checkpoint_path)
# step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
# print (step)
# ----------------------------
# def testTemplateNames(self):
# """Testing template names"""
# def var_name(x, name):
# a = tf.get_variable(name,
# shape=[2, 2],
# initializer=tf.constant_initializer(1))
# return a
# var_name_foo = tf.make_template("foo", var_name, name='foo')
# a = var_name_foo(tf.constant([1]))
# var_name_bar = tf.make_template("bar", var_name, name='foo')
# b = var_name_bar(tf.constant([0]))
# print (a, b)
# with self.test_session() as sess:
# sess.run(tf.global_variables_initializer())
# aaa, bbb = sess.run([a, b])
# self.assertAllEqual(aaa, bbb)
# ----------------------------
# def testScope(self):
# def s(name=None):
# with tf.variable_scope("scope") as vs:
# a = tf.get_variable(name, [1, 2, 3], initializer=tf.constant_initializer(1))
# vs.reuse_variables()
# b = tf.get_variable(name, [1, 2, 3], initializer=tf.constant_initializer(0))
# return a, b
# scope_copy = tf.make_template("scope", s)
# a, b = scope_copy("foo")
# aa, bb = scope_copy("foo")
# with self.test_session() as sess:
# sess.run(tf.global_variables_initializer())
# aaa, bbb = sess.run([a, bb])
# self.assertAllEqual(aaa, bbb)
# ----------------------------
# def testTemplate(self):
# k = tf.placeholder(tf.float32, name="keep_prob")
# g = tf.Variable(0, trainable=False, name="global_step")
# # m = ConvCA(g, k)
# it = tf.placeholder(tf.float32, [64, 9, 9, 1])
# iv = tf.placeholder(tf.float32, [64, 9, 9, 1])
# il = tf.placeholder(tf.int64, [64, ])
# t = tf.make_template("model", ConvCA)
# # model = t(g, k)
# valid = t(iv, il, g, k)
# train = t(it, il, g, k)
# data1 = np.ones((64, 9, 9, 1))
# data2 = np.zeros((64, 9, 9, 1))
# label = np.ones((64, ))
# with self.test_session() as sess:
# sess.run(tf.global_variables_initializer())
# a, b = sess.run([train.prediction, valid.prediction], feed_dict={k: 1.0, it: data1, iv: data2, il: label})
# self.assertEqual(train, valid)
# self.assertNotEqual(a, b)
# ----------------------------
# def testReader(self):
# filepath = os.path.join(self.get_temp_dir(), "test.bin")
# shape = (5, 5, 1)
# image = np.arange(25, dtype=np.int8).reshape(shape)
# label = np.ones((1), dtype=np.int8)
# batch_i = np.reshape(image, (1, 5, 5, 1))
# batch_l = np.reshape(label, (1, 1))
# utils._convert_to_tfrecords(batch_i, shape, batch_l, filepath)
# with self.test_session() as sess:
# q = tf.FIFOQueue(99, tf.string)
# q.enqueue([filepath]).run()
# q.close().run()
# tf.train.start_queue_runners(sess)
# i, l = utils.read_and_decode(q, shape)
# ii, ll = sess.run([i, l])
# self.assertAllEqual(ii, image)
# self.assertAllEqual(ll, label)
# ----------------------------
# def testGenerator(self):
# filepath = os.path.join(self.get_temp_dir(), "test.bin")
# shape = (20, 20, 1)
# num_examples = 4
# x, y = utils.generate_constrained_dataset(None, shape, num_examples, False, 0.5)
# utils._convert_to_tfrecords(x, shape, y, filepath)
# # print(x[3].reshape((5, 5)), y[3])
# with self.test_session() as sess:
# q = tf.FIFOQueue(99, tf.string)
# q.enqueue([filepath]).run()
# q.close().run()
# tf.train.start_queue_runners(sess)
# xx, yy = utils.read_and_decode(q, shape)
# yy = tf.squeeze(yy)
# for i in range(num_examples):
# image, label = sess.run([xx, yy])
# # print (image.reshape((5, 5)), label)
# self.assertAllEqual(x[i], image)
# self.assertAllEqual(y[i], label)
# Add tests for correct label and image shape sizes
if __name__ == "__main__":
    # Discover and run every tf.test.TestCase defined in this module.
    tf.test.main()
|
/*
* This header is generated by classdump-dyld 1.0
* on Saturday, June 1, 2019 at 6:44:34 PM Mountain Standard Time
* Operating System: Version 12.1.1 (Build 16C5050a)
* Image Source: /System/Library/PrivateFrameworks/NetworkServiceProxy.framework/NetworkServiceProxy
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
/* Delegate protocol for NEAppProxyProviderContainer lifecycle events.
 * All methods are required.  Recovered via classdump, so argument names
 * are lost; the types below are as emitted by the tool. */
@protocol NEAppProxyProviderContainerDelegate
@required
/* Container finished its start attempt; arg2 is the start error, if any. */
-(void)container:(id)arg1 didStartWithError:(id)arg2;
/* Container requests a flow-divert control socket; arg2 is the completion block. */
-(void)container:(id)arg1 didRequestFlowDivertControlSocketWithCompletionHandler:(/*^block*/id)arg2;
/* Container failed; arg2 is the failure error. */
-(void)container:(id)arg1 didFailWithError:(id)arg2;
/* Tunnel configuration was applied; arg3 is the completion block. */
-(void)container:(id)arg1 didSetTunnelConfiguration:(id)arg2 completionHandler:(/*^block*/id)arg3;
@end
|
import React, { useState, useRef } from "react";
import Carousel from "react-elastic-carousel";
import styled from "styled-components";
// Green demo tile rendered for each carousel entry.
const Item = styled.div`
  display: flex;
  justify-content: center;
  align-items: center;
  color: #fff;
  background-color: green;
  width: 100%;
  height: 150px;
  margin: 15px;
`;

// Full-viewport column that centers the controls and the carousel.
const Layout = styled.div`
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
  height: 100vh;
`;

// Vertical stack holding all the demo control rows.
const ControlsLayout = styled.div`
  display: flex;
  flex-direction: column;
  margin: 25px;
`;

// One horizontal row of label + input controls.
const StyledControlFields = styled.div`
  display: flex;
  margin: 5px;
`;

// Responsive item counts: 1 item under 600px wide, 2 above.
const breakPoints = [
  { width: 200, itemsToShow: 1 },
  { width: 600, itemsToShow: 2 },
];
// Returns a change handler that flips a boolean state via its setState updater.
const toggle = (updater) => () => updater((o) => !o);
const CheckBox = ({ label, onToggle, ...rest }) => {
return (
<StyledControlFields>
<label htmlFor={label}>{label}</label>
<input {...rest} id={label} type="checkbox" onChange={toggle(onToggle)} />
</StyledControlFields>
);
};
// Interactive playground for react-elastic-carousel: item add/remove,
// goTo navigation, itemsToShow, and boolean feature toggles.
const DemoApp = () => {
  const [items, setItems] = useState([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
  const [itemsToShow, setItemsToShow] = useState(3);
  const [showArrows, setShowArrows] = useState(true);
  const [pagination, setPagination] = useState(true);
  const [verticalMode, setVerticalMode] = useState(false);
  // Imperative handle used for the goTo(index) control below.
  const carouselRef = useRef();

  // Append the next sequential number as a new carousel item.
  const addItem = () => {
    setItems((currentItems) => [...currentItems, currentItems.length + 1]);
  };

  // Drop the last carousel item.
  const removeItem = () => {
    setItems((currentItems) => currentItems.slice(0, currentItems.length - 1));
  };

  const updateItemsToShow = ({ target }) =>
    setItemsToShow(Number(target.value));

  // Jump the carousel to the typed slide index.
  const goTo = ({ target }) => carouselRef.current.goTo(Number(target.value));

  return (
    <Layout>
      <ControlsLayout>
        <StyledControlFields>
          <button onClick={addItem}>Add Item</button>
          <button onClick={removeItem}>Remove Item</button>
        </StyledControlFields>
        <StyledControlFields>
          <label>goTo</label>
          <input type="number" onChange={goTo} />
        </StyledControlFields>
        <StyledControlFields>
          <label>itemsToShow</label>
          <input
            type="number"
            value={itemsToShow}
            onChange={updateItemsToShow}
          />
        </StyledControlFields>
        <CheckBox
          label="showArrows"
          checked={showArrows}
          onToggle={setShowArrows}
        />
        <CheckBox
          label="pagination"
          checked={pagination}
          onToggle={setPagination}
        />
        <CheckBox
          label="verticalMode"
          checked={verticalMode}
          onToggle={setVerticalMode}
        />
      </ControlsLayout>
      <Carousel
        ref={carouselRef}
        verticalMode={verticalMode}
        itemsToShow={itemsToShow}
        showArrows={showArrows}
        pagination={pagination}
      >
        {items.map((item) => (
          <Item key={item}>{item}</Item>
        ))}
      </Carousel>
    </Layout>
  );
};
export default DemoApp;
|
// Enable on-the-fly Babel transpilation for all subsequent requires,
// then boot the server entry point.
require('@babel/register')
require('./server/server')
|
import * as Icons from 'wix-ui-icons-common';
import * as SystemIcons from 'wix-ui-icons-common/system';
import * as wsr from '../../src/index';
import * as editorX from '../../src/Themes/editorX';
import * as businessDashboard from '../../src/Themes/businessDashboard';
import * as floatingPanels from '../../src/Themes/floatingPanels';
import AtlasAddressInput from '../../src/AtlasAddressInput';
import { StorybookComponents } from 'wix-storybook-utils/StorybookComponents';
import { theme as businessDashboardTheme } from '../../src/Themes/businessDashboard';
import { theme as editorXTheme } from '../../src/Themes/editorX';
import { theme as floatingPanelsTheme } from '../../src/Themes/floatingPanels';
// Theme objects keyed by their import alias, exposed to story examples.
export const themes = {
  businessDashboardTheme,
  editorXTheme,
  floatingPanelsTheme,
};

// Internal Wix components which depend on private Wix dependencies
const privateComponents = {
  AtlasAddressInput,
};

/*
 * This object contains all wix-style-react components including icons
 * It is used mainly for documentation in LiveCodeExample and code section.
 */
const defaultComponents = {
  ...wsr,
  ...privateComponents,
  ...themes,
  StorybookComponents,
  Icons,
  SystemIcons,
};

export default defaultComponents;

// Theme-specific variants: the default map with each theme's overrides
// spread on top (later spreads win on name collisions).
export const floatingPanelsComponents = {
  ...defaultComponents,
  ...floatingPanels,
};

export const editorXComponents = {
  ...defaultComponents,
  ...editorX,
};

export const businessDashboardComponents = {
  ...defaultComponents,
  ...businessDashboard,
};
|
from models.NeuMF.model import NeuMF,NeuMFModel
|
/**
 * Builds the left-hand navigation menu of the home page: a report
 * resource tree with a search box, expand/collapse controls, and
 * permission-gated toolbar buttons.
 */
LeftPanel = Ext.extend(Ext.Panel, {
    constructor: function(){
        this.initializeTree();
        LeftPanel.superclass.constructor.call(this, {
            id: 'left-panel',
            region: "west",
            width:'20%',
            split: true,
            collapseMode: 'mini',
            layout: 'border',
            items: [this.tree, {
                // Hidden-by-default search strip toggled by the search button.
                region: 'north',
                height: 24,
                id: this.northId,
                titleCollapse: true,
                border: false,
                bodyStyle: 'padding: 1px 1px 2px 1px;background: #f0f0f0;',
                layout: 'fit',
                items: [{
                    xtype: 'textfield',
                    emptyText: '搜索菜单',
                    enableKeyEvents: true,
                    listeners:{
                        keydown: {
                            fn: this.doFilterTree,
                            buffer: 350,  // debounce filtering while typing
                            scope: this
                        }
                    }
                }]
            }],
            // Toolbar buttons start hidden; the tree loader reveals those
            // the current operator is permitted to use (see initializeTree).
            tbar: new Ext.Toolbar({
                cls: '_toolbar',
                items: ['-',{
                    id: 'report_search_btn_id',
                    tooltip: '搜索菜单',
                    hidden: true,
                    enableToggle: true,
                    iconCls: 'icon-search',
                    scope: this,
                    handler: this.doToggleSearchText
                },'-',{
                    id: 'report_expan_btn_id',
                    tooltip: '展开菜单',
                    hidden: true,
                    iconCls: 'icon-expand',
                    scope: this,
                    handler : this.doExpandAll
                },' ',{
                    id: 'report_less_btn_id',
                    tooltip: '收缩菜单',
                    hidden: true,
                    iconCls: 'icon-collapse',
                    scope: this,
                    handler : this.doCollapseAll
                },'-',{
                    id: 'report_fav_btn_id',
                    tooltip: '打开收藏夹',
                    hidden: true,
                    iconCls: 'icon-fav'
                },'-','->','-',{
                    id: 'report_task_btn_id',
                    tooltip: '查询任务',
                    hidden: true,
                    iconCls:'icon-search',
                    xtype:'button',
                    handler:this.doQueryTask
                },'-', {
                    id: 'report_new_btn_id',
                    tooltip: '新增报表',
                    hidden: true,
                    iconCls: 'icon-add-report',
                    scope:this,
                    handler:this.doAddReport
                },'-']
            })
        });
    },
    // Open the clicked leaf node as a tab in the main work area.
    initEvents : function(){
        this.tree.on("click" , function( node , e){
            if(!node.isLeaf()) return;
            this.addTabPanel(node.id, node.text);
        } , this);
        LeftPanel.superclass.initEvents.call(this);
    },
    // Activate an existing tab for this node, or create a new MainPanel tab.
    addTabPanel: function(id , text){
        if(Ext.getCmp( id + 'quieetab')){
            App.page.activate( id + 'quieetab');
        }else{
            if(!Ext.getCmp( id + 'MainPanel')){
                App.page.add({
                    id : id + 'MainPanel',
                    title : text,
                    layout: 'fit',
                    closable: true,
                    items: new MainPanel( id , text)
                });
            }
            App.page.activate(id + 'MainPanel');
        }
    },
    doAddReport:function(){
        // Check whether this operator is allowed to create a new report
        Ext.Ajax.request({
            url : root + '/query/RepDesign!queryRepDefine.action',
            params : {rep_id:''},
            scope : this,
            success : function(res, opt) {
                // Replace any previously opened designer tab with a fresh one.
                if(Ext.getCmp('deployReportId')){
                    App.getApp().page.remove(Ext.getCmp('deployReportId'));
                }
                App.page.add({
                    title : '增加新报表',
                    id : 'deployReportId',
                    closable: true,
                    items : [new DeployReportForm()]
                });
                App.page.activate('deployReportId');
            }
        });
    },
    // Show the (singleton) query-task window, creating it on first use.
    doQueryTask: function(){
        var win = Ext.getCmp('taskQueryWinId');
        if(!win){
            win = new TaskQueryWin();
        }
        win.show();
    },
    // Build the async report-resource tree and its filter helper.
    initializeTree: function(){
        this.tree = new Ext.tree.TreePanel({
            region: 'center',
            border: false,
            bodyStyle:'padding:3px',
            useArrows: true,
            autoScroll: true,
            animate: false,
            enableDD: false,
            containerScroll: true,
            // NOTE(review): duplicate 'border' key (also set above); the
            // later value wins, so this line is redundant.
            border: false,
            loader: new Ext.tree.TreeLoader({
                autoLoad: true,
                url : root+"/system/Index!queryTreeNodes.action",
                baseParams : { sub_system_id: '7' },
                listeners:{
                    scope:this,
                    load: function(treeLoader, node, reponseText){
                        // Permission-controlled buttons: nodes carrying a
                        // 'handler' attribute are hidden from the tree and
                        // instead reveal the matching toolbar button.
                        node.eachChild(function (child) {
                            var btnId = child.attributes.others['handler'];
                            if(!Ext.isEmpty(btnId)){
                                child.ui.hide();
                                Ext.getCmp(btnId).show();
                            }
                        });
                    }
                }
            }),
            root: {
                id: '-1',
                iconCls: 'x-tree-root-icon',
                nodeType: 'async',
                singleClickExpand: true,
                text: '报表资源菜单',
                expanded: true
            }
        });
        this.filter = new Ext.tree.TreeFilter( this.tree , {
            clearBlank: true,
            autoClear : true
        });
    },
    // Toggle visibility of the north search strip and re-layout.
    doToggleSearchText: function(){
        var northPanel = this.items.get(1);
        if(northPanel.isVisible()){
            northPanel.hide();
        }else{
            northPanel.show();
        }
        this.doLayout(false, false);
    },
    doExpandAll: function(){
        this.tree.expandAll();
    },
    doCollapseAll: function(){
        this.tree.collapseAll();
    },
    // Filter tree leaves by the typed text (matches node text or id);
    // clearing the field restores the full tree.
    doFilterTree : function( t , e ){
        var text = t.getValue(), filter = this.filter ;
        if(!text){
            filter.clear();
            return;
        }
        this.doExpandAll();
        var re = new RegExp('^.*' + text + '.*$');
        filter.filterBy( function( n ){
            return !n.leaf || re.test( n.text ) || re.test(n.id);
        });
    }
});
// Home-page workspace: the tab panel holding the welcome portal plus all
// report tabs opened from the left menu.
RightTabPanel = Ext.extend( Ext.TabPanel, {
    welcomePanel: null,
    constructor: function(){
        this.welcomePanel = new WelcomePortal();
        RightTabPanel.superclass.constructor.call(this, {
            region:'center',
            resizeTabs: true,
            minTabWidth: 135,
            tabWidth: 135,
            maxTabWidth: 160,
            enableTabScroll: true,
            //tabPosition: 'bottom',
            activeTab: 0,
            defaults: {autoScroll: true},
            items: this.welcomePanel
        });
    }
});
/**
 * Center region of the main viewport: a top toolbar (login info and a
 * full-screen toggle) wrapping the left menu panel and the tab workspace.
 */
ViewportCenterPanel = Ext.extend( Ext.Panel, {
    constructor: function(){
        // BUG FIX: the original invoked MainPanel.superclass.constructor
        // here (copy/paste slip), initialising the wrong prototype chain.
        // The superclass of ViewportCenterPanel itself must be called.
        ViewportCenterPanel.superclass.constructor.call(this, {
            region : 'center',
            layout : 'border',
            style: 'padding: 0px 1px 2px 1px;',
            border: false,
            id: 'main-panel',
            tbar: new Ext.Toolbar({
                height: 29,
                cls: 'top-toolbar',
                items: ['-', App.LoginInfo, '-' ,'->','-',{
                    tooltip: '全屏显示',
                    iconCls: 'icon-full',
                    enableToggle: true,
                    handler: this.doFullView
                },'-']
            }),
            items: [App.left, App.page]
        });
    },
    // Toggle-button handler ('this' is the button): hide/show the top
    // banner and left menu, then re-layout the viewport.
    doFullView: function(){
        if(this.pressed){
            App.top.hide();
            App.left.hide();
        }else{
            App.top.show();
            App.left.show();
        }
        App.viewport.doLayout();
    }
});
// Event handlers for the top banner: sub-system switcher, operator profile
// edit and logout.  Implemented as an immediately-invoked module returning TH.
TopHelper = function(){
    // Template rendering one button per sub-system in the switcher panel.
    var tpl = new Ext.XTemplate(
        '<ul>',
        '<tpl for=".">',
        '<li class="btn fl">',
        '<a href="#" onclick="TopHelper.f.doToggle(\'{itemId}\', \'{text}\');">',
        '<img src='+ Ext.BLANK_IMAGE_URL +' class="{iconCls}" border="0"/>',
        '<p>{text}</p>',
        '</a>',
        '</li>',
        '</tpl>',
        '</ul>');
    // Shadow and panel elements of the switcher; resolved in initEvents.
    var sss = null, ssp = null;
    // Show or hide both switcher elements at once ('block' or 'none').
    function _ff(dpy){
        sss.dom.style.display = dpy;
        ssp.dom.style.display = dpy;
    }
    var F = {
        // Sub-system id this page belongs to.
        defaultSystem: 7,
        url: root + '/system/Index!queryAllSubSystem.action',
        initialize: function(){
            Ext.get("MenuSystem").on("click", _ff.createDelegate(this, ["block"]));
            Ext.get("ss-close").on("click", _ff.createDelegate(this, ["none"]));
            App.data.sysId = F.defaultSystem;
            //load data
            F.loadAndOverride(Ext.get("ss-bts"));
        },
        // Fetch the sub-system list and render it into el.
        loadAndOverride: function(el){
            Ext.Ajax.request({
                url: F.url,
                scope: this,
                success: function(res,opt){
                    var data = Ext.decode(res.responseText);
                    var html = F.apply(data);
                    el.dom.innerHTML = html;
                }
            });
        },
        // Map server rows to template items; the current system is greyed
        // out and disabled so it cannot be re-selected.
        apply: function( data ){
            var items = [];
            for(var i = 0; i< data.length; i++){
                var o = {
                    text: data[i]["sub_system_name"],
                    itemId: data[i]["sub_system_id"],
                    url: data[i]["sub_system_url"],
                    root: data[i]["sub_system_host"],
                    iconCls: data[i]["iconcls"]
                };
                if(o.itemId == App.data.sysId){
                    o["text"] = "<b><font color=gray>" + o["text"] + "</font></b>";
                    o["disabled"] = true;
                }
                items.push( o );
            }
            // apply template
            return tpl.apply(items);
        },
        // Confirm, then jump to another sub-system through the rego gateway.
        doToggle: function(itemId, text){
            if(itemId == App.data.sysId){
                return ;
            }
            Confirm("确定要切换系统至 "+ text +"", null,function(){
                App.href(regourl + '/rego?tokenId=' + token_id + '&sub_system_id=' + itemId);
            });
        }
    };
    var TH = {
        f: F,
        initEvents: function(){
            sss = Ext.get("ss-shadow");
            ssp = Ext.get("ss-panel");
            // sub-system switcher
            TH.f.initialize();
            // other banner buttons
            Ext.get("MenuUpdate").on("click", TH.userUpdate);
            Ext.get("MenuLogout").on("click", TH.logout);
        },
        logout: function(){
            Confirm("确定要退出系统",null,function(){
                App.href(Constant.ROOT_PATH + "/gologin");
            });
        },
        // Open the operator-profile window (singleton).
        userUpdate:function(){
            var win = Ext.getCmp('optrDataWinId');
            if(!win)
                win = new OptrDataWin();
            win.show();
        }
    };
    return TH;
}();
// Report download prompt: an information message box with a single CANCEL
// button; the close tool is disabled so the box must be dismissed by button.
AlertReport = function( msg , fn , scope ){
    var box = Ext.Msg;
    return box.show({
        title: box.title,
        msg: msg,
        icon: box.INFO,
        buttons: box.CANCEL,
        closable: false,
        fn: fn,
        scope: scope
    });
};
// Progress indicator shown while data is being submitted; the message box
// is stored on App.currentBar so callers can hide it later.
Show = function( anim ){
    var bar = Ext.MessageBox.show({
        msg: '正在提交数据... <a href=#>[...]</a>',
        wait: true,
        waitConfig: { interval: 150 },
        icon: 'icon-download',
        animEl: anim
    });
    App.currentBar = bar;
    return bar;
};
// Patch for Ext.data.Store.applySort: sort Chinese strings correctly via
// localeCompare, and keep report summary rows in place while sorting.
Ext.data.Store.prototype.applySort = function() {
    if (this.sortInfo && !this.remoteSort) {
        var s = this.sortInfo, f = s.field;
        var st = this.fields.get(f).sortType;
        var fn = function(r1, r2) {
            // Summary rows (issumrow_report == 'T') compare equal to every
            // other row, so a stable sort leaves them where they are.
            if(!Ext.isEmpty(r1.data['issumrow_report'])&&r1.data['issumrow_report']=='T'){
                return 0;
            }else if(!Ext.isEmpty(r2.data['issumrow_report'])&&r2.data['issumrow_report']=='T'){
                return 0;
            }else{
                var v1 = st(r1.data[f]), v2 = st(r2.data[f]);
                // Fix for CJK sorting: localeCompare orders Chinese strings
                // correctly (supported by both Firefox and IE).
                if (typeof(v1) == "string") {
                    return v1.localeCompare(v2);
                }
                return v1 > v2 ? 1 : (v1 < v2 ? -1 : 0);
            }
        };
        this.data.sort(s.direction, fn);
        if (this.snapshot && this.snapshot != this.data) {
            this.snapshot.sort(s.direction, fn);
        }
    }
};
// Globally disable built-in text-field vtype validation.
Ext.apply(Ext.form.TextField.prototype,{
    vtype:''
});
/* Grid-panel cell copy support: add the x-selectable class to the default
 * cell template so cell text can be selected and copied. */
if (!Ext.grid.GridView.prototype.templates) {
    Ext.grid.GridView.prototype.templates = {};
}
Ext.grid.GridView.prototype.templates.cell = new Ext.Template(
    '<td class="x-grid3-col x-grid3-cell x-grid3-td-{id} x-selectable {css}" style="{style}" tabIndex="0" {cellAttr}>' ,
    '<div class="x-grid3-cell-inner x-grid3-col-{id}" {attr}>{value}</div>' ,
    '</td>'
);
/**
 * Window listing report tasks with paging, keyword search and per-row
 * download/delete links.  Singleton, component id 'taskQueryWinId'.
 */
TaskQueryWin = Ext.extend(Ext.Window, {
    taskStore: null,
    pageSize: 15,
    editRep: false,
    constructor: function(){
        this.taskStore = new Ext.data.JsonStore({
            url: root+"/query/Report!queryTasks.action",
            root:'page.records',
            totalProperty:'page.totalProperty',
            fields: ['task_id','task_name','rep_id','rep_name','task_type','task_type_text','task_execday','is_dowload','is_delete','keylist',
            'optr_id','optr_name','status','status_text','exec_result','exec_start_time','exec_query_id','exec_end_time','is_waitexec','create_time','remark']
        });
        this.grid = new Ext.grid.GridPanel({
            border:false,
            store:this.taskStore,
            columns:[
                {header:'任务ID',dataIndex:'task_id',width:50},
                {header:'任务名称',dataIndex:'task_name',width:100,renderer:App.qtipValue},
                {header:'报表名称',dataIndex:'rep_name',width:100,renderer:App.qtipValue},
                {header:'任务类型',dataIndex:'task_type_text',width:85},
                {header:'执行日',dataIndex:'task_execday',width:90,renderer:App.qtipValue},
                // Status column: tasks flagged is_waitexec display as pending.
                {header:'状态',dataIndex:'status_text',width:75,renderer:function(v,meta,record){
                    if(record.get('is_waitexec') == 'T'){
                        v = '待执行';
                    }
                    return v;
                }},
                {header:'操作员',dataIndex:'optr_name',width:90,renderer:App.qtipValue},
                // Action column: download/delete links depending on row flags.
                {header:'操作',dataIndex:'task_id',width:75,scope:this,renderer:function(v,meta,record){
                    var res = "";
                    if(record.get('is_dowload') == 'T'){
                        res = "<a href='#' onclick=Ext.getCmp('taskQueryWinId').doDown("+record.get('rep_id')+","+record.get('exec_query_id')+")>下载</a> ";
                    }
                    if(record.get('is_delete') == 'T'){
                        res += "<a href='#' onclick=Ext.getCmp('taskQueryWinId').doDel("+v+")>删除</a>";
                    }
                    return res;
                }
                },
                {header:'实际开始时间',dataIndex:'exec_start_time',width:130},
                {header:'实际结束时间',dataIndex:'exec_end_time',width:130},
                {header:'备注',dataIndex:'remark',width:100,renderer:App.qtipValue},
                {header:'查询条件',dataIndex:'keylist',width:75,renderer:App.qtipValue},
                {header:'执行标记',dataIndex:'exec_result',width:75,renderer:App.qtipValue},
                {header:'执行结果ID',dataIndex:'exec_query_id',width:75},
                {header:'创建日期',dataIndex:'create_time',width:130}
            ],
            tbar: [
                '-','输入关键字 ',
                new Ext.ux.form.SearchField({
                    store: this.taskStore,
                    width: 210,
                    hasSearch : true,
                    emptyText: '支持任务名册和报表名称模糊查询'
                }),'-'
            ],
            bbar: new Ext.PagingToolbar({store: this.taskStore, pageSize: this.pageSize})
        });
        TaskQueryWin.superclass.constructor.call(this, {
            id:'taskQueryWinId',
            title:'任务配置',
            closeAction:'close',
            width:700,
            height:400,
            layout:'fit',
            items:this.grid
        });
        // Load the first page immediately.
        this.taskStore.load({
            params: {
                start: 0,
                limit: this.pageSize
            }
        });
    },
    // Ask the server to build the export file, then offer a download link.
    doDown: function(repId, queryId){
        var mask = Show();// progress bar
        Ext.Ajax.request({
            //scope : this,
            timeout:9999999999,
            url:root+"/query/Show!createExp.action",
            params:{query_id:queryId},
            success:function(res){
                mask.hide();
                mask=null;
                AlertReport("     <a href="+root+"/query/Show!downloadExp.action?query_id="
                +queryId+"&rep_id="+repId+" >点击下载</a>");
            }
        });
    },
    // Confirm, delete the task server-side, then reload the first page.
    doDel: function(taskId){
        Confirm('确定删除?', this, function(){
            Ext.Ajax.request({
                url: root+"/query/Report!deleteRepTask.action",
                params:{task_id: taskId},
                scope:this,
                success: function(res){
                    var data = Ext.decode(res.responseText);
                    if(data === true){
                        Alert('删除成功');
                        this.taskStore.load({
                            start: 0,
                            limit: this.pageSize
                        });
                    }
                }
            });
        });
    }
});
|
/*
* Common board functions for siemens AM335X based boards
* (C) Copyright 2013 Siemens Schweiz AG
* (C) Heiko Schocher, DENX Software Engineering, hs@denx.de.
*
* Based on:
* U-Boot file:/board/ti/am335x/board.c
* Copyright (C) 2011, Texas Instruments, Incorporated - http://www.ti.com/
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <errno.h>
#include <spl.h>
#include <asm/arch/cpu.h>
#include <asm/arch/hardware.h>
#include <asm/arch/omap.h>
#include <asm/arch/ddr_defs.h>
#include <asm/arch/clock.h>
#include <asm/arch/gpio.h>
#include <asm/arch/mmc_host_def.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#include <asm/emif.h>
#include <asm/gpio.h>
#include <i2c.h>
#include <miiphy.h>
#include <cpsw.h>
#include <watchdog.h>
#include <asm/mach-types.h>
#include "../common/factoryset.h"
DECLARE_GLOBAL_DATA_PTR;
#ifdef CONFIG_SPL_BUILD
/* SPL: route the console UART (UART0) pins through the pin multiplexer. */
void set_uart_mux_conf(void)
{
	enable_uart0_pin_mux();
}
/*
 * SPL: set up the I2C0 mux, bring up the console early, read the board ID
 * from EEPROM and apply the board-specific pin multiplexing.
 */
void set_mux_conf_regs(void)
{
	/* Initialize the board header */
	enable_i2c0_pin_mux();
	i2c_set_bus_num(0);

	/* enable early the console */
	gd->baudrate = CONFIG_BAUDRATE;
	serial_init();
	gd->have_console = 1;

	if (read_eeprom() < 0)
		puts("Could not get board ID.\n");

	enable_board_pin_mux();
}
/* SPL: board-level setup followed by DDR controller initialisation. */
void sdram_init(void)
{
	spl_siemens_board_init();
	board_init_ddr();
}
#endif /* #ifdef CONFIG_SPL_BUILD */
#ifndef CONFIG_SPL_BUILD
/*
 * Basic board specific setup.  Pinmux has been handled already.
 */
int board_init(void)
{
#if defined(CONFIG_HW_WATCHDOG)
	hw_watchdog_init();
#endif /* defined(CONFIG_HW_WATCHDOG) */
	/* The board ID EEPROM sits on I2C bus 0. */
	i2c_set_bus_num(0);
	if (read_eeprom() < 0)
		puts("Could not get board ID.\n");
#ifdef CONFIG_MACH_TYPE
	gd->bd->bi_arch_number = CONFIG_MACH_TYPE;
#endif
	/* Kernel boot parameters are placed just after the start of SDRAM. */
	gd->bd->bi_boot_params = CONFIG_SYS_SDRAM_BASE + 0x100;
#ifdef CONFIG_FACTORYSET
	factoryset_read_eeprom(CONFIG_SYS_I2C_EEPROM_ADDR);
#endif
	gpmc_init();
#ifdef CONFIG_NAND_CS_INIT
	board_nand_cs_init();
#endif
#ifdef CONFIG_VIDEO
	board_video_init();
#endif
	return 0;
}
#endif /* #ifndef CONFIG_SPL_BUILD */
/* Oscillator frequency in MHz. */
#define OSC	(V_OSCK/1000000)

/* DDR DPLL settings: M = DDR_PLL_FREQ, N = OSC-1, M2 = 1; -1 marks
 * divider fields that are left untouched. */
const struct dpll_params dpll_ddr = {
		DDR_PLL_FREQ, OSC-1, 1, -1, -1, -1, -1};

const struct dpll_params *get_dpll_ddr_params(void)
{
	return &dpll_ddr;
}
#ifndef CONFIG_SPL_BUILD
#define MAX_NR_LEDS	10	/* environment variables scanned: led0..led9 */
#define MAX_PIN_NUMBER	128	/* highest GPIO number accepted from env */
#define STARTUP		0	/* draco_led argument selecting the S0 states */
#if defined(BOARD_DFU_BUTTON_GPIO)
/*
 * Read the state of a button GPIO.  The pin number is taken from the
 * environment variable *envname*; when the variable is absent or names a
 * pin beyond MAX_PIN_NUMBER, *def* is used instead.  Returns 1 when the
 * pin reads high, 0 otherwise.
 */
unsigned char get_button_state(char * const envname, unsigned char def)
{
	char *value;
	int gpio;
	int pressed;

	value = env_get(envname);
	if (value == NULL) {
		gpio = def;
	} else {
		gpio = (unsigned char)simple_strtoul(value, NULL, 0);
		if (gpio > MAX_PIN_NUMBER)
			gpio = def;
	}

	gpio_request(gpio, "");
	gpio_direction_input(gpio);
	pressed = gpio_get_value(gpio) ? 1 : 0;
	gpio_free(gpio);

	return pressed;
}
/**
 * Shell command: report the status of the user (DFU) button.
 * Input - none
 * Returns - 1 if button is held down
 *           0 if button is not held down
 */
static int
do_userbutton(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	int button = 0;

	/* Either of the two configured DFU buttons counts as pressed. */
	button = get_button_state("button_dfu0", BOARD_DFU_BUTTON_GPIO);
	button |= get_button_state("button_dfu1", BOARD_DFU_BUTTON_GPIO);
	return button;
}

U_BOOT_CMD(
	dfubutton, CONFIG_SYS_MAXARGS, 1, do_userbutton,
	"Return the status of the DFU button",
	""
);
#endif
/* Shell command: hang on purpose so the hardware watchdog can be verified
 * to reset the board. */
static int
do_usertestwdt(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("\n\n\n Go into infinite loop\n\n\n");
	while (1)
		;
	return 0;
};

U_BOOT_CMD(
	testwdt, CONFIG_SYS_MAXARGS, 1, do_usertestwdt,
	"Sends U-Boot into infinite loop",
	""
);
/**
 * Get led gpios from env and set them.
 *
 * The led definition in the environment needs to be of the form
 * ledN=NN,S0,S1 where N is an unsigned integer from 0 to 9 and S0 and S1
 * is 0 or 1.  S0 defines the startup state of the led, S1 the special
 * state of the led when it enters e.g. dfu mode.
 */
void set_env_gpios(unsigned char state)
{
	char *ptr_env;
	char str_tmp[5];	/* scratch: holds "ledN" or a parsed field */
	char num[2];		/* BUG FIX: one digit + NUL terminator; the
				 * original num[1] was overflowed by one byte
				 * by sprintf(num, "%d", i) */
	unsigned char i, idx, pos1, pos2, ccount;
	unsigned char gpio_n, gpio_s0, gpio_s1;

	for (i = 0; i < MAX_NR_LEDS; i++) {
		strcpy(str_tmp, "led");
		sprintf(num, "%d", i);
		strcat(str_tmp, num);

		/* If env var is not found we stop */
		ptr_env = env_get(str_tmp);
		if (NULL == ptr_env)
			break;

		/* Find separators position */
		pos1 = 0;
		pos2 = 0;
		ccount = 0;
		for (idx = 0; ptr_env[idx] != '\0'; idx++) {
			if (ptr_env[idx] == ',') {
				if (ccount++ < 1)
					pos1 = idx;
				else
					pos2 = idx;
			}
		}

		/* Bad led description skip this definition */
		if (pos2 <= pos1 || ccount > 2)
			continue;

		/* Robustness fix: fields longer than the scratch buffer are
		 * malformed; skip instead of overflowing str_tmp. */
		if (pos1 >= sizeof(str_tmp) ||
		    (unsigned char)(pos2 - pos1 - 1) >= sizeof(str_tmp))
			continue;

		/* Get pin number and request gpio */
		memset(str_tmp, 0, sizeof(str_tmp));
		strncpy(str_tmp, ptr_env, pos1);
		gpio_n = (unsigned char)simple_strtoul(str_tmp, NULL, 0);

		/* Invalid gpio number skip definition */
		if (gpio_n > MAX_PIN_NUMBER)
			continue;

		gpio_request(gpio_n, "");
		if (state == STARTUP) {
			/* get pin state 0 and set */
			memset(str_tmp, 0, sizeof(str_tmp));
			strncpy(str_tmp, ptr_env + pos1 + 1, pos2 - pos1 - 1);
			gpio_s0 = (unsigned char)simple_strtoul(str_tmp, NULL,
								0);
			gpio_direction_output(gpio_n, gpio_s0);
		} else {
			/* get pin state 1 and set; bounded copy instead of
			 * the original unbounded strcpy */
			memset(str_tmp, 0, sizeof(str_tmp));
			strncpy(str_tmp, ptr_env + pos2 + 1,
				sizeof(str_tmp) - 1);
			gpio_s1 = (unsigned char)simple_strtoul(str_tmp, NULL,
								0);
			gpio_direction_output(gpio_n, gpio_s1);
		}
	} /* loop through leds defined in the environment */
}
/* Shell command: 'draco_led 0' applies the startup LED states from the
 * environment; any other argument applies the special states. */
static int do_board_led(cmd_tbl_t *cmdtp, int flag, int argc,
			char *const argv[])
{
	if (argc != 2)
		return CMD_RET_USAGE;
	if ((unsigned char)simple_strtoul(argv[1], NULL, 0) == STARTUP)
		set_env_gpios(0);
	else
		set_env_gpios(1);
	return 0;
};

U_BOOT_CMD(
	draco_led, CONFIG_SYS_MAXARGS, 2, do_board_led,
	"Set LEDs defined in environment",
	"<0|1>"
);
#endif /* !CONFIG_SPL_BUILD */
|
import os

import numpy as np
import pandas as pd
import pytest

from cli.ACME_simulator import *
@pytest.mark.skip
def test_plot_exp():
    """plot_exp should write a PNG image at save_path (with .png appended)."""
    grid = np.zeros((11, 11))
    grid[5, 5] = 1
    target = 'data/test_plot_for_plot_exp'
    plot_exp(grid, save=True, save_path=target)
    # check that the plot exists on disk
    assert os.path.isfile(target + '.png')
def test_add_peak():
    """add_peak must deposit a 2-D Gaussian of the requested height/widths
    and report a volume close to 2*pi*height*sigma_x*sigma_y.

    Fix: the cross-section variables no longer shadow the builtin ``slice``.
    """
    exp = np.zeros((11, 11))
    peaks = pd.DataFrame(data={'time_idx': [5], 'mass_idx': [6],
                               'mass_width_idx': [5], 'time_width_idx': [5],
                               'height': [3]})
    for peak in peaks.itertuples():
        exp, volume = add_peak(exp, peak)
    # volume should be approx. 2*pi*peak.height*sigma_x*sigma_y = 13.1041
    # (https://en.wikipedia.org/wiki/Gaussian_function)
    assert 12.5 < volume < 13.5
    assert 12.5 < np.sum(exp) < 13.5
    # Cross-sections through the peak centre must match the Gaussian profile.
    row = np.round(exp[6, :], 3)
    expected = np.array([0., 0., 0.005, 0.168, 1.46, 3., 1.46, 0.168, 0.005, 0., 0.])
    np.testing.assert_array_equal(row, expected)
    col = np.round(exp[:, 5], 3)
    expected = np.array([0., 0., 0., 0.005, 0.168, 1.46, 3., 1.46, 0.168, 0.005, 0.])
    np.testing.assert_array_equal(col, expected)
    # A narrower time width shrinks the volume (approx 7.862).
    exp = np.zeros((11, 11))
    peaks = pd.DataFrame(data={'time_idx': [5], 'mass_idx': [6],
                               'mass_width_idx': [5], 'time_width_idx': [3],
                               'height': [3]})
    for peak in peaks.itertuples():
        exp, volume = add_peak(exp, peak)
    assert 7.5 < volume < 8.5
    assert np.max(exp) == 3
    # On a non-zero background the peak adds on top: total = volume + 11*11.
    exp = np.ones((11, 11))
    peaks = pd.DataFrame(data={'time_idx': [5], 'mass_idx': [6],
                               'mass_width_idx': [5], 'time_width_idx': [3],
                               'height': [3]})
    for peak in peaks.itertuples():
        exp, volume = add_peak(exp, peak)
    assert 7.5 < volume < 8.5  # volume should be approx 7.862
    assert 128.5 < np.sum(exp) < 129.5
@pytest.mark.skip
def test_add_stripe():
    """add_stripe should overlay a noisy vertical stripe bounded by cliffs."""
    exp = np.zeros((11, 101))
    stripes = pd.DataFrame(data={'stripe_noise': [10], 'stripe_offset': [100],
                                 'stripe_width': [3], 'stripe_mass_idx': [5]})
    cliffs = np.array([0, 30, 60, 101])
    for stripe in stripes.itertuples():
        exp = add_stripe(exp, stripe, cliffs)
    # Offset plus noise keeps the maximum in a known band.
    assert 100 < np.max(exp) < 150
    # The untouched area (or negative noise) keeps the minimum at/below zero.
    assert np.min(exp) <= 0
def test_add_background_offset():
    """Background offsets must apply per cliff-delimited segment of the
    time axis.

    Fix: the cross-section variable no longer shadows the builtin ``slice``.
    """
    exp = np.zeros((11, 101))
    background_offsets = np.array([5, 50, 5])
    cliffs = np.array([0, 30, 60, 101])
    exp = add_background_offset(exp, background_offsets, cliffs)
    # Column 50 lies in the middle segment (offset 50).
    column = exp[:5, 50]
    np.testing.assert_array_equal(column, np.array([50., 50., 50., 50., 50.]))
    # Column 80 lies in the last segment (offset 5).
    column = exp[:5, 80]
    np.testing.assert_array_equal(column, np.array([5., 5., 5., 5., 5.]))
#TODO add test for main program of simulator
|
// Design token: padding-top of a bottom drawer panel in inline mode
// (--pf-c-drawer--m-panel-bottom--m-inline__panel--PaddingTop).
export const c_drawer_m_panel_bottom_m_inline__panel_PaddingTop = {
  "name": "--pf-c-drawer--m-panel-bottom--m-inline__panel--PaddingTop",
  "value": "1px",
  "var": "var(--pf-c-drawer--m-panel-bottom--m-inline__panel--PaddingTop)"
};
export default c_drawer_m_panel_bottom_m_inline__panel_PaddingTop;
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-26 01:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Profile model with a one-to-one link
    to the project's (possibly swapped) user model."""

    # First migration for this app.
    initial = True

    dependencies = [
        # Profile.user targets AUTH_USER_MODEL, which may be a swapped model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Deleting the user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
"""token_utils.py
------------------
A collection of useful functions and methods to deal with tokenizing
source code.
"""
import ast
import keyword
import sys
import tokenize as py_tokenize
from io import StringIO
from typing import Any, Iterable, List, Sequence, Tuple, Union
from . import debug_helper
# Token tuple as produced by tokenize.generate_tokens, or a plain 5-tuple
# with the same layout: (type, string, start, end, line).
_TokenInfo = Union[
    py_tokenize.TokenInfo, Tuple[int, str, Tuple[int, int], Tuple[int, int], str]
]

# Template used by Token.__repr__ for debugging output.
_token_format = "type={type} string={string} start={start} end={end} line={line}"
class Token:
    """A mutable, convenient wrapper around the 5-tuples produced by
    Python's ``tokenize.generate_tokens``, with custom helper methods.

    The attributes are::

        type: token type
        string: the token written as a string
        start = (start_row, start_col)
        end = (end_row, end_col)
        line: entire line of code where the token is found.

    Because instances are mutable, one can edit the ``string`` attribute
    of tokens in a list and untokenize that list, automatically obtaining
    a transformed source.
    """

    def __init__(self, token: "_TokenInfo") -> None:
        (self.type, self.string, self.start, self.end, self.line) = token[:5]
        self.start_row, self.start_col = self.start
        self.end_row, self.end_col = self.end

    def copy(self) -> "Token":
        """Return a new Token carrying the same attributes."""
        return Token((self.type, self.string, self.start, self.end, self.line))

    def __eq__(self, other: object) -> bool:
        """True when self.string equals other.string, or equals other
        itself after conversion to str."""
        return self.string == str(other)

    def __repr__(self) -> str:  # pragma: no cover
        """Nicely formatted token for debugging sessions.

        Deliberately *not* an eval()-able representation; Tokens should
        only be created indirectly through this module's functions.
        """
        return _token_format.format(
            type="%s (%s)" % (self.type, py_tokenize.tok_name[self.type]),
            string=repr(self.string),
            start=str(self.start),
            end=str(self.end),
            line=repr(self.line),
        )

    def __str__(self) -> str:
        """The token's string attribute."""
        return self.string

    def is_comment(self) -> bool:
        """True for COMMENT tokens."""
        return self.type == py_tokenize.COMMENT

    def is_identifier(self) -> bool:
        """True for valid Python identifiers that are not keywords.

        Note: unlike ``str.isidentifier``, this excludes keywords.
        """
        return self.string.isidentifier() and not self.is_keyword()

    def is_name(self) -> bool:
        """True for NAME tokens."""
        return self.type == py_tokenize.NAME

    def is_keyword(self) -> bool:
        """True for Python keywords, plus ``__debug__`` and ``...``."""
        return keyword.iskeyword(self.string) or self.string in ("__debug__", "...")

    def is_number(self) -> bool:
        """True for NUMBER tokens of any numeric type."""
        return self.type == py_tokenize.NUMBER

    def is_operator(self) -> bool:
        """True for OP tokens."""
        return self.type == py_tokenize.OP

    def is_float(self) -> bool:
        """True when the token is a float literal."""
        return self.is_number() and isinstance(ast.literal_eval(self.string), float)

    def is_integer(self) -> bool:
        """True when the token is an integer literal."""
        return self.is_number() and isinstance(ast.literal_eval(self.string), int)

    def is_complex(self) -> bool:
        """True when the token is a complex literal."""
        return self.is_number() and isinstance(ast.literal_eval(self.string), complex)

    def is_space(self) -> bool:
        """True for structural whitespace tokens: INDENT, DEDENT, NEWLINE,
        NL and ENDMARKER.

        Plain spaces and tab characters ``\\t`` *between* tokens on a line
        are not tokens at all.
        """
        return self.type in {
            py_tokenize.INDENT,
            py_tokenize.DEDENT,
            py_tokenize.NEWLINE,
            py_tokenize.NL,
            py_tokenize.ENDMARKER,
        }

    def is_string(self) -> bool:
        """True for STRING tokens."""
        return self.type == py_tokenize.STRING

    def immediately_before(self, other: "Any") -> bool:
        """True when this token ends exactly where *other* starts, with no
        intervening space."""
        if not isinstance(other, Token):  # pragma: no cover
            return False
        return (self.end_row, self.end_col) == (other.start_row, other.start_col)

    def immediately_after(self, other: "Any") -> bool:
        """True when this token starts exactly where *other* ends, with no
        intervening space."""
        return isinstance(other, Token) and other.immediately_before(self)
def is_assignment(op: "Union[str, Token]") -> bool:
    """Returns True if op (string or Token) is an assignment or augmented
    assignment operator."""
    ops = {
        "=", "+=", "-=", "*=", "@=", "/=", "//=",
        "%=", "**=", ">>=", "<<=", "&=", "^=", "|=",
    }
    if sys.version_info >= (3, 8):
        # The walrus operator only exists from Python 3.8 on.
        ops.add(":=")
    return str(op) in ops
def is_bitwise(op: "Union[str, Token]") -> bool:
    """Returns True if op (string or Token) is a bitwise operator."""
    return str(op) in {"^", "&", "|", "<<", ">>", "~"}
def is_comparison(op: "Union[str, Token]") -> bool:
    """Returns True if op (string or Token) is a comparison operator."""
    return str(op) in {"<", ">", "<=", ">=", "==", "!="}
def is_math_op(op: "Union[str, Token]") -> bool:
    """Returns True if op (string or Token) is an operator that can be used
    as a binary operator in a mathematical operation.
    """
    return str(op) in {"+", "-", "*", "**", "@", "/", "//", "%"}
def is_operator(op: "Union[str, Token]") -> bool:
    """Returns True if op (string or token) is or could be part of one
    of the following: assignment operator, mathematical operator,
    bitwise operator, comparison operator."""
    text = str(op)
    # "!" and ":" can be the first character of "!=" and ":=".
    if text in ("!", ":"):
        return True
    return (
        is_assignment(text)
        or is_bitwise(text)
        or is_comparison(text)
        or is_math_op(text)
    )
def fix_empty_line(source: str, tokens: "Sequence[Token]") -> None:
    """Python's tokenizer drops entirely a last line if it consists only of
    space characters and/or tab characters.  To ensure that we can always
    have::

        untokenize(tokenize(source)) == source

    we put that trailing whitespace back into the last token's string.

    Robustness fix: the original unconditionally indexed ``tokens[-1]``
    (IndexError on an empty list) and sliced ``source[-0:]`` (assigning the
    *entire* source) when there was no trailing whitespace; both cases are
    now no-ops.
    """
    if not tokens:
        return
    nb = 0
    for char in reversed(source):
        if char in (" ", "\t"):
            nb += 1
        else:
            break
    if nb == 0:
        return
    tokens[-1].string = source[-nb:]
def tokenize(source: str) -> "List[Token]":
    """Transforms a source (string) into a list of Tokens.

    If an exception is raised by Python's tokenize module, the list of
    tokens accumulated up to that point is returned.  For an
    IndentationError, the offending remainder of the line is first
    appended as a NAME-typed token so that it is not lost.
    """
    tokens = []
    try:
        for tok in py_tokenize.generate_tokens(StringIO(source).readline):
            tokens.append(Token(tok))
    except IndentationError as exc:
        try:
            _ignore, linenumber, col, line = exc.args[1]
            # NAME is not really relevant here, except that ERRORTOKEN
            # would cause problems later on.
            type_ = py_tokenize.NAME
            start = (linenumber, col)
            end = (linenumber, len(line))
            string = line[col:].strip()
            tokens.append(Token((type_, string, start, end, line)))
            return tokens
        except Exception as inner_exc:  # pragma: no cover
            # Fix: renamed so the inner exception no longer shadows
            # the IndentationError bound above.
            debug_helper.log(
                "after IndentationError, error from token_utils.tokenize()"
            )
            debug_helper.log(repr(inner_exc))
            return tokens
    except Exception:
        # Catch-all (the original's (TokenError, Exception) tuple was
        # redundant — Exception already covers TokenError).
        return tokens

    if source.endswith((" ", "\t")):
        fix_empty_line(source, tokens)

    return tokens
def get_significant_tokens(source: str) -> "List[Token]":
    """Tokenize *source* and keep only meaningful tokens: comments are
    dropped, as is any token whose string is empty or consists solely of
    spaces, newlines or tabs.
    """
    try:
        tokens = tokenize(source)
    except Exception as exc:  # pragma: no cover
        debug_helper.log("Exception from token_utils.get_significant_tokens()")
        debug_helper.log_error(exc)
        return []
    return remove_meaningless_tokens(tokens)
def remove_meaningless_tokens(tokens: "Iterable[Token]") -> "List[Token]":
    """Given a list of tokens, remove all space-like tokens and comments."""
    return [tok for tok in tokens if tok.string.strip() and not tok.is_comment()]
def get_lines(source: str) -> "List[List[Token]]":
    """Transforms a source (string) into a list of Tokens, with each
    (inner) list containing all the tokens found on a given line of code
    (grouped by each token's starting row).
    """
    lines: "List[List[Token]]" = []
    current_row = -1
    new_line: "List[Token]" = []
    try:
        for tok in py_tokenize.generate_tokens(StringIO(source).readline):
            token = Token(tok)
            if token.start_row != current_row:
                current_row = token.start_row
                if new_line:
                    lines.append(new_line)
                new_line = []
            new_line.append(token)
        lines.append(new_line)
    except Exception:  # pragma: no cover
        # Catch-all (the original's (TokenError, Exception) tuple was
        # redundant — Exception already covers TokenError).
        debug_helper.log("Exception raise in token_utils.get_lines")
        return lines

    # Guard against an empty result before indexing lines[-1]
    # (robustness fix).
    if lines and source.endswith((" ", "\t")):
        fix_empty_line(source, lines[-1])

    return lines
def strip_comment(line: str) -> str:
    """Return *line* with its comment tokens removed."""
    kept = []
    try:
        for tok in py_tokenize.generate_tokens(StringIO(line).readline):
            token = Token(tok)
            if not token.is_comment():
                kept.append(token)
    except py_tokenize.TokenError:
        # A partial line may leave the tokenizer unsatisfied; keep what
        # was gathered so far.
        pass
    return untokenize(kept)
def find_substring_index(main: str, substring: str) -> int:
    """Somewhat similar to the find() method for strings, this function
    determines if the tokens for substring appear as a contiguous
    subsequence of the tokens for main.  If so, the index of the first
    matching token is returned, otherwise -1 is returned.

    Bug fixes versus the original implementation:

    - an empty *substring* no longer raises IndexError (it returns -1);
    - a partial match at the very end of *main* (fewer tokens remaining
      than *substring* contains) is no longer reported as a full match.
    """
    main_tokens = [tok.string for tok in get_significant_tokens(main)]
    sub_tokens = [tok.string for tok in get_significant_tokens(substring)]
    if not sub_tokens:
        return -1
    # Only start positions that leave room for the whole substring.
    for index in range(len(main_tokens) - len(sub_tokens) + 1):
        if main_tokens[index : index + len(sub_tokens)] == sub_tokens:
            return index
    return -1
def dedent(tokens: "Iterable[Union[str, Token]]", nb: int) -> "List[Token]":
    """Given a list of tokens, produces an equivalent list corresponding
    to a line of code with the first nb characters removed.
    """
    return tokenize(untokenize(tokens)[nb:])
def indent(
    tokens: "Iterable[Union[str, Token]]", nb: int, tab: bool = False
) -> "List[Token]":
    """Given a list of tokens, produces an equivalent list corresponding
    to a line of code with nb space characters inserted at the beginning.

    If ``tab`` is specified to be ``True``, ``nb`` tab characters are
    inserted instead of spaces.
    """
    prefix = ("\t" if tab else " ") * nb
    return tokenize(prefix + untokenize(tokens))
def untokenize(tokens: Iterable[Union[str, Token]]) -> str:
    """Return source code based on tokens.

    This is similar to Python's own tokenize.untokenize(), except that it
    preserves spacing between tokens, by using the line
    information recorded by Python's tokenize.generate_tokens.
    As a result, if the original source code had multiple spaces between
    some tokens or if escaped newlines were used or if tab characters
    were present in the original source, those will also be present
    in the source code produced by untokenize.

    Thus ``source == untokenize(tokenize(source))``.

    Note: if you are modifying tokens from an original source:

    Instead of full token object, ``untokenize`` will accept simple
    strings; however, it will only insert them *as is* without taking them
    into account when it comes with figuring out spacing between tokens.
    """
    # Adapted from https://github.com/myint/untokenize,
    # Copyright (C) 2013-2018 Steven Myint, MIT License (same as this project).
    words = []
    previous_line = ""
    last_row = 0
    last_column = -1
    last_non_whitespace_token_type = None
    for token in tokens:
        if isinstance(token, str):  # pragma: no cover
            # Plain strings are inserted verbatim, with no spacing logic.
            words.append(token)
            continue
        if token.type == py_tokenize.ENCODING:  # pragma: no cover
            continue
        # Preserve escaped newlines: re-emit the trailing backslash run
        # that the tokenizer swallowed from the previous physical line.
        if (
            last_non_whitespace_token_type != py_tokenize.COMMENT
            and token.start_row > last_row
            and previous_line.endswith(("\\\n", "\\\r\n", "\\\r"))
        ):
            words.append(previous_line[len(previous_line.rstrip(" \t\n\r\\")) :])
        # Preserve spacing: copy whatever separated this token from the
        # previous one straight from the recorded source line.
        if token.start_row > last_row:
            last_column = 0
        if token.start_col > last_column:
            words.append(token.line[last_column : token.start_col])
        words.append(token.string)
        previous_line = token.line
        last_row = token.end_row
        last_column = token.end_col
        if not token.is_space():
            last_non_whitespace_token_type = token.type
    return "".join(words)
# Convenience alias: raw source text, or an already-tokenized sequence.
TextOrTokens = Union[str, Sequence[Union[str, Token]]]


def print_tokens(source: TextOrTokens) -> None:  # pragma: no cover
    """Prints tokens found in source, excluding spaces and comments.

    ``source`` is either a string to be tokenized, or a list of Token
    objects.

    This is occasionally useful as a debugging tool.
    """
    if isinstance(source[0], Token):
        # Round-trip through untokenize so the per-line grouping of
        # get_lines can be reused.
        source = untokenize(source)  # type: ignore
    for lines in get_lines(source):  # type: ignore
        for token in lines:
            print(repr(token))
        print()
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
__all__ = ['build_swin_transformer']
class Mlp(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.

    Hidden and output widths fall back to the input width when not given.
    The same dropout module is applied after both linear layers.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # `or` preserves the original fallback semantics (None or 0 -> in_features).
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop, then fc2 -> drop.
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Cut a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C) feature map; H and W must be divisible by window_size.
        window_size (int): side length of each window.

    Returns:
        windows: (num_windows*B, window_size, window_size, C); windows are
        ordered row-major within each batch element.
    """
    B, H, W, C = x.shape
    rows, cols = H // window_size, W // window_size
    # (B, rows, ws, cols, ws, C) -> (B, rows, cols, ws, ws, C) -> merge the
    # batch and window-grid axes into a single leading dimension.
    tiled = x.reshape(B, rows, window_size, cols, window_size, C)
    tiled = tiled.permute(0, 1, 3, 2, 4, 5)
    return tiled.reshape(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    rows, cols = H // window_size, W // window_size
    # Recover the batch size from the total window count.
    B = windows.shape[0] // (rows * cols)
    grid = windows.reshape(B, rows, cols, window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5)
    return grid.reshape(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Scale queries by 1/sqrt(head_dim) unless an explicit override is given.
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # Encode each (dh, dw) offset pair as one flat index into the bias table.
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
        # Buffer (not a parameter): a fixed lookup table that follows the
        # module's device/dtype moves but receives no gradients.
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # One matmul projects to q, k, v, then heads are split:
        # (B_, N, C) -> (3, B_, num_heads, N, C // num_heads).
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # Look up the learned bias for every (query, key) relative offset.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # Add the shifted-window mask per window; broadcast over heads
            # and over the batch dimension folded into B_.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        # Merge heads back: (B_, nH, N, C//nH) -> (B_, N, C).
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        #  x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    One block of the Swin architecture: LayerNorm -> (shifted-)window attention
    -> residual, then LayerNorm -> MLP -> residual. When ``shift_size > 0`` the
    feature map is cyclically shifted before windowing (SW-MSA) and a
    precomputed mask hides attention between tokens that were wrapped around
    by the shift.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA:
            # label the 9 shift regions with distinct ids; after windowing,
            # token pairs from different regions get -100 (≈ -inf for softmax)
            # so they cannot attend to each other across the wrap-around seam.
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        # Buffer so the mask moves with the module's device (may be None).
        self.register_buffer("attn_mask", attn_mask)
    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift (torch.roll wraps tokens around the image border)
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (attn_mask is None in the non-shifted case)
        attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN (both residual branches go through stochastic depth)
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Downsamples a (B, H*W, C) token map by 2x in each spatial direction:
    the four tokens of every 2x2 neighborhood are concatenated channel-wise
    (4C), normalized, then linearly reduced to 2C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H/2*W/2, 2C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # The four 2x2 corners, in the order (0,0), (1,0), (0,1), (1,1).
        corners = [grid[:, 0::2, 0::2, :],
                   grid[:, 1::2, 0::2, :],
                   grid[:, 0::2, 1::2, :],
                   grid[:, 1::2, 1::2, :]]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C) # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))
    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"
    def flops(self):
        H, W = self.input_resolution
        # LayerNorm over the full-resolution tokens, then the 4C->2C reduction.
        norm_flops = H * W * self.dim
        reduction_flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return norm_flops + reduction_flops
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Stacks ``depth`` SwinTransformerBlocks, alternating non-shifted (even
    index) and shifted (odd index) window attention, optionally followed by a
    patch-merging downsample.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even blocks use regular windows (shift 0), odd blocks
        # use windows shifted by half the window size (SW-MSA).
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        # patch merging layer (downsample is a class, e.g. PatchMerging, not an instance)
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory: recompute activations on backward.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding.

    Splits an image into non-overlapping patches with a strided convolution
    and projects each patch to ``embed_dim`` channels, optionally followed by
    a normalization layer.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Patchify + project in one op: kernel == stride == patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None
    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        out = self.proj(x)                    # B, embed_dim, Ph, Pw
        out = out.flatten(2).transpose(1, 2)  # B, Ph*Pw, embed_dim
        return out if self.norm is None else self.norm(out)
    def flops(self):
        Ho, Wo = self.patches_resolution
        # One projection matmul per output patch (the conv), plus the optional norm.
        conv_flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        norm_flops = Ho * Wo * self.embed_dim if self.norm is not None else 0
        return conv_flops + norm_flops
class SwinTransformer(nn.Module):
    r""" Swin Transformer
        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -
          https://arxiv.org/pdf/2103.14030

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()
        # NOTE: `depths`/`num_heads` are mutable default arguments; they are
        # only read here, never mutated, so sharing across calls is benign.
        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel count after the last stage: embed_dim doubles at each merge.
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: drop-path probability increases linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build layers: each stage halves the resolution (via PatchMerging,
        # except the last stage) and doubles the channel count.
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        # Pools the (B, C, h, h) map from forward_features down to (B, C, 1, 1).
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.apply(self._init_weights)
        # for reshape
        self.img_size = img_size
        # NOTE(review): the total 32x down-sampling (patch_size * 2**(num_layers-1))
        # and the square grid are hard-coded below; this only holds for 4-stage
        # configs with patch_size=4, square img_size divisible by 32 — confirm
        # before using other configurations.
        self.down_sample_ratio = 32
        self.h = img_size // 32
    def _init_weights(self, m):
        # Truncated-normal init for Linear weights, zeros for biases,
        # identity-style init (weight=1, bias=0) for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer setup.
        return {'absolute_pos_embed'}
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}
    def forward_features(self, x):
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x)
        x = self.norm(x)  # B L C
        # Fold the token sequence back into a square (self.h x self.h) map.
        B,L,C = x.shape
        x = x.transpose(1,2).reshape(B, C, self.h , self.h)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.avgpool(x).squeeze(-1).squeeze(-1)  # B C
        x = self.head(x)
        return x
    def flops(self):
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        # final norm + classification head
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
def build_swin_transformer(modelname, img_size, num_classes=1000):
    """Instantiate one of the supported Swin Transformer configurations.

    Args:
        modelname (str): One of 'swin_B_224_22k', 'swin_B_384_22k',
            'swin_L_224_22k', 'swin_L_384_22k'. 'B' (base) and 'L' (large)
            differ in width and head count; '224' vs '384' differ only in
            window size (7 vs 12). All variants use depths [2, 2, 18, 2].
        img_size (int | tuple(int)): Input image size passed to the model.
        num_classes (int): Size of the classification head. Default: 1000.

    Returns:
        SwinTransformer: The configured, randomly initialized model.

    Raises:
        AssertionError: If ``modelname`` is not a supported variant.
    """
    assert modelname in ['swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']
    # Width / heads are determined by the size letter, window size by resolution.
    embed_dim, num_heads = {
        'B': (128, [4, 8, 16, 32]),
        'L': (192, [6, 12, 24, 48]),
    }[modelname.split('_')[1]]
    window_size = 7 if '_224_' in modelname else 12
    return SwinTransformer(
        img_size=img_size,
        num_classes=num_classes,
        embed_dim=embed_dim,
        depths=[2, 2, 18, 2],
        num_heads=num_heads,
        window_size=window_size,
    )
|
/* */
var baseHas = require('./_baseHas'),
keys = require('./keys');
var PARTIAL_COMPARE_FLAG = 2;
/**
 * Compares `object` and `other` by their own enumerable keys (via `keys`)
 * to determine whether they are structurally equivalent.
 *
 * @param {Object} object The object to compare.
 * @param {Object} other The other object to compare.
 * @param {Function} equalFunc The function used to recursively compare values.
 * @param {Function} [customizer] Optional function to customize comparisons;
 *  a non-`undefined` return value overrides the default comparison.
 * @param {number} [bitmask] Comparison flags; `PARTIAL_COMPARE_FLAG` enables
 *  partial (subset) comparison.
 * @param {Object} stack Tracks object pairs already being compared, to
 *  short-circuit circular references.
 * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
 */
function equalObjects(object, other, equalFunc, customizer, bitmask, stack) {
  var isPartial = bitmask & PARTIAL_COMPARE_FLAG,
      objProps = keys(object),
      objLength = objProps.length,
      othProps = keys(other),
      othLength = othProps.length;

  // In partial mode `object` may have fewer keys than `other`; otherwise
  // differing key counts mean the objects cannot be equal.
  if (objLength != othLength && !isPartial) {
    return false;
  }
  // Every key of `object` must exist on `other`.
  var index = objLength;
  while (index--) {
    var key = objProps[index];
    if (!(isPartial ? key in other : baseHas(other, key))) {
      return false;
    }
  }
  // Cycle guard: if this pair is already on the comparison stack, assume
  // equality for the pending recursive comparison.
  var stacked = stack.get(object);
  if (stacked) {
    return stacked == other;
  }
  var result = true;
  stack.set(object, other);

  // `index` is -1 after the loop above, so this walks keys front-to-back.
  var skipCtor = isPartial;
  while (++index < objLength) {
    key = objProps[index];
    var objValue = object[key],
        othValue = other[key];

    if (customizer) {
      // Argument order is swapped for partial comparisons.
      var compared = isPartial ? customizer(othValue, objValue, key, other, object, stack) : customizer(objValue, othValue, key, object, other, stack);
    }
    // Recursively compare values (susceptible to call stack limits).
    if (!(compared === undefined ? (objValue === othValue || equalFunc(objValue, othValue, customizer, bitmask, stack)) : compared)) {
      result = false;
      break;
    }
    skipCtor || (skipCtor = key == 'constructor');
  }
  // Unless a 'constructor' key was compared above, objects with different
  // (non-inherited-alike) constructors are not considered equal.
  if (result && !skipCtor) {
    var objCtor = object.constructor,
        othCtor = other.constructor;

    if (objCtor != othCtor && ('constructor' in object && 'constructor' in other) && !(typeof objCtor == 'function' && objCtor instanceof objCtor && typeof othCtor == 'function' && othCtor instanceof othCtor)) {
      result = false;
    }
  }
  stack['delete'](object);
  return result;
}
module.exports = equalObjects;
|
from django.urls import re_path
from guests.views import GuestListView, guest_importer, test_email, export_guests, \
invitation, rsvp_confirm, dashboard, rsvp
# URL routing for the guests app.
urlpatterns = [
    # Full guest list (class-based list view).
    re_path(r'^guests/$', GuestListView.as_view(), name='guest-list'),
    re_path(r'^dashboard/$', dashboard, name='dashboard'),
    # Spreadsheet export of the guest list.
    re_path(r'^guests/export$', export_guests, name='export-guest-list'),
    # NOTE(review): the patterns below are unanchored at the end (no '$'),
    # so they match any path with the given prefix — presumably to allow
    # per-guest/invite suffixes; confirm against the view implementations.
    re_path(r'^invite/', invitation, name='invitation'),
    re_path(r'^rsvp/confirm/', rsvp_confirm, name='rsvp-confirm'),
    re_path(r'^rsvp/', rsvp, name='rsvp'),
    re_path(r'^importer', guest_importer, name='guest_importer')
]
|
/****************************************************************************
Copyright (c) 2013-2014 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#ifndef __EDITOR_SUPPORT_COCOSTUDIO_H__
#define __EDITOR_SUPPORT_COCOSTUDIO_H__
#include "cocostudio/CCActionFrame.h"
#include "cocostudio/CCActionFrameEasing.h"
#include "cocostudio/CCActionManagerEx.h"
#include "cocostudio/CCActionNode.h"
#include "cocostudio/CCActionObject.h"
#include "cocostudio/CCArmature.h"
#include "cocostudio/CCBone.h"
#include "cocostudio/CCArmatureAnimation.h"
#include "cocostudio/CCProcessBase.h"
#include "cocostudio/CCTween.h"
#include "cocostudio/CCDatas.h"
#include "cocostudio/CCBatchNode.h"
#include "cocostudio/CCDecorativeDisplay.h"
#include "cocostudio/CCDisplayFactory.h"
#include "cocostudio/CCDisplayManager.h"
#include "cocostudio/CCSkin.h"
#include "cocostudio/CCColliderDetector.h"
#include "cocostudio/CCArmatureDataManager.h"
#include "cocostudio/CCArmatureDefine.h"
#include "cocostudio/CCDataReaderHelper.h"
#include "cocostudio/CCSpriteFrameCacheHelper.h"
#include "cocostudio/CCTransformHelp.h"
#include "cocostudio/CCUtilMath.h"
#include "cocostudio/CCComBase.h"
#include "cocostudio/CCComAttribute.h"
#include "cocostudio/CCComAudio.h"
#include "cocostudio/CCComController.h"
#include "cocostudio/CCComRender.h"
#include "cocostudio/CCInputDelegate.h"
#include "cocostudio/DictionaryHelper.h"
#include "cocostudio/CCSGUIReader.h"
#include "cocostudio/CCSSceneReader.h"
#include "cocostudio/TriggerBase.h"
#endif
|
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_PROFILE_PICKER_H_
#define CHROME_BROWSER_UI_PROFILE_PICKER_H_
#include <vector>
#include "base/callback_forward.h"
#include "base/feature_list.h"
#include "base/time/time.h"
#include "chrome/browser/ui/webui/signin/enterprise_profile_welcome_ui.h"
#include "third_party/skia/include/core/SkColor.h"
#include "url/gurl.h"
class GURL;
namespace base {
class FilePath;
} // namespace base
namespace content {
class BrowserContext;
}
namespace views {
class View;
class WebView;
} // namespace views
// Kill switch to disable showing the picker on startup. Has no effect if
// features::kNewProfilePicker is disabled.
extern const base::Feature kEnableProfilePickerOnStartupFeature;
// Static-only interface to the profile picker window. All state lives in the
// implementation; this class is never instantiated.
class ProfilePicker {
 public:
  // Only work when passed as the argument 'on_select_profile_target_url' to
  // ProfilePicker::Show.
  static const char kTaskManagerUrl[];

  // An entry point that triggers the profile picker window to open.
  // These values are persisted to logs. Entries should not be renumbered and
  // numeric values should never be reused.
  enum class EntryPoint {
    kOnStartup = 0,
    kProfileMenuManageProfiles = 1,
    kProfileMenuAddNewProfile = 2,
    kOpenNewWindowAfterProfileDeletion = 3,
    // A new session was started while Chrome was already running (e.g. by
    // clicking on the tray icon on Windows).
    kNewSessionOnExistingProcess = 4,
    kProfileLocked = 5,
    kUnableToCreateBrowser = 6,
    kBackgroundModeManager = 7,
    kMaxValue = kBackgroundModeManager,
  };

  // Values for the ProfilePickerOnStartupAvailability policy. Should not be
  // re-numbered. See components/policy/resources/policy_templates.json for
  // documentation.
  enum class AvailabilityOnStartup {
    kEnabled = 0,
    kDisabled = 1,
    kForced = 2,
    kMax = kForced
  };

  // Shows the Profile picker for the given `entry_point` or re-activates an
  // existing one. In the latter case, the displayed page and the target url
  // on profile selection is not updated.
  static void Show(EntryPoint entry_point,
                   const GURL& on_select_profile_target_url = GURL());

  // Starts the sign-in flow. The layout of the window gets updated for the
  // sign-in flow. At the same time, the new profile is created (with
  // `profile_color`) and the sign-in page is rendered using the new profile.
  // `switch_finished_callback` gets informed whether the creation of the new
  // profile succeeded and the sign-in page gets displayed.
  static void SwitchToSignIn(
      SkColor profile_color,
      base::OnceCallback<void(bool)> switch_finished_callback);

  // Cancel the sign-in flow and returns back to the main picker screen (if the
  // original EntryPoint was to open the picker). Must only be called from
  // within the sign-in flow. This will delete the profile previously created
  // for the sign-in flow.
  static void CancelSignIn();

  // Finishes the sign-in flow by moving to the sync confirmation screen. It
  // uses the same new profile created by `SwitchToSignIn()`.
  static void SwitchToSyncConfirmation();

  // Finishes the sign-in flow by moving to the enterprise profile welcome
  // screen. It uses the same new profile created by `SwitchToSignIn()`.
  static void SwitchToEnterpriseProfileWelcome(
      EnterpriseProfileWelcomeUI::ScreenType type,
      base::OnceCallback<void(bool)> proceed_callback);

  // When the sign-in flow cannot be completed because another profile at
  // `profile_path` is already syncing with a chosen account, shows the profile
  // switch screen. It uses the system profile.
  static void SwitchToProfileSwitch(const base::FilePath& profile_path);

  // Shows a dialog where the user can auth the profile or see the
  // auth error message. If a dialog is already shown, this destroys the current
  // dialog and creates a new one.
  static void ShowDialog(content::BrowserContext* browser_context,
                         const GURL& url,
                         const base::FilePath& profile_path);

  // Hides the dialog if it is showing.
  static void HideDialog();

  // Getter of the path of profile which is selected in profile picker for force
  // signin.
  static base::FilePath GetForceSigninProfilePath();

  // Getter of the target page url. If not empty and is valid, it opens on
  // profile selection instead of the new tab page.
  static GURL GetOnSelectProfileTargetUrl();

  // Getter of the path of profile which is displayed on the profile switch
  // screen.
  static base::FilePath GetSwitchProfilePath();

  // Hides the profile picker.
  static void Hide();

  // Returns whether the profile picker is currently open.
  static bool IsOpen();

  // Returns whether the Profile picker is showing and active.
  static bool IsActive();

  // Returns the global profile picker view for testing.
  static views::View* GetViewForTesting();

  // Returns the web view (embedded in the picker) for testing.
  static views::WebView* GetWebViewForTesting();

  // Returns the simple toolbar (embedded in the picker) for testing.
  static views::View* GetToolbarForTesting();

  // Add a callback that will be called the next time the picker is opened.
  static void AddOnProfilePickerOpenedCallbackForTesting(
      base::OnceClosure callback);

  // Overrides the timeout delay for waiting for extended account info.
  static void SetExtendedAccountInfoTimeoutForTesting(base::TimeDelta timeout);

  // Returns a pref value indicating whether the profile picker has ever been
  // shown to the user.
  static bool Shown();

  // Returns whether to show profile picker at launch. This can be called on
  // startup or when Chrome is re-opened, e.g. when clicking on the dock icon on
  // MacOS when there are no windows, or from Windows tray icon.
  // This returns true if the user has multiple profiles and has not opted-out.
  static bool ShouldShowAtLaunch();

 private:
  DISALLOW_COPY_AND_ASSIGN(ProfilePicker);
};
// Dialog that will be displayed when a locked profile is selected in the
// ProfilePicker when force-signin is enabled.
// Static-only interface; like ProfilePicker above, never instantiated.
class ProfilePickerForceSigninDialog {
 public:
  // Dimensions of the reauth dialog displaying the password-separated signin
  // flow.
  static constexpr int kDialogHeight = 512;
  static constexpr int kDialogWidth = 448;

  // Shows a dialog where the user logs into their profile for the first time
  // via the profile picker, when force signin is enabled.
  static void ShowForceSigninDialog(content::BrowserContext* browser_context,
                                    const base::FilePath& profile_path);

  // Show the dialog and display local sign in error message without browser.
  static void ShowDialogAndDisplayErrorMessage(
      content::BrowserContext* browser_context);

  // Display local sign in error message without browser.
  static void DisplayErrorMessage();

  // Hides the dialog if it is showing.
  static void HideDialog();
};
#endif // CHROME_BROWSER_UI_PROFILE_PICKER_H_
|
/*
Copyright (c) 2003-2019, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
// Occitan ('oc') localization strings for the "liststyle" plugin dialog.
// String values are translations and must not be altered here.
CKEDITOR.plugins.setLang( 'liststyle', 'oc', {
	bulletedTitle: 'Proprietats de la lista de piuses',
	circle: 'Cercle',
	decimal: 'Decimal (1, 2, 3, etc.)',
	disc: 'Disc',
	lowerAlpha: 'Letras minusculas (a, b, c, d, e, etc.)',
	lowerRoman: 'Chifras romanas minusculas (i, ii, iii, iv, v, etc.)',
	none: 'Pas cap',
	notset: '<indefinit>',
	numberedTitle: 'Proprietats de la lista numerotada',
	square: 'Carrat',
	start: 'Començament',
	type: 'Tipe',
	upperAlpha: 'Letras majusculas (A, B, C, D, E, etc.)',
	upperRoman: 'Chifras romanas majusculas (I, II, III, IV, V, etc.)',
	validateStartNumber: 'Lo primièr element de la lista deu èsser un nombre entièr.'
} );
|
# Presence of __init__.py makes this directory a Python package
|
/**
*
* Asynchronously loads the component for SendMailAssessment
*
*/
import loadable from 'utils/loadable';

// Code-split: the './index' bundle is fetched lazily on first render.
export default loadable(() => import('./index'));
|