/*
MIT License
Copyright (c) 2021 Marcin Borowicz <marcinbor85@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "inc/ush_node.h"
#include "inc/ush_utils.h"
#include <string.h>
struct ush_node_object* ush_node_get_by_path(struct ush_object *self, const char *path)
{
USH_ASSERT(self != NULL);
USH_ASSERT(path != NULL);
char level_path[self->desc->path_max_length];
size_t levels = ush_utils_get_path_levels_count(path);
if (levels == 0)
return self->root;
if (self->root == NULL)
return NULL;
struct ush_node_object *curr = self->root->childs;
for (size_t i = 1; i <= levels; i++) {
ush_utils_get_path_level(i, path, level_path);
bool found = false;
while (curr != NULL) {
if (strcmp(curr->path, level_path) == 0) {
found = true;
break;
}
curr = curr->next;
}
if (found == false)
break;
if (i == levels)
return curr;
curr = curr->childs;
}
return NULL;
}
void ush_node_get_absolute_path(struct ush_object *self, const char *in_path, char *out_path)
{
    /* validate arguments before reading self->desc for the VLA size */
    USH_ASSERT(self != NULL);
    USH_ASSERT(in_path != NULL);
    USH_ASSERT(out_path != NULL);
    char abs_path[self->desc->path_max_length];
if (in_path[0] == '/') {
strcpy(abs_path, in_path);
} else {
strcpy(abs_path, self->current_node->path);
if (strcmp(self->current_node->path, "/") != 0)
strcat(abs_path, "/");
strcat(abs_path, in_path);
}
size_t abs_path_len = strlen(abs_path);
if ((abs_path_len > 1) && (abs_path[abs_path_len - 1] == '/'))
abs_path[abs_path_len - 1] = '\0';
ush_utils_get_collapse_path(abs_path, out_path);
}
void ush_node_deinit_recursive(struct ush_object *self, struct ush_node_object *node)
{
USH_ASSERT(self != NULL);
struct ush_node_object *curr = node;
while (curr != NULL) {
if (curr->childs != NULL)
ush_node_deinit_recursive(self, curr->childs);
struct ush_node_object *tmp = curr->next;
memset((uint8_t*)curr, 0, sizeof(struct ush_node_object));
curr = tmp;
}
}
|
# -*- coding: utf-8 -*-
"""Collect artifacts from the local filesystem."""
from __future__ import unicode_literals
import os
from dftimewolf.lib.module import BaseModule
class FilesystemCollector(BaseModule):
"""Collect artifacts from the local filesystem.
input: None, takes input from parameters only.
output: A list of existing file paths.
"""
def __init__(self, state):
super(FilesystemCollector, self).__init__(state)
self._paths = None
def setup(self, paths=None): # pylint: disable=arguments-differ
"""Sets up the _paths attribute.
Args:
paths: Comma-separated list of strings representing the paths to collect.
"""
if not paths:
self.state.add_error(
'No `paths` argument provided in recipe, bailing', critical=True)
else:
self._paths = [path.strip() for path in paths.strip().split(',')]
def cleanup(self):
pass
def process(self):
"""Checks whether the paths exists and updates the state accordingly."""
for path in self._paths:
if os.path.exists(path):
self.state.output.append((path, path))
else:
self.state.add_error(
'Path {0:s} does not exist'.format(str(path)), critical=False)
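# Illustrative usage sketch (not part of dftimewolf); `FakeState` is a
# hypothetical stand-in exposing only the attributes this module touches,
# assuming BaseModule stores the state object as `self.state`:
#
#   class FakeState(object):
#       def __init__(self):
#           self.output = []
#       def add_error(self, message, critical=False):
#           print(message)
#
#   state = FakeState()
#   collector = FilesystemCollector(state)
#   collector.setup(paths='/etc/hosts, /tmp/does-not-exist')
#   collector.process()
#   # state.output now holds [('/etc/hosts', '/etc/hosts')] on most systems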
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CONTRIB_LITE_JAVA_TENSORFLOW_LITE_JNI_H_
#define TENSORFLOW_CONTRIB_LITE_JAVA_TENSORFLOW_LITE_JNI_H_
#include <jni.h>
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
/*
* Class: org_tensorflow_lite_TensorFlowLite
* Method: version
* Signature: ()Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL
Java_org_tensorflow_lite_TensorFlowLite_version(JNIEnv*, jclass);
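/*
 * Illustrative Java-side counterpart (a sketch; the actual declaration lives
 * in org/tensorflow/lite/TensorFlowLite.java). Per JNI naming rules, the
 * exported symbol above maps to a static native method on that class:
 *
 *   package org.tensorflow.lite;
 *
 *   public final class TensorFlowLite {
 *     public static native String version();
 *   }
 */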
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // TENSORFLOW_CONTRIB_LITE_JAVA_TENSORFLOW_LITE_JNI_H_
|
/*!
* Copyright (c) 2017 by Contributors
* \file op_util.h
* \brief Common utility used in operator construction.
*/
#ifndef TVM_OP_OP_UTIL_H_
#define TVM_OP_OP_UTIL_H_
#include <tvm/expr.h>
#include <tvm/schedule.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "../pass/ir_util.h"
#include "../pass/arg_binder.h"
#include "../schedule/message_passing.h"
namespace tvm {
namespace op {
using ir::MergeNest;
/*!
* \brief Build loop nest for stage.
*
* \param stage The stage to create a loop nest.
* \param dom_map The range of each iter var.
* \param begin_iter_pos The beginning position of leaf_iter_vars to generate loop.
* \param new_loop_var Whether to create a new loop variable.
* \param skip_iter Whether to skip certain iterations.
* \param p_value_map The result value of each IterVar.
* \param del_trivial_loop Whether to eliminate trivial loops with extent of 1.
*/
std::vector<std::vector<Stmt> >
MakeLoopNest(const Stage& stage,
const std::unordered_map<IterVar, Range>& dom_map,
size_t begin_iter_pos,
bool new_loop_var,
const std::unordered_set<IterVar>& skip_iter,
std::unordered_map<IterVar, Expr>* p_value_map,
bool del_trivial_loop);
/*!
* \brief Create a nest of if checking the predicates.
*
* \param predicates The predicates to be checked.
* \return List of If nest that checks the predicates.
*/
std::vector<Stmt> MakeIfNest(const std::vector<Expr>& predicates);
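/*
 * Illustrative sketch: combined with MergeNest above, the returned nest
 * wraps a body roughly as
 *   if (predicate_0) { if (predicate_1) { body } }
 */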
/*!
* \brief Replace the tensor reference in stmt by the replace map.
* \param stmt The statement to be processed.
* \param replace The replacement rule.
*/
Stmt ReplaceTensor(Stmt stmt,
const std::unordered_map<Tensor, Tensor>& replace);
/*!
* \brief Replace the tensor reference in expr by the replace map.
* \param expr The expression to be processed.
* \param replace The replacement rule.
*/
Expr ReplaceTensor(Expr expr,
const std::unordered_map<Tensor, Tensor>& replace);
/*!
* \brief Substitute the variables of stmt by value map.
* \param stmt The statement to be processed.
* \param value_map The value map.
* \return Substituted result.
*/
Stmt Substitute(Stmt stmt,
const std::unordered_map<IterVar, Expr>& value_map);
Stmt Substitute(Stmt stmt,
const std::unordered_map<const Variable*, Expr>& value_map);
} // namespace op
} // namespace tvm
#endif // TVM_OP_OP_UTIL_H_
|
import anki_vector
from anki_vector.messaging.messages_pb2 import BatteryStateResponse
def main():
args = anki_vector.util.parse_command_args()
with anki_vector.Robot(args.serial) as robot:
battery_state: BatteryStateResponse = robot.get_battery_state()
if battery_state:
print("Robot battery voltage: {0}".format(battery_state.battery_volts))
print("Robot battery Level: {0}".format(battery_state.battery_level))
print("Robot battery is charging: {0}".format(battery_state.is_charging))
print("Robot is on charger platform: {0}".format(battery_state.is_on_charger_platform))
print("Robot's suggested charger time: {0}".format(battery_state.suggested_charger_sec))
if __name__ == "__main__":
main()
|
import command from '@percy/cli-command';
export const validate = command('validate', {
description: 'Validate a Percy config file',
args: [{
name: 'filepath',
description: 'Config filepath, detected by default'
}],
examples: [
'$0',
'$0 ./config/percy.yml'
]
}, async ({ args, log, exit }) => {
let PercyConfig = await import('@percy/config');
// verify a config file can be located
let { config, filepath } = PercyConfig.search(args.filepath);
if (!config) exit(1, 'Config file not found');
// when `bail` is true, .load() returns undefined when validation fails
let result = PercyConfig.load({
path: filepath,
print: true,
bail: true
});
// exit 1 when config is empty
if (!result) exit(1);
});
export default validate;
|
from ex115.lib.interface import *
from ex115.lib.arquivo import *
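# Assumed helper interface (inferred from the calls below, not from the ex115
# package itself): menu(options) -> int, cabecalho(title),
# arquivo_existe(name) -> bool, criar_arq(name), ler_arq(name),
# escrever_arq(name).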
arq = 'Cadastro.txt'
if not arquivo_existe(arq):
    criar_arq(arq)
while True:
    resposta = menu(['View registered people', 'Register a new person', 'Exit the system'])
    if resposta == 1:
        cabecalho('SHOWING REGISTERED PEOPLE')
        ler_arq(arq)
    elif resposta == 2:
        cabecalho('NEW REGISTRATION')
        escrever_arq(arq)
    elif resposta == 3:
        cabecalho('Exiting the system... See you later!')
        break
    else:
        print("Error! Enter a valid option!")
|
# -*- coding: utf-8 -*-
"""
Created on 09/05/2020
@author: Carlos Eduardo Barbosa
Radial profile of Lick indices
"""
import os
import numpy as np
from astropy.io import fits
from astropy.table import Table, join, vstack
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.patches import Rectangle
from matplotlib.colorbar import Colorbar
import scipy.ndimage as ndimage
from tqdm import tqdm
import context
def make_table_obs(filenames, licktype=None, indnames=None):
""" Join tables with Lick indices from observations. """
licktype = "Ia" if licktype is None else licktype
if indnames is None:
indnames = ['bTiO_muse', 'H_beta', 'Fe5015', 'Mg_1', 'Mg_2', 'Mg_b',
'Fe5270', 'Fe5335', 'Fe5406', 'Fe5709', 'Fe5782', 'aTiO',
'Na_D', 'TiO_1', 'TiO_2_muse', 'CaH_1',
'CaH_2_muse', 'TiO_3', 'TiO_4', 'NaI', 'CaT1', 'CaT2',
'CaT3']
ts = []
for fname in tqdm(filenames, desc="Reading tables with indices"):
table = Table.read(fname)
t = Table()
t["BIN"] = [fname.split("_")[2]]
names = [_ for _ in table["name"]]
for i in indnames:
t[i] = table[licktype].data[names.index(i)]
t["{}err".format(i)] = table["{}err".format(licktype,)].data[
names.index(i)]
ts.append(t)
t = vstack(ts)
return t
def make_table_bsf(filenames):
""" Join tables with Lick indices from models. """
ts = []
for fname in tqdm(filenames, desc="Reading bsf tables:"):
ts.append(Table.read(fname))
t = vstack(ts)
return t
def lick_profiles(table_obs, table_models, outimg, indnames=None,
figsize=(7.24, 5)):
if indnames is None:
indnames = ['bTiO_muse', 'H_beta', 'Fe5015', 'Mg_1', 'Mg_2', 'Mg_b',
'Fe5270', 'Fe5335', 'Fe5406', 'Fe5709', 'Fe5782', 'aTiO',
'Na_D', 'TiO_1', 'TiO_2_muse', 'CaH_1',
'CaH_2_muse', 'TiO_3', 'TiO_4', 'NaI', 'CaT1', 'CaT2',
'CaT3']
temptable = templates_table()
gs = gridspec.GridSpec(6, 4, hspace=0.04, left=0.05, right=0.99, top=0.995,
wspace=0.27, bottom=0.065)
fig = plt.figure(figsize=figsize)
ylabels = [_.replace("_", "").replace("muse", "*") for _ in indnames]
for i, index in enumerate(indnames):
ax = plt.subplot(gs[i])
ax.errorbar(table_obs["R"], table_obs[index],
yerr=table_obs["{}err".format(
index)], fmt="o", ecolor="C0", mec="w", mew=0.5, c="C0",
elinewidth=0.5, ms=4, label="NGC 3311")
yerr = [table_models["{}_lowerr".format(index)].data,
table_models["{}_upper".format(index)].data]
ax.errorbar(table_models["R"], table_models[index],
yerr=yerr, fmt="o", ecolor="C1", mec="w", mew=0.5,
c="C1", elinewidth=0.5, ms=4, label="SSP Models")
ax.set_ylabel("{} (\\r{{A}})".format(ylabels[i]))
ax.axhline(temptable[index].min(), c="k", ls="--", lw=0.5)
ax.axhline(temptable[index].max(), c="k", ls="--", lw=0.5,
label="Model limits")
if i > 18:
ax.set_xlabel("R (kpc)")
else:
ax.xaxis.set_ticklabels([])
plt.legend(loc=(1.2, -0.2), prop={"size": 9})
for ext in ["png", "pdf"]:
plt.savefig("{}.{}".format(outimg, ext), dpi=250)
plt.close()
return
def templates_table(w1=4500, w2=10000, sigma=315, licktype="Ia", velscale=None,
sample="all", indnames=None):
velscale = int(context.velscale) if velscale is None else velscale
if indnames is None:
indnames = ['bTiO_muse', 'H_beta', 'Fe5015', 'Mg_1', 'Mg_2', 'Mg_b',
'Fe5270', 'Fe5335', 'Fe5406', 'Fe5709', 'Fe5782', 'aTiO',
'Na_D', 'TiO_1', 'TiO_2_muse', 'CaH_1',
'CaH_2_muse', 'TiO_3', 'TiO_4', 'NaI', 'CaT1', 'CaT2',
'CaT3']
templates_file = os.path.join(context.home, "templates",
"lick_vel{}_w{}_{}_{}_sig{}_{}.fits".format(
velscale, w1, w2, sample, sigma,
licktype))
temptable = Table.read(templates_file)
return temptable
def run_ngc3311(targetSN=250, licktype=None, sigma=315, redo=False,
loglike=None):
licktype = "Ia" if licktype is None else licktype
loglike = "normal2" if loglike is None else loglike
imgname, cubename = context.get_field_files("fieldA")
wdir = os.path.join(os.path.split(cubename)[0], "sn{}".format(targetSN))
geom = Table.read(os.path.join(wdir, "geom.fits"))
# Make table with Lick measurements
lick_dir = os.path.join(wdir, "lick")
lick_table = os.path.join(wdir, "lick_{}.fits".format(licktype))
filenames = sorted([os.path.join(lick_dir, _) for _ in os.listdir(
lick_dir) if _.endswith("sigma{}.fits".format(sigma))])
if os.path.exists(lick_table) and not redo:
tlick = Table.read(lick_table, format="fits")
else:
tlick = make_table_obs(filenames, licktype=licktype)
tlick = join(geom, tlick, "BIN")
        tlick.write(lick_table, format="fits", overwrite=True)
# Reading table with predictions of the models
bsf_dir = os.path.join(wdir, "bsf_lick_{}".format(loglike))
filenames = sorted([os.path.join(bsf_dir, _) for _ in os.listdir(
bsf_dir) if _.endswith("sigma{}_lick.fits".format(
sigma))])
bsf_table = os.path.join(wdir, "bsf_lick_{}.fits".format(licktype))
if os.path.exists(bsf_table) and not redo:
blick = Table.read(bsf_table, format="fits")
else:
blick = make_table_bsf(filenames)
blick = join(geom, blick, "BIN")
        blick.write(bsf_table, format="fits", overwrite=True)
outimg = os.path.join(wdir, "plots/lick_profiles")
lick_profiles(tlick, blick, outimg)
if __name__ == "__main__":
run_ngc3311(redo=False)
|
# -*- coding: utf-8 -*-
import os
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import definitions
import globalcache
# sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
sns.set()
pd.options.mode.chained_assignment = 'raise'
import votesim
from votesim.benchmarks import simple
from votesim import plots, post
# %% Read
benchmark = simple.simple_base_compare_test()
dirname = definitions.DIR_DATA_BENCHMARKS
dirname = os.path.join(dirname, benchmark.name)
@globalcache.cache_decorate('read')
def read():
return benchmark.read(dirname=dirname)
g = globalcache.create(globals())
p = read()
df = p.dataframe
# %%
tolname = 'args.voter-0.1.set_behavior.tol'
basename = 'args.voter-0.1.set_behavior.base'
yname = 'args.etype'
zname = 'output.winner.regret_efficiency_candidate'
df1 = df[[
tolname, basename, yname, zname]].copy()
df1[zname] = df1[zname] * 100
groupby = df1.groupby(by=basename)
for basename1 in groupby.groups:
dfb = groupby.get_group(basename1)
plt.figure()
plt.title(basename1)
plots.heatmap(x=tolname, y=yname, hue=zname, data=dfb)
|
import React from 'react'
import ReactDOM from 'react-dom'
import {Provider} from 'react-redux'
import {createStore, applyMiddleware, compose} from 'redux'
import reduxThunk from 'redux-thunk'
import reduxLogger from 'redux-logger'
import App from "./containers/AppContainer";
import reducers from './reducers'
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const store = createStore(reducers,
composeEnhancers(applyMiddleware(reduxThunk, reduxLogger))
);
ReactDOM.render(
<Provider store={store}>
<App/>
</Provider>,
document.getElementById('root')
);
|
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django.contrib.admin.sites import AdminSite
from mezzanine.utils.importing import import_dotted_path
class LazyAdminSite(AdminSite):
"""
Defers calls to register/unregister until autodiscover is called
to avoid load issues with injectable model fields defined by
``settings.EXTRA_MODEL_FIELDS``.
"""
def __init__(self, *args, **kwargs):
self._deferred = []
super(LazyAdminSite, self).__init__(*args, **kwargs)
def register(self, *args, **kwargs):
for name, deferred_args, deferred_kwargs in self._deferred:
if name == "unregister" and deferred_args[0] == args[0]:
self._deferred.append(("register", args, kwargs))
break
else:
super(LazyAdminSite, self).register(*args, **kwargs)
def unregister(self, *args, **kwargs):
self._deferred.append(("unregister", args, kwargs))
def lazy_registration(self):
for name, deferred_args, deferred_kwargs in self._deferred:
getattr(AdminSite, name)(self, *deferred_args, **deferred_kwargs)
@property
def urls(self):
from django.conf import settings
urls = patterns("", ("", super(LazyAdminSite, self).urls),)
# Filebrowser admin media library.
fb_name = getattr(settings, "PACKAGE_NAME_FILEBROWSER", "")
if fb_name in settings.INSTALLED_APPS:
try:
fb_urls = import_dotted_path("%s.sites.site" % fb_name).urls
except ImportError:
fb_urls = "%s.urls" % fb_name
urls = patterns("", ("^media-library/", include(fb_urls)),) + urls
        # Give the urlpattern for the user password change view an
        # actual name, so that it can be reversed when multiple
        # languages are supported in the admin.
for admin in self._registry.values():
user_change_password = getattr(admin, "user_change_password", None)
if user_change_password:
urls = patterns("",
url("^auth/user/(\d+)/password/$",
self.admin_view(user_change_password),
name="user_change_password"),
) + urls
break
return urls
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 17:54:18 2015
@author: anderson
"""
from setuptools import setup
import io
import version
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst')
setup(name='pyspyke',
version=version.version,
description='Python Package to analyse Spyke',
long_description=long_description,
url='https://github.com/britodasilva/pyspyke.git',
      author='Anderson Brito da Silva',
author_email='a.brito-da-silva1@newcastle.ac.uk',
license='MIT',
install_requires=['numpy','matplotlib','scipy','h5py','sklearn'],
zip_safe=False)
|
#coding:utf-8
terminal_servers_conf = [
{
"host": "127.0.0.1",
"port": 8089,
"tag": "word2vec",
},
{
"host": "127.0.0.1",
"port": 8089,
"tag": "user profile",
}
]
|
// Won't work because '..' points outside the extension's allowed folder:
//import "../common.js"
import 'https://sohrabsaran.github.io/softwareSkillsDemos/frontend/common.js'
function changeDom(changeCode, afterChangingDom) {
function afterGettingContent(contentDocHtml) {
console.log('contentDocHtml', contentDocHtml)
//parse the html
let contentDoc = (new DOMParser()).parseFromString(
contentDocHtml, 'text/html')
afterChangingDom(contentDoc)
}
chrome.tabs.query({ active: true }, function (tabs) {
var tab = tabs[0];
chrome.tabs.executeScript(
tab.id, {
code:
`(function() { try {
`+ changeCode +
`
return document.documentElement.outerHTML
}catch(e){return e.message+'\\n'+e.stack}})();
`
},
            // executeScript yields one result per injected frame; pass the top frame's
            (results) =>
                withErrorLoggingFromChromeRuntime(results && results[0], afterGettingContent)
);
});
}
function runTest() {
changeDom(`document.body.style.background='yellow'`,
(contentDoc) => { el("id1").innerText = contentDoc.documentElement.outerHTML })
}
function withErrorLoggingFromChromeRuntime(result, fn) {
    // check chrome.runtime.lastError first so failures are logged even
    // before the continuation runs
    const lastErr = chrome.runtime.lastError;
    if (lastErr) console.log(' lastError: ' + JSON.stringify(lastErr));
    fn(result)
}
$('#runTestButton').click(runTest)
|
import re
from typing import Optional, TYPE_CHECKING
from django.apps import apps
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from .conf import (
REDIRECTS_IGNORE_PATH_PREFIXES,
)
from .recievers import get_redirect
from .services import (
increase_redirect_counter,
get_redirect_response
)
from .utils import strip_language_from_path
if TYPE_CHECKING:
from django.contrib.sites.models import Site
from django.http.response import HttpResponse
from .models import Redirect
__all__ = (
'RedirectMiddleware',
'extra_slashes_redirect_middleware'
)
REDIRECTS_EXTRA_SLASHES_REDIRECT_EXEMPT_URLS = []
if hasattr(settings, 'REDIRECTS_EXTRA_SLASHES_REDIRECT_EXEMPT_URLS'):
REDIRECTS_EXTRA_SLASHES_REDIRECT_EXEMPT_URLS += [
re.compile(url)
for url in settings.REDIRECTS_EXTRA_SLASHES_REDIRECT_EXEMPT_URLS
]
class RedirectMiddleware(MiddlewareMixin):
def __init__(self, get_response=None):
if not apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured(
"You cannot use RedirectMiddleware when "
"django.contrib.sites is not installed."
)
super().__init__(get_response)
def process_response(self, request, response) -> 'HttpResponse':
        language_code: Optional[str] = getattr(request, 'LANGUAGE_CODE', None)
path: str = strip_language_from_path(path=request.path)
if isinstance(REDIRECTS_IGNORE_PATH_PREFIXES, str):
ignore_prefixes = (REDIRECTS_IGNORE_PATH_PREFIXES, )
else:
ignore_prefixes = tuple(REDIRECTS_IGNORE_PATH_PREFIXES)
if path.startswith(ignore_prefixes):
return response
current_site: 'Site' = get_current_site(request)
r: Optional['Redirect'] = (
get_redirect(site=current_site, old_path=path)
)
if r:
if request.GET and not r.is_ignore_get_params:
r = None
if r is None:
full_path: str = (
strip_language_from_path(
path=request.get_full_path()
)
)
            r: Optional['Redirect'] = (
get_redirect(site=current_site, old_path=full_path)
)
if (
r is None
and settings.APPEND_SLASH
and not request.path.endswith('/')
):
            r: Optional['Redirect'] = (
get_redirect(
site=current_site,
old_path=request.get_full_path(force_append_slash=True)
)
)
if r is not None:
if (
language_code is not None
and r.languages
and language_code not in r.languages
):
return response
increase_redirect_counter(redirect=r)
return (
get_redirect_response(
redirect=r,
request=request,
response=response
)
)
# No redirect was found. Return the response.
return response
def extra_slashes_redirect_middleware(get_response):
"""
Middleware to redirect from urls with extra slashes
at the end to urls with one slash
"""
def middleware(request):
path = request.get_full_path()
if '//' in path:
path = re.sub(r'(/)\1+', r'\1', path)
return redirect(path, permanent=True)
if not settings.APPEND_SLASH:
return get_response(request)
is_url_exempt = any(
url.match(path.lstrip('/'))
for url in REDIRECTS_EXTRA_SLASHES_REDIRECT_EXEMPT_URLS
)
if is_url_exempt:
return get_response(request)
is_url_to_redirect = all([
not request.GET,
len(path) > 1,
not str(path).endswith('/')
])
if is_url_to_redirect:
urlconf = getattr(request, 'urlconf', None)
# if path without slash is not valid - append slash
if not is_valid_path(path, urlconf):
path += '/'
if is_valid_path(path, urlconf):
return redirect(path, permanent=True)
if path.endswith('?'):
path = path[:-1]
return redirect(path, permanent=True)
return get_response(request)
return middleware
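# Illustrative behaviour of the slash-collapsing substitution above:
#   re.sub(r'(/)\1+', r'\1', '/blog///post//1/') == '/blog/post/1/'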
|
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Karma configuration
* @param {!Object} config config object from Karma
*/
module.exports = function(config) {
config.set({
basePath: '',
plugins: [
require.resolve('@open-wc/karma-esm'),
'karma-*',
],
frameworks: ['esm', 'jasmine'],
files: [
{
pattern: 'lib/**/*_test.js',
watched: true,
type: 'module'
},
],
// @see https://github.com/open-wc/open-wc/tree/master/packages/karma-esm#configuration
esm: {
nodeResolve: true,
compatibility: 'auto',
preserveSymlinks: true,
},
// Note setting --browsers on the command-line always overrides this list.
browsers: [
'ChromeHeadlessNoSandbox',
],
customLaunchers: {
ChromeHeadlessNoSandbox: {
base: 'ChromeHeadless',
flags: ['--no-sandbox'],
}
}
});
};
|
'''
Created by auto_sdk on 2019.10.21
'''
from dingtalk.api.base import RestApi
class OapiProcessApproversForecastRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.request = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.process.approvers.forecast'
|
dojo.provide("dojox.data.ServiceStore");
// note that dojox.rpc.Service is not required, you can create your own services
// A ServiceStore is a read-only data store that provides a dojo.data interface to an RPC service.
// var myServices = new dojox.rpc.Service(dojo.moduleUrl("dojox.rpc.tests.resources", "test.smd"));
// var serviceStore = new dojox.data.ServiceStore({service:myServices.ServiceStore});
//
// The ServiceStore also supports lazy loading. References can be made to objects that have not been loaded.
// For example if a service returned:
// {"name":"Example","lazyLoadedObject":{"$ref":"obj2"}}
//
// And this object is accessed using the dojo.data API:
// var obj = serviceStore.getValue(myObject,"lazyLoadedObject");
// The object would automatically be requested from the server (with an object id of "obj2").
//
dojo.declare("dojox.data.ServiceStore",
// ClientFilter is intentionally not required, ServiceStore does not need it, and is more
// lightweight without it, but if it is provided, the ServiceStore will use it.
dojox.data.ClientFilter,
{
constructor: function(options){
//summary:
// ServiceStore constructor, instantiate a new ServiceStore
// A ServiceStore can be configured from a JSON Schema. Queries are just
// passed through to the underlying services
//
// options:
// Keyword arguments
// The *schema* parameter
// This is a schema object for this store. This should be JSON Schema format.
//
// The *service* parameter
// This is the service object that is used to retrieve lazy data and save results
// The function should be directly callable with a single parameter of an object id to be loaded
//
// The *idAttribute* parameter
// Defaults to 'id'. The name of the attribute that holds an object's id.
// This can be a preexisting id provided by the server.
// If an ID isn't already provided when an object
// is fetched or added to the store, the autoIdentity system
// will generate an id for it and add it to the index.
//
// The *estimateCountFactor* parameter
// This parameter is used by the ServiceStore to estimate the total count. When
// paging is indicated in a fetch and the response includes the full number of items
// requested by the fetch's count parameter, then the total count will be estimated
// to be estimateCountFactor multiplied by the provided count. If this is 1, then it is assumed that the server
// does not support paging, and the response is the full set of items, where the
// total count is equal to the number of items returned. If the server does support
// paging, an estimateCountFactor of 2 is a good value for estimating the total count.
// It is also possible to override _processResults if the server can provide an exact
// total count.
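//
// For example (a sketch of the arithmetic in _processResults below): with
// an estimateCountFactor of 2, a fetch with start=20 and count=10 that
// returns a full page of 10 items yields an estimated totalCount of
// 20 + 10 * 2 = 40.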
//
// The *syncMode* parameter
// Setting this to true will set the store to using synchronous calls by default.
// Sync calls return their data immediately from the calling function, so
// callbacks are unnecessary. This will only work with a synchronous capable service.
//
// description:
// ServiceStore can do client side caching and result set updating if
// dojox.data.ClientFilter is loaded. To do this, add:
// | dojo.require("dojox.data.ClientFilter")
// prior to loading the ServiceStore (ClientFilter must be loaded before ServiceStore).
// To utilize client side filtering with a subclass, you can break queries into
// client side and server side components by putting client side actions in
// clientFilter property in fetch calls. For example you could override fetch:
// | fetch: function(args){
// | // do the sorting and paging on the client side
// | args.clientFilter = {start:args.start, count: args.count, sort: args.sort};
// | // args.query will be passed to the service object for the server side handling
// | return this.inherited(arguments);
// | }
// When extending this class, if you would like to create lazy objects, you can follow
// the example from dojox.data.tests.stores.ServiceStore:
// | var lazyItem = {
// | _loadObject: function(callback){
// | this.name="loaded";
// | delete this._loadObject;
// | callback(this);
// | }
// | };
//setup a byId alias to the api call
this.byId=this.fetchItemByIdentity;
this._index = {};
// if the advanced json parser is enabled, we can pass through object updates as onSet events
if(options){
dojo.mixin(this,options);
}
// We supply a default idAttribute for parser driven construction, but if no id attribute
// is supplied, it should be null so that auto identification takes place properly
this.idAttribute = (options && options.idAttribute) || (this.schema && this.schema._idAttr);
this.labelAttribute = this.labelAttribute || "label";
},
schema: null,
idAttribute: "id",
syncMode: false,
estimateCountFactor: 1,
getSchema: function(){
return this.schema;
},
loadLazyValues:true,
getValue: function(/*Object*/ item, /*String*/property, /*value?*/defaultValue){
// summary:
// Gets the value of an item's 'property'
//
// item:
// The item to get the value from
// property:
// property to look up value for
// defaultValue:
// the default value
var value = item[property];
return value || // return the plain value since it was found;
(property in item ? // a truthy value was not found, see if we actually have it
value : // we do, so we can return it
item._loadObject ? // property was not found, maybe because the item is not loaded, we will try to load it synchronously so we can get the property
(dojox.rpc._sync = true) && arguments.callee.call(this,dojox.data.ServiceStore.prototype.loadItem({item:item}) || {}, property, defaultValue) : // load the item and run getValue again
defaultValue);// not in item -> return default value
},
getValues: function(item, property){
// summary:
// Gets the value of an item's 'property' and returns
// it. If this value is an array it is just returned,
// if not, the value is added to an array and that is returned.
//
// item: /* object */
// property: /* string */
// property to look up value for
var val = this.getValue(item,property);
return val instanceof Array ? val : val === undefined ? [] : [val];
},
getAttributes: function(item){
// summary:
// Gets the available attributes of an item's 'property' and returns
// it as an array.
//
// item: /* object */
var res = [];
for(var i in item){
if(item.hasOwnProperty(i) && !(i.charAt(0) == '_' && i.charAt(1) == '_')){
res.push(i);
}
}
return res;
},
hasAttribute: function(item,attribute){
// summary:
// Checks to see if item has attribute
//
// item: /* object */
// attribute: /* string */
return attribute in item;
},
containsValue: function(item, attribute, value){
// summary:
// Checks to see if 'item' has 'value' at 'attribute'
//
// item: /* object */
// attribute: /* string */
// value: /* anything */
return dojo.indexOf(this.getValues(item,attribute),value) > -1;
},
isItem: function(item){
// summary:
// Checks to see if the argument is an item
//
// item: /* object */
// attribute: /* string */
// we have no way of determining if it belongs, we just have object returned from
// service queries
return (typeof item == 'object') && item && !(item instanceof Date);
},
isItemLoaded: function(item){
// summary:
// Checks to see if the item is loaded.
//
// item: /* object */
return item && !item._loadObject;
},
loadItem: function(args){
// summary:
// Loads an item and calls the callback handler. Note, that this will call the callback
// handler even if the item is loaded. Consequently, you can use loadItem to ensure
// that an item is loaded in situations when the item may or may not be loaded yet.
// If you access a value directly through property access, you can use this to load
// a lazy value as well (doesn't need to be an item).
//
// example:
// store.loadItem({
// item: item, // this item may or may not be loaded
// onItem: function(item){
// // do something with the item
// }
// });
var item;
if(args.item._loadObject){
args.item._loadObject(function(result){
item = result; // in synchronous mode this can allow loadItem to return the value
delete item._loadObject;
var func = result instanceof Error ? args.onError : args.onItem;
if(func){
func.call(args.scope, result);
}
});
}else if(args.onItem){
// even if it is already loaded, we will still call the callback; this makes it easier to
// use when it is not known if the item is loaded (you can always safely call loadItem).
args.onItem.call(args.scope, args.item);
}
return item;
},
_currentId : 0,
_processResults : function(results, deferred){
// this should return an object with the items as an array and the total count of
// items (maybe more than currently in the result set).
// for example:
// | {totalCount:10, items:[{id:1},{id:2}]}
// index the results, assigning ids as necessary
if (results && typeof results == 'object'){
var id = results.__id;
if(!id){// if it hasn't been assigned yet
if(this.idAttribute){
// use the defined id if available
id = results[this.idAttribute];
}else{
id = this._currentId++;
}
if(id !== undefined){
var existingObj = this._index[id];
if(existingObj){
for(var j in existingObj){
delete existingObj[j]; // clear it so we can mixin
}
results = dojo.mixin(existingObj,results);
}
results.__id = id;
this._index[id] = results;
}
}
for (var i in results){
results[i] = this._processResults(results[i], deferred).items;
}
}
var count = results.length;
return {totalCount: deferred.request.count == count ? (deferred.request.start || 0) + count * this.estimateCountFactor : count, items: results};
},
close: function(request){
return request && request.abort && request.abort();
},
fetch: function(args){
// summary:
// See dojo.data.api.Read.fetch
//
// The *queryOptions.cache* parameter
// If true, indicates that the query result should be cached for future use. This is only available
// if dojox.data.ClientFilter has been loaded before the ServiceStore
//
// The *syncMode* parameter
// Indicates that the call should be fetch synchronously if possible (this is not always possible)
//
// The *clientFetch* parameter
// This is a fetch keyword argument for explicitly doing client side filtering, querying, and paging
args = args || {};
if("syncMode" in args ? args.syncMode : this.syncMode){
dojox.rpc._sync = true;
}
var self = this;
var scope = args.scope || self;
var defResult = this.cachingFetch ? this.cachingFetch(args) : this._doQuery(args);
defResult.request = args;
defResult.addCallback(function(results){
if(args.clientFetch){
results = self.clientSideFetch({query:args.clientFetch,sort:args.sort,start:args.start,count:args.count},results);
}
var resultSet = self._processResults(results, defResult);
results = args.results = resultSet.items;
if(args.onBegin){
args.onBegin.call(scope, resultSet.totalCount, args);
}
if(args.onItem){
for(var i=0; i<results.length;i++){
args.onItem.call(scope, results[i], args);
}
}
if(args.onComplete){
args.onComplete.call(scope, args.onItem ? null : results, args);
}
return results;
});
defResult.addErrback(args.onError && dojo.hitch(scope, args.onError));
args.abort = function(){
// abort the request
defResult.ioArgs.xhr.abort();
};
args.store = this;
return args;
},
_doQuery: function(args){
var query = typeof args.queryStr == 'string' ? args.queryStr : args.query;
return this.service(query);
},
getFeatures: function(){
// summary:
// return the store feature set
return {
"dojo.data.api.Read": true,
"dojo.data.api.Identity": true,
"dojo.data.api.Schema": this.schema
};
},
getLabel: function(item){
// summary:
// returns the label for an item. Just gets the "label" attribute.
//
return this.getValue(item,this.labelAttribute);
},
getLabelAttributes: function(item){
// summary:
// returns an array of attributes that are used to create the label of an item
return [this.labelAttribute];
},
//Identity API Support
getIdentity: function(item){
return item.__id;
},
getIdentityAttributes: function(item){
// summary:
// returns the attributes which are used to make up the
// identity of an item. Basically returns this.idAttribute
return [this.idAttribute];
},
fetchItemByIdentity: function(args){
// summary:
// fetch an item by its identity, by looking in our index of what we have loaded
var item = this._index[(args._prefix || '') + args.identity];
if(item && args.onItem){
args.onItem.call(args.scope, item);
}else{
// convert the different spellings
return this.fetch({
query: args.identity,
onComplete: args.onItem,
onError: args.onError,
scope: args.scope
}).results;
}
return item;
}
}
);
|
var model = require('../../user/user'),
expect = require('expect.js');
describe('User', function() {
it('should initialize to a blank object', function() {
var newUser = new model.User();
expect(newUser).not.to.be(undefined);
expect(newUser).not.to.be(null);
expect(newUser.nickname).to.be("");
expect(newUser.ticket).to.be("");
});
it('should perform a copy when passed a single instance', function() {
var copyFromObject = { nickname: "derek webb", ticket: "12345" };
var newUser = new model.User(copyFromObject);
expect(newUser).not.to.be(undefined);
expect(newUser).not.to.be(null);
expect(newUser.nickname).to.be("derek webb");
expect(newUser.ticket).to.be("12345");
});
it('should validate nicknames', function() {
var user = new model.User();
expect(user.isValid()).to.be(false);
user.nickname = null;
expect(user.isValid()).to.be(false);
user.nickname = "alex smith";
expect(user.isValid()).to.be(true);
user.nickname = "ueyJJMC7C37imPxYM7tNrSXvYp6ketNNoDEvb1pGAhEH2Ak0F1kFUoJC1lHhuyCTAhUTNKtsrhKclnSD8l5R2f3KjmRKtLQNOJiu2a3j4UyOofBbxG2IGxoynIOp5IpuHqabzOcQY3ae7WKe5JaFvymb6z8OMMlB3FryBdV6eVp9Twpqm93j6LUcr7ceXbLUUq3ZBkFsmX6tLMA3brdDrw7FJJmcrCwt5FfkuWYk2OW0FbLAS3GiwXrghn2kiDcC";
expect(user.isValid()).to.be(false);
});
});
|
import cloneDeep from '../../../utils/clone-deep';
import looseEqual from '../../../utils/loose-equal';
import { concat } from '../../../utils/array';
import { isFunction, isString, isRegExp } from '../../../utils/inspect';
import { toInteger } from '../../../utils/number';
import { escapeRegExp } from '../../../utils/string';
import { warn } from '../../../utils/warn';
import stringifyRecordValues from './stringify-record-values';
var DEBOUNCE_DEPRECATED_MSG = 'Prop "filter-debounce" is deprecated. Use the debounce feature of "<b-form-input>" instead.';
var RX_SPACES = /[\s\uFEFF\xA0]+/g;
export default {
props: {
filter: {
type: [String, RegExp, Object, Array],
default: null
},
filterFunction: {
type: Function,
default: null
},
filterIgnoredFields: {
type: Array // default: undefined
},
filterIncludedFields: {
type: Array // default: undefined
},
filterDebounce: {
type: [Number, String],
deprecated: DEBOUNCE_DEPRECATED_MSG,
default: 0,
validator: function validator(val) {
return /^\d+/.test(String(val));
}
}
},
data: function data() {
return {
// Flag for displaying which empty slot to show and some event triggering
isFiltered: false,
// Where we store the copy of the filter criteria after debouncing
// We pre-set it with the sanitized filter value
localFilter: this.filterSanitize(this.filter)
};
},
computed: {
computedFilterIgnored: function computedFilterIgnored() {
return this.filterIgnoredFields ? concat(this.filterIgnoredFields).filter(Boolean) : null;
},
computedFilterIncluded: function computedFilterIncluded() {
return this.filterIncludedFields ? concat(this.filterIncludedFields).filter(Boolean) : null;
},
computedFilterDebounce: function computedFilterDebounce() {
var ms = toInteger(this.filterDebounce) || 0;
/* istanbul ignore next */
if (ms > 0) {
warn(DEBOUNCE_DEPRECATED_MSG, 'BTable');
}
return ms;
},
localFiltering: function localFiltering() {
return this.hasProvider ? !!this.noProviderFiltering : true;
},
// For watching changes to `filteredItems` vs `localItems`
filteredCheck: function filteredCheck() {
return {
filteredItems: this.filteredItems,
localItems: this.localItems,
localFilter: this.localFilter
};
},
// Sanitized/normalize filter-function prop
localFilterFn: function localFilterFn() {
// Return `null` to signal to use internal filter function
return isFunction(this.filterFunction) ? this.filterFunction : null;
},
// Returns the records in `localItems` that match the filter criteria
// Returns the original `localItems` array if not filtering
filteredItems: function filteredItems() {
var items = this.localItems || []; // Note the criteria is debounced and sanitized
var criteria = this.localFilter; // Resolve the filtering function, when requested
// We prefer the provided filtering function and fallback to the internal one
// When no filtering criteria is specified the filtering factories will return `null`
var filterFn = this.localFiltering ? this.filterFnFactory(this.localFilterFn, criteria) || this.defaultFilterFnFactory(criteria) : null; // We only do local filtering when requested and there are records to filter
return filterFn && items.length > 0 ? items.filter(filterFn) : items;
}
},
watch: {
// Watch for debounce being set to 0
computedFilterDebounce: function computedFilterDebounce(newVal) {
if (!newVal && this.$_filterTimer) {
clearTimeout(this.$_filterTimer);
this.$_filterTimer = null;
this.localFilter = this.filterSanitize(this.filter);
}
},
// Watch for changes to the filter criteria, and debounce if necessary
filter: {
// We need a deep watcher in case the user passes
// an object when using `filter-function`
deep: true,
handler: function handler(newCriteria) {
var _this = this;
var timeout = this.computedFilterDebounce;
clearTimeout(this.$_filterTimer);
this.$_filterTimer = null;
if (timeout && timeout > 0) {
// If we have a debounce time, delay the update of `localFilter`
this.$_filterTimer = setTimeout(function () {
_this.localFilter = _this.filterSanitize(newCriteria);
}, timeout);
} else {
// Otherwise, immediately update `localFilter` with `newFilter` value
this.localFilter = this.filterSanitize(newCriteria);
}
}
},
// Watch for changes to the filter criteria and filtered items vs `localItems`
// Set visual state and emit events as required
filteredCheck: function filteredCheck(_ref) {
var filteredItems = _ref.filteredItems,
localFilter = _ref.localFilter;
// Determine if the dataset is filtered or not
var isFiltered = false;
if (!localFilter) {
// If filter criteria is falsey
isFiltered = false;
} else if (looseEqual(localFilter, []) || looseEqual(localFilter, {})) {
// If filter criteria is an empty array or object
isFiltered = false;
} else if (localFilter) {
// If filter criteria is truthy
isFiltered = true;
}
if (isFiltered) {
this.$emit('filtered', filteredItems, filteredItems.length);
}
this.isFiltered = isFiltered;
},
isFiltered: function isFiltered(newVal, oldVal) {
if (newVal === false && oldVal === true) {
// We need to emit a filtered event if isFiltered transitions from true to
// false so that users can update their pagination controls.
this.$emit('filtered', this.localItems, this.localItems.length);
}
}
},
created: function created() {
var _this2 = this;
// Create non-reactive prop where we store the debounce timer id
this.$_filterTimer = null; // If filter is "pre-set", set the criteria
// This will trigger any watchers/dependents
// this.localFilter = this.filterSanitize(this.filter)
// Set the initial filtered state in a `$nextTick()` so that
// we trigger a filtered event if needed
this.$nextTick(function () {
_this2.isFiltered = Boolean(_this2.localFilter);
});
},
beforeDestroy: function beforeDestroy()
/* istanbul ignore next */
{
clearTimeout(this.$_filterTimer);
this.$_filterTimer = null;
},
methods: {
filterSanitize: function filterSanitize(criteria) {
// Sanitizes filter criteria based on internal or external filtering
if (this.localFiltering && !this.localFilterFn && !(isString(criteria) || isRegExp(criteria))) {
// If using internal filter function, which only accepts string or RegExp,
// return '' to signify no filter
return '';
} // Could be a string, object or array, as needed by external filter function
// We use `cloneDeep` to ensure we have a new copy of an object or array
// without Vue's reactive observers
return cloneDeep(criteria);
},
// Filter Function factories
filterFnFactory: function filterFnFactory(filterFn, criteria) {
// Wrapper factory for external filter functions
// Wrap the provided filter-function and return a new function
// Returns `null` if no filter-function defined or if criteria is falsey
// Rather than directly grabbing `this.computedLocalFilterFn` or `this.filterFunction`
// we have it passed, so that the caller computed prop will be reactive to changes
// in the original filter-function (as this routine is a method)
if (!filterFn || !isFunction(filterFn) || !criteria || looseEqual(criteria, []) || looseEqual(criteria, {})) {
return null;
} // Build the wrapped filter test function, passing the criteria to the provided function
var fn = function fn(item) {
// Generated function returns true if the criteria matches part
// of the serialized data, otherwise false
return filterFn(item, criteria);
}; // Return the wrapped function
return fn;
},
defaultFilterFnFactory: function defaultFilterFnFactory(criteria) {
var _this3 = this;
// Generates the default filter function, using the given filter criteria
// Returns `null` if no criteria or criteria format not supported
if (!criteria || !(isString(criteria) || isRegExp(criteria))) {
// Built in filter can only support strings or RegExp criteria (at the moment)
return null;
} // Build the RegExp needed for filtering
var regExp = criteria;
if (isString(regExp)) {
// Escape special RegExp characters in the string and convert contiguous
// whitespace to \s+ matches
var pattern = escapeRegExp(criteria).replace(RX_SPACES, '\\s+'); // Build the RegExp (no need for global flag, as we only need
// to find the value once in the string)
regExp = new RegExp(".*".concat(pattern, ".*"), 'i');
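      // e.g. criteria 'foo  bar' becomes /.*foo\s+bar.*/i (illustrative)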
} // Generate the wrapped filter test function to use
var fn = function fn(item) {
// This searches all row values (and sub property values) in the entire (excluding
// special `_` prefixed keys), because we convert the record to a space-separated
// string containing all the value properties (recursively), even ones that are
// not visible (not specified in this.fields)
// Users can ignore filtering on specific fields, or on only certain fields,
    // and can optionally specify searching results of fields with a formatter
//
// TODO: Enable searching on scoped slots (optional, as it will be SLOW)
//
// Generated function returns true if the criteria matches part of
// the serialized data, otherwise false
//
// We set `lastIndex = 0` on the `RegExp` in case someone specifies the `/g` global flag
regExp.lastIndex = 0;
return regExp.test(stringifyRecordValues(item, _this3.computedFilterIgnored, _this3.computedFilterIncluded, _this3.computedFieldsObj));
}; // Return the generated function
return fn;
}
}
};
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This is a small DSL to describe builds of Facebook's open-source projects
that are published to Github from a single internal repo, including projects
that depend on folly, wangle, proxygen, fbthrift, etc.
This file defines the interface of the DSL, and common utilities, but you
will have to instantiate a specific builder, with specific options, in
order to get work done -- see e.g. make_docker_context.py.
== Design notes ==
Goals:
- A simple declarative language for what needs to be checked out & built,
how, in what order.
- The same specification should work for external continuous integration
builds (e.g. Travis + Docker) and for internal VM-based continuous
integration builds.
- One should be able to build without root, and to install to a prefix.
Non-goals:
- General usefulness. The only point of this is to make it easier to build
and test Facebook's open-source services.
Ideas for the future -- these may not be very good :)
- Especially on Ubuntu 14.04 the current initial setup is inefficient:
we add PPAs after having installed a bunch of packages -- this prompts
reinstalls of large amounts of code. We also `apt-get update` a few
times.
- A "shell script" builder. Like DockerFBCodeBuilder, but outputs a
shell script that runs outside of a container. Or maybe even
synchronously executes the shell commands, `make`-style.
- A "Makefile" generator. That might make iterating on builds even quicker
than what you can currently get with Docker build caching.
- Generate a rebuild script that can be run e.g. inside the built Docker
container by tagging certain steps with list-inheriting Python objects:
* do change directories
* do NOT `git clone` -- if we want to update code this should be a
separate script that e.g. runs rebase on top of specific targets
across all the repos.
* do NOT install software (most / all setup can be skipped)
* do NOT `autoreconf` or `configure`
* do `make` and `cmake`
- If we get non-Debian OSes, part of ccache setup should be factored out.
"""
import os
import re
from shell_quoting import path_join, shell_join, ShellQuoted
def _read_project_github_hashes():
base_dir = "deps/github_hashes/" # trailing slash used in regex below
for dirname, _, files in os.walk(base_dir):
for filename in files:
path = os.path.join(dirname, filename)
with open(path) as f:
                m_proj = re.match("^" + base_dir + r"(.*)-rev\.txt$", path)
if m_proj is None:
raise RuntimeError("Not a hash file? {0}".format(path))
                m_hash = re.match(r"^Subproject commit ([0-9a-f]+)\n$", f.read())
if m_hash is None:
raise RuntimeError("No hash in {0}".format(path))
yield m_proj.group(1), m_hash.group(1)
class FBCodeBuilder(object):
def __init__(self, **kwargs):
self._options_do_not_access = kwargs # Use .option() instead.
# This raises upon detecting options that are specified but unused,
# because otherwise it is very easy to make a typo in option names.
self.options_used = set()
# Mark 'projects_dir' used even if the build installs no github
# projects. This is needed because driver programs like
# `shell_builder.py` unconditionally set this for all builds.
self._github_dir = self.option("projects_dir")
self._github_hashes = dict(_read_project_github_hashes())
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
", ".join(
"{0}={1}".format(k, repr(v))
for k, v in self._options_do_not_access.items()
),
)
def option(self, name, default=None):
value = self._options_do_not_access.get(name, default)
if value is None:
raise RuntimeError("Option {0} is required".format(name))
self.options_used.add(name)
return value
def has_option(self, name):
return name in self._options_do_not_access
def add_option(self, name, value):
if name in self._options_do_not_access:
raise RuntimeError("Option {0} already set".format(name))
self._options_do_not_access[name] = value
#
# Abstract parts common to every installation flow
#
def render(self, steps):
"""
Converts nested actions to your builder's expected output format.
Typically takes the output of build().
"""
res = self._render_impl(steps) # Implementation-dependent
# Now that the output is rendered, we expect all options to have
# been used.
unused_options = set(self._options_do_not_access)
unused_options -= self.options_used
if unused_options:
raise RuntimeError(
"Unused options: {0} -- please check if you made a typo "
"in any of them. Those that are truly not useful should "
"be not be set so that this typo detection can be useful.".format(
unused_options
)
)
return res
def build(self, steps):
if not steps:
raise RuntimeError(
"Please ensure that the config you are passing " "contains steps"
)
return [self.setup(), self.diagnostics()] + steps
def setup(self):
"Your builder may want to install packages here."
raise NotImplementedError
def diagnostics(self):
"Log some system diagnostics before/after setup for ease of debugging"
# The builder's repr is not used in a command to avoid pointlessly
# invalidating Docker's build cache.
return self.step(
"Diagnostics",
[
self.comment("Builder {0}".format(repr(self))),
self.run(ShellQuoted("hostname")),
self.run(ShellQuoted("cat /etc/issue || echo no /etc/issue")),
self.run(ShellQuoted("g++ --version || echo g++ not installed")),
self.run(ShellQuoted("cmake --version || echo cmake not installed")),
],
)
def step(self, name, actions):
"A labeled collection of actions or other steps"
raise NotImplementedError
def run(self, shell_cmd):
"Run this bash command"
raise NotImplementedError
def set_env(self, key, value):
'Set the environment "key" to value "value"'
raise NotImplementedError
def workdir(self, dir):
"Create this directory if it does not exist, and change into it"
raise NotImplementedError
def copy_local_repo(self, dir, dest_name):
"""
Copy the local repo at `dir` into this step's `workdir()`, analog of:
cp -r /path/to/folly folly
"""
raise NotImplementedError
def python_deps(self):
return [
"wheel",
"cython==0.28.6",
]
def debian_deps(self):
return [
"autoconf-archive",
"bison",
"build-essential",
"cmake",
"curl",
"flex",
"git",
"gperf",
"joe",
"libboost-all-dev",
"libcap-dev",
"libdouble-conversion-dev",
"libevent-dev",
"libgflags-dev",
"libgoogle-glog-dev",
"libkrb5-dev",
"libpcre3-dev",
"libpthread-stubs0-dev",
"libnuma-dev",
"libsasl2-dev",
"libsnappy-dev",
"libsqlite3-dev",
"libssl-dev",
"libtool",
"netcat-openbsd",
"pkg-config",
"sudo",
"unzip",
"wget",
"python3-venv",
]
#
# Specific build helpers
#
def install_debian_deps(self):
actions = [
self.run(
ShellQuoted("apt-get update && apt-get install -yq {deps}").format(
deps=shell_join(
" ", (ShellQuoted(dep) for dep in self.debian_deps())
)
)
),
]
gcc_version = self.option("gcc_version")
# Make the selected GCC the default before building anything
actions.extend(
[
self.run(
ShellQuoted("apt-get install -yq {c} {cpp}").format(
c=ShellQuoted("gcc-{v}").format(v=gcc_version),
cpp=ShellQuoted("g++-{v}").format(v=gcc_version),
)
),
self.run(
ShellQuoted(
"update-alternatives --install /usr/bin/gcc gcc {c} 40 "
"--slave /usr/bin/g++ g++ {cpp}"
).format(
c=ShellQuoted("/usr/bin/gcc-{v}").format(v=gcc_version),
cpp=ShellQuoted("/usr/bin/g++-{v}").format(v=gcc_version),
)
),
self.run(ShellQuoted("update-alternatives --config gcc")),
]
)
actions.extend(self.debian_ccache_setup_steps())
return self.step("Install packages for Debian-based OS", actions)
def create_python_venv(self):
actions = []
if self.option("PYTHON_VENV", "OFF") == "ON":
actions.append(
self.run(
ShellQuoted("python3 -m venv {p}").format(
p=path_join(self.option("prefix"), "venv")
)
)
)
return actions
def python_venv(self):
actions = []
if self.option("PYTHON_VENV", "OFF") == "ON":
actions.append(
ShellQuoted("source {p}").format(
p=path_join(self.option("prefix"), "venv", "bin", "activate")
)
)
actions.append(
self.run(
ShellQuoted("python3 -m pip install {deps}").format(
deps=shell_join(
" ", (ShellQuoted(dep) for dep in self.python_deps())
)
)
)
)
return actions
def enable_rust_toolchain(self, toolchain="stable", is_bootstrap=True):
choices = set(["stable", "beta", "nightly"])
assert toolchain in choices, (
"while enabling rust toolchain: {} is not in {}"
).format(toolchain, choices)
rust_toolchain_opt = (toolchain, is_bootstrap)
prev_opt = self.option("rust_toolchain", rust_toolchain_opt)
assert prev_opt == rust_toolchain_opt, (
"while enabling rust toolchain: previous toolchain already set to"
" {}, but trying to set it to {} now"
).format(prev_opt, rust_toolchain_opt)
self.add_option("rust_toolchain", rust_toolchain_opt)
def rust_toolchain(self):
actions = []
if self.option("rust_toolchain", False):
(toolchain, is_bootstrap) = self.option("rust_toolchain")
rust_dir = path_join(self.option("prefix"), "rust")
actions = [
self.set_env("CARGO_HOME", rust_dir),
self.set_env("RUSTUP_HOME", rust_dir),
self.set_env("RUSTC_BOOTSTRAP", "1" if is_bootstrap else "0"),
self.run(
ShellQuoted(
"curl -sSf https://build.travis-ci.com/files/rustup-init.sh"
" | sh -s --"
" --default-toolchain={r} "
" --profile=minimal"
" --no-modify-path"
" -y"
                    ).format(r=toolchain)
),
self.set_env(
"PATH",
ShellQuoted("{p}:$PATH").format(p=path_join(rust_dir, "bin")),
),
self.run(ShellQuoted("rustup update")),
self.run(ShellQuoted("rustc --version")),
self.run(ShellQuoted("rustup --version")),
self.run(ShellQuoted("cargo --version")),
]
return actions
def debian_ccache_setup_steps(self):
return [] # It's ok to ship a renderer without ccache support.
def github_project_workdir(self, project, path):
# Only check out a non-default branch if requested. This especially
# makes sense when building from a local repo.
git_hash = self.option(
"{0}:git_hash".format(project),
# Any repo that has a hash in deps/github_hashes defaults to
# that, with the goal of making builds maximally consistent.
self._github_hashes.get(project, ""),
)
maybe_change_branch = (
[
self.run(ShellQuoted("git checkout {hash}").format(hash=git_hash)),
]
if git_hash
else []
)
local_repo_dir = self.option("{0}:local_repo_dir".format(project), "")
return self.step(
"Check out {0}, workdir {1}".format(project, path),
[
self.workdir(self._github_dir),
self.run(
ShellQuoted("git clone {opts} https://github.com/{p}").format(
p=project,
opts=ShellQuoted(
self.option("{}:git_clone_opts".format(project), "")
),
)
)
if not local_repo_dir
else self.copy_local_repo(local_repo_dir, os.path.basename(project)),
self.workdir(
path_join(self._github_dir, os.path.basename(project), path),
),
]
+ maybe_change_branch,
)
def fb_github_project_workdir(self, project_and_path, github_org="facebook"):
"This helper lets Facebook-internal CI special-cases FB projects"
project, path = project_and_path.split("/", 1)
return self.github_project_workdir(github_org + "/" + project, path)
def _make_vars(self, make_vars):
return shell_join(
" ",
(
ShellQuoted("{k}={v}").format(k=k, v=v)
for k, v in ({} if make_vars is None else make_vars).items()
),
)
def parallel_make(self, make_vars=None):
return self.run(
ShellQuoted("make -j {n} VERBOSE=1 {vars}").format(
n=self.option("make_parallelism"),
vars=self._make_vars(make_vars),
)
)
def make_and_install(self, make_vars=None):
return [
self.parallel_make(make_vars),
self.run(
ShellQuoted("make install VERBOSE=1 {vars}").format(
vars=self._make_vars(make_vars),
)
),
]
def configure(self, name=None):
autoconf_options = {}
if name is not None:
autoconf_options.update(
self.option("{0}:autoconf_options".format(name), {})
)
return [
self.run(
ShellQuoted(
'LDFLAGS="$LDFLAGS -L"{p}"/lib -Wl,-rpath="{p}"/lib" '
'CFLAGS="$CFLAGS -I"{p}"/include" '
'CPPFLAGS="$CPPFLAGS -I"{p}"/include" '
"PY_PREFIX={p} "
"./configure --prefix={p} {args}"
).format(
p=self.option("prefix"),
args=shell_join(
" ",
(
ShellQuoted("{k}={v}").format(k=k, v=v)
for k, v in autoconf_options.items()
),
),
)
),
]
def autoconf_install(self, name):
return self.step(
"Build and install {0}".format(name),
[
self.run(ShellQuoted("autoreconf -ivf")),
]
+ self.configure()
+ self.make_and_install(),
)
def cmake_configure(self, name, cmake_path=".."):
cmake_defines = {
"BUILD_SHARED_LIBS": "ON",
"CMAKE_INSTALL_PREFIX": self.option("prefix"),
}
# Hacks to add thriftpy3 support
if "BUILD_THRIFT_PY3" in os.environ and "folly" in name:
cmake_defines["PYTHON_EXTENSIONS"] = "True"
if "BUILD_THRIFT_PY3" in os.environ and "fbthrift" in name:
cmake_defines["thriftpy3"] = "ON"
cmake_defines.update(self.option("{0}:cmake_defines".format(name), {}))
return [
self.run(
ShellQuoted(
'CXXFLAGS="$CXXFLAGS -fPIC -isystem "{p}"/include" '
'CFLAGS="$CFLAGS -fPIC -isystem "{p}"/include" '
"cmake {args} {cmake_path}"
).format(
p=self.option("prefix"),
args=shell_join(
" ",
(
ShellQuoted("-D{k}={v}").format(k=k, v=v)
for k, v in cmake_defines.items()
),
),
cmake_path=cmake_path,
)
),
]
def cmake_install(self, name, cmake_path=".."):
return self.step(
"Build and install {0}".format(name),
self.cmake_configure(name, cmake_path) + self.make_and_install(),
)
def cargo_build(self, name):
return self.step(
"Build {0}".format(name),
[
self.run(
ShellQuoted("cargo build -j {n}").format(
n=self.option("make_parallelism")
)
)
],
)
def fb_github_autoconf_install(self, project_and_path, github_org="facebook"):
return [
self.fb_github_project_workdir(project_and_path, github_org),
self.autoconf_install(project_and_path),
]
def fb_github_cmake_install(
self, project_and_path, cmake_path="..", github_org="facebook"
):
return [
self.fb_github_project_workdir(project_and_path, github_org),
self.cmake_install(project_and_path, cmake_path),
]
def fb_github_cargo_build(self, project_and_path, github_org="facebook"):
return [
self.fb_github_project_workdir(project_and_path, github_org),
self.cargo_build(project_and_path),
]
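# Usage sketch (not part of the original file): a concrete renderer only has
# to implement the abstract primitives above (`step`, `run`, `set_env`,
# `workdir`, `copy_local_repo`).  A hypothetical minimal shell renderer,
# assuming `ShellQuoted.format()` substitutes named placeholders as it is
# used throughout this class:
#
#     def run(self, shell_cmd):
#         return shell_cmd
#     def set_env(self, key, value):
#         return ShellQuoted("export {k}={v}").format(k=key, v=value)
#     def workdir(self, dir):
#         return ShellQuoted("mkdir -p {d} && cd {d}").format(d=dir)
#     def step(self, name, actions):
#         return [ShellQuoted("# --- {n} ---").format(n=name)] + actions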
|
#
# Apache mod_python interface
#
# Aaron Klingaman <alk@absarokasoft.com>
# Mark Huang <mlhuang@cs.princeton.edu>
#
# Copyright (C) 2004-2006 The Trustees of Princeton University
#
import sys
import traceback
import xmlrpclib
from mod_python import apache
from sfa.plc.api import SfaAPI
from sfa.util.sfalogging import logger
api = SfaAPI(interface='aggregate')
def handler(req):
try:
if req.method != "POST":
req.content_type = "text/html"
req.send_http_header()
req.write("""
<html><head>
<title>SFA Aggregate API XML-RPC/SOAP Interface</title>
</head><body>
<h1>SFA Aggregate API XML-RPC/SOAP Interface</h1>
<p>Please use XML-RPC or SOAP to access the SFA API.</p>
</body></html>
""")
return apache.OK
# Read request
request = req.read(int(req.headers_in['content-length']))
# mod_python < 3.2: The IP address portion of remote_addr is
# incorrect (always 0.0.0.0) when IPv6 is enabled.
# http://issues.apache.org/jira/browse/MODPYTHON-64?page=all
(remote_ip, remote_port) = req.connection.remote_addr
remote_addr = (req.connection.remote_ip, remote_port)
# Handle request
response = api.handle(remote_addr, request)
# Write response
req.content_type = "text/xml; charset=" + api.encoding
req.send_http_header()
req.write(response)
return apache.OK
except Exception, err:
# Log error in /var/log/httpd/(ssl_)?error_log
logger.log_exc('%r'%err)
return apache.HTTP_INTERNAL_SERVER_ERROR
|
/* $NetBSD: asa.c,v 1.11 1997/09/20 14:55:00 lukem Exp $ */
/*
* Copyright (c) 1993,94 Winning Strategies, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Winning Strategies, Inc.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#if 0
#ifndef lint
__RCSID("$NetBSD: asa.c,v 1.11 1997/09/20 14:55:00 lukem Exp $");
#endif
#endif
__FBSDID("$FreeBSD: src/usr.bin/asa/asa.c,v 1.6 2005/05/21 09:55:04 ru Exp $");
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
static void asa(FILE *);
static void usage(void);
int
main(int argc, char *argv[])
{
int ch, exval;
FILE *fp;
const char *fn;
while ((ch = getopt(argc, argv, "")) != -1) {
switch (ch) {
case '?':
default:
usage();
/*NOTREACHED*/
}
}
argc -= optind;
argv += optind;
exval = 0;
if (argc == 0)
asa(stdin);
else {
while ((fn = *argv++) != NULL) {
if ((fp = fopen(fn, "r")) == NULL) {
warn("%s", fn);
exval = 1;
continue;
}
asa(fp);
fclose(fp);
}
}
exit(exval);
}
static void
usage(void)
{
fprintf(stderr, "usage: asa [file ...]\n");
exit(1);
}
static void
asa(FILE *f)
{
size_t len;
char *buf;
if ((buf = fgetln(f, &len)) != NULL) {
if (buf[len - 1] == '\n')
buf[--len] = '\0';
/* special case the first line */
switch (buf[0]) {
case '0':
putchar('\n');
break;
case '1':
putchar('\f');
break;
}
if (len > 1 && buf[0] && buf[1])
printf("%.*s", (int)(len - 1), buf + 1);
while ((buf = fgetln(f, &len)) != NULL) {
if (buf[len - 1] == '\n')
buf[--len] = '\0';
switch (buf[0]) {
default:
case ' ':
putchar('\n');
break;
case '0':
putchar('\n');
putchar('\n');
break;
case '1':
putchar('\f');
break;
case '+':
putchar('\r');
break;
}
if (len > 1 && buf[0] && buf[1])
printf("%.*s", (int)(len - 1), buf + 1);
}
putchar('\n');
}
}
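/*
 * Illustration (not part of the original source): asa() maps the Fortran
 * carriage-control character in column one exactly as coded above:
 *   ' '  advance one line         '0'  advance two lines
 *   '1'  form feed (new page)     '+'  overprint the previous line
 * For example, the input lines " one" and "+---" print "one" with "---"
 * overstruck on the same line via the emitted '\r'.
 */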
|
import React from 'react';
import { Parallax } from 'react-scroll-parallax';
import './shelty.css';
const Shelty = (props) => {
return (
<div className="shelty-preview" id="shelty">
<div className="line">
<Parallax className="large" y={[30, 80]}>
<img src={props.img1} alt={props.img1} />
</Parallax>
</div>
<div className="line">
<Parallax className="medium" y={[80, 0]}>
<img src={props.img3} alt={props.img3} />
</Parallax>
<Parallax className="small" y={[40, 30]}>
<img src={props.img2} alt={props.img2} />
</Parallax>
</div>
</div>
)
}
export default Shelty;
|
# -*- coding: utf-8 -*-
__doc__ = """Tree GRU aka Recursive Neural Networks."""
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
class Node_tweet(object):
def __init__(self, idx=None):
self.children = []
self.idx = idx
self.word = []
self.index = []
self.parent = None
################################# generate tree structure ##############################
def gen_nn_inputs(root_node):
"""Given a root node, returns the appropriate inputs to NN.
The NN takes in
x: the values at the leaves (e.g. word indices)
tree: a (n x degree) matrix that provides the computation order.
Namely, a row tree[i] = [a, b, c] in tree signifies that a
and b are children of c, and that the computation
f(a, b) -> c should happen on step i.
"""
root_node.idx = 0
X_word, X_index = [root_node.word], [root_node.index]
tree, internal_word, internal_index, leaf_idxs = _get_tree_path(root_node)
X_word.extend(internal_word)
X_index.extend(internal_index)
return (np.array(X_word, dtype='float32'),
np.array(X_index, dtype='int32'),
np.array(tree, dtype='int32'),
np.array(leaf_idxs, dtype='int32')
)
def _get_tree_path(root_node):
"""Get computation order of leaves -> root."""
if not root_node.children:
return [], [], [], [1]
layers = []
layer = [root_node]
while layer:
layers.append(layer[:])
next_layer = []
[next_layer.extend([child for child in node.children if child])
for node in layer]
layer = next_layer
tree = []
word = []
index = []
leafs = []
idx_cnt = root_node.idx
for layer in layers:
for node in layer:
if not node.children:
leafs.append(node.idx)
continue
for child in node.children:
idx_cnt += 1
child.idx = idx_cnt
tree.append([node.idx, child.idx])
word.append(child.word if child.word is not None else -1)
index.append(child.index if child.index is not None else -1)
return tree, word, index, leafs
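# Example (illustrative sketch, not in the original module): for a root with
# two leaf children, gen_nn_inputs() yields one tree row per parent->child
# edge, in top-down order, plus the indices of the leaves:
#
#   root = Node_tweet(); root.word, root.index = [1.0], [0]
#   for _ in range(2):
#       child = Node_tweet(); child.word, child.index = [1.0], [0]
#       child.parent = root
#       root.children.append(child)
#   x_word, x_index, tree, leaf_idxs = gen_nn_inputs(root)
#   # tree == [[0, 1], [0, 2]] and leaf_idxs == [1, 2]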
################################ tree rnn class ######################################
class RvNN(nn.Module):
def __init__(self, word_dim, hidden_dim=5, Nclass=4,
degree=2, momentum=0.9,
trainable_embeddings=True,
labels_on_nonroot_nodes=False,
irregular_tree=True):
super(RvNN, self).__init__()
assert word_dim > 1 and hidden_dim > 1
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.Nclass = Nclass
        self.degree = degree  # oddly, `degree` is never given a value when the model is created
self.momentum = momentum
self.irregular_tree = irregular_tree
self.E_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.word_dim]), requires_grad=True)
self.W_z_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.U_z_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.b_z_td = nn.parameter.Parameter(self.init_vector([self.hidden_dim]), requires_grad=True)
self.W_r_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.U_r_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.b_r_td = nn.parameter.Parameter(self.init_vector([self.hidden_dim]), requires_grad=True)
self.W_h_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.U_h_td = nn.parameter.Parameter(self.init_matrix([self.hidden_dim, self.hidden_dim]), requires_grad=True)
self.b_h_td = nn.parameter.Parameter(self.init_vector([self.hidden_dim]), requires_grad=True)
self.W_out_td = nn.parameter.Parameter(self.init_matrix([self.Nclass, self.hidden_dim]), requires_grad=True)
self.b_out_td = nn.parameter.Parameter(self.init_vector([self.Nclass]), requires_grad=True)
def forward(self, x_word, x_index, tree, leaf_idxs, y):
final_state = self.compute_tree_states(x_word, x_index, tree, leaf_idxs)
pred, loss = self.predAndLoss(final_state, y)
return pred, loss
    def recursive_unit(self, child_word, child_index, parent_h):
        child_xe = self.E_td[:, child_index].mul(torch.tensor(child_word)).sum(dim=1)
        z_td = torch.sigmoid(self.W_z_td.mul(child_xe).sum(dim=1) + self.U_z_td.mul(parent_h).sum(dim=1) + self.b_z_td)
        r_td = torch.sigmoid(self.W_r_td.mul(child_xe).sum(dim=1) + self.U_r_td.mul(parent_h).sum(dim=1) + self.b_r_td)
        c = torch.tanh(self.W_h_td.mul(child_xe).sum(dim=1) + self.U_h_td.mul(parent_h * r_td).sum(dim=1) + self.b_h_td)
        h_td = z_td * parent_h + (1 - z_td) * c
        return h_td
def compute_tree_states(self, x_word, x_index, tree, leaf_idxs):
def _recurrence(x_word, x_index, tree, node_h):
parent_h = node_h[tree[0]]
child_h = self.recursive_unit(x_word, x_index, parent_h)
node_h = torch.cat((node_h, child_h.view(1, -1)))
return node_h
node_h = torch.zeros([1, self.hidden_dim])
for words, indexs, thislayer in zip(x_word, x_index, tree):
node_h = _recurrence(words, indexs, thislayer, node_h)
return node_h[leaf_idxs].max(dim=0)[0]
    def predAndLoss(self, final_state, ylabel):
        pred = F.softmax(self.W_out_td.mul(final_state).sum(dim=1) + self.b_out_td, dim=0)
        loss = (torch.tensor(ylabel, dtype=torch.float) - pred).pow(2).sum()
        return pred, loss
def init_vector(self, shape):
return torch.zeros(shape)
def init_matrix(self, shape):
return torch.from_numpy(np.random.normal(scale=0.1, size=shape).astype('float32'))
    def predict_up(self, x_word, x_index, x_tree, leaf_idxs):
        final_state = self.compute_tree_states(x_word, x_index, x_tree, leaf_idxs)
        return F.softmax(self.W_out_td.mul(final_state).sum(dim=1) + self.b_out_td, dim=0)
|
import os
import sys
import numpy as np
import nibabel as nb
import cbstools
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving
def profile_sampling(profile_surface_image, intensity_image,
save_data=False, output_dir=None,
file_name=None):
'''Sampling data on multiple intracortical layers
Parameters
-----------
profile_surface_image: niimg
4D image containing levelset representations of different intracortical
surfaces on which data should be sampled
intensity_image: niimg
Image from which data should be sampled
save_data: bool
Save output data to file (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
-----------
niimg
4D profile image , where the 4th dimension represents the
profile for each voxel (output file suffix _profiles)
Notes
----------
Original Java module by Pierre-Louis Bazin and Juliane Dinse
'''
print('\nProfile sampling')
# make sure that saving related parameters are correct
if save_data:
output_dir = _output_dir_4saving(output_dir, intensity_image)
profile_file = _fname_4saving(file_name=file_name,
rootfile=intensity_image,
suffix='profiles')
# start VM if not already running
try:
cbstools.initVM(initialheap='6000m', maxheap='6000m')
except ValueError:
pass
# initate class
sampler = cbstools.LaminarProfileSampling()
# load the data
surface_img = load_volume(profile_surface_image)
surface_data = surface_img.get_data()
hdr = surface_img.get_header()
aff = surface_img.get_affine()
resolution = [x.item() for x in hdr.get_zooms()]
dimensions = surface_data.shape
intensity_data = load_volume(intensity_image).get_data()
# pass inputs
sampler.setIntensityImage(cbstools.JArray('float')(
(intensity_data.flatten('F')).astype(float)))
sampler.setProfileSurfaceImage(cbstools.JArray('float')(
(surface_data.flatten('F')).astype(float)))
sampler.setResolutions(resolution[0], resolution[1], resolution[2])
sampler.setDimensions(dimensions[0], dimensions[1],
dimensions[2], dimensions[3])
# execute class
    try:
        sampler.execute()
    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise
# collecting outputs
profile_data = np.reshape(np.array(
sampler.getProfileMappedIntensityImage(),
dtype=np.float32), dimensions, 'F')
hdr['cal_max'] = np.nanmax(profile_data)
profiles = nb.Nifti1Image(profile_data, aff, hdr)
if save_data:
save_volume(os.path.join(output_dir, profile_file), profiles)
return profiles
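# Usage sketch (file names are hypothetical):
#
#   profiles = profile_sampling('cortex_layering.nii.gz', 'qt1_map.nii.gz',
#                               save_data=True, output_dir='/tmp/out')
#   # `profiles` is a 4D Nifti1Image whose 4th dimension holds one sampled
#   # intensity per intracortical surface in the levelset image.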
|
var cliArgs = require("../");
var optionDefinitions = [
{
name: "verbose", alias: "v", type: Boolean,
description: "If passed this option will cause bucket-loads of text to appear on your screen"
},
{
name: "help", alias: "h", type: Boolean,
description: "Set this option to display the usage information"
},
{
name: "colour", alias: "c", value: "red",
description: "you can specify a colour which will be displayed appropriately"
},
{ name: "number", alias: "n", type: Number },
{
name: "files", type: Array, defaultOption: true,
description: "The default option, a list of files to do nothing about or with"
}
];
var cli = cliArgs(optionDefinitions);
var argv = cli.parse();
var usage = cli.getUsage({
header: require("../package.json").description,
columns: [ 24, 15 ]
});
if (argv.help) console.log(usage); else console.dir(argv);
|
#pragma once
#include "Tiny/Renderer/Framebuffer.h"
namespace Tiny {
class OpenGLFramebuffer : public Framebuffer
{
public:
OpenGLFramebuffer(const FramebufferSpecification& spec);
~OpenGLFramebuffer();
void Bind() override;
void Unbind() override;
void Recreate();
void Resize(uint32_t width, uint32_t height) override;
uint32_t GetColorAttachmentRendererID() const override { return m_ColorAttachment; }
const FramebufferSpecification& GetSpecification() const override { return m_Specification; }
private:
uint32_t m_FramebufferID = 0;
uint32_t m_ColorAttachment = 0, m_DepthAttachment = 0;
FramebufferSpecification m_Specification;
};
}
|
var describe = require('mocha').describe;
var it = require('mocha').it;
var beforeEach = require('mocha').beforeEach;
var expect = require('chai').expect;
var sinon = require('sinon');
var WorkResult = require('../../lib/WorkResult');
describe('[UNIT] WorkResult', function () {
var result;
beforeEach(function () {
result = new WorkResult([1, 2, 3]);
});
it('should allow result only from work domain', function () {
expect(result.supports(1)).to.be.true;
expect(result.supports(2)).to.be.true;
expect(result.supports(3)).to.be.true;
expect(result.supports(4)).to.be.false;
expect(result.supports('1')).to.be.false;
expect(result.supports('something else')).to.be.false;
});
it('should throw error if we want set or get result out of work domain', function () {
expect(function () { result.set(1, 'abc'); }).to.not.throw();
expect(function () { result.set(4, 'abc'); }).to.throw();
expect(function () { result.get(1); }).to.not.throw();
expect(function () { result.get(4); }).to.throw();
});
it('should set and get result', function () {
result.set(1, 'test');
expect(result.has(1)).to.be.true;
expect(result.has(2)).to.be.false;
expect(result.get(1)).to.be.equal('test');
expect(result.get(2)).to.be.undefined;
});
it('should check if we have all result', function () {
expect(result.hasAll()).to.be.false;
result.set(1, 'abc');
expect(result.hasAll()).to.be.false;
result.set(3, 'xyz');
expect(result.hasAll()).to.be.false;
result.set(1, 'efg');
expect(result.hasAll()).to.be.false;
result.set(2, undefined);
expect(result.hasAll()).to.be.false;
result.set(2, 'foo');
expect(result.hasAll()).to.be.true;
});
it('should clear work result', function () {
expect(function () { result.clear(); }).to.not.throw();
result.set(1, 'test');
result.clear();
expect(result.get(1)).to.be.undefined;
result.set(1, 'a');
result.set(2, 'b');
result.set(3, 'c');
result.clear();
expect(result.hasAll()).to.be.false;
});
it('should reduce work result', function () {
result.set(2, 'c');
var reducer = sinon.spy(function (reduced, current) {
return reduced.concat(current);
});
var reduced = result.reduce(reducer, []);
expect(reduced).to.be.a('array');
expect(reduced).to.be.deep.equal([undefined, 'c', undefined]);
expect(reducer.callCount).to.be.equal(3);
expect(reducer.getCall(0).args[0]).to.be.deep.equal([]);
expect(reducer.getCall(0).args[1]).to.be.undefined;
expect(reducer.getCall(1).args[0]).to.be.deep.equal([undefined]);
expect(reducer.getCall(1).args[1]).to.be.equal('c');
expect(reducer.getCall(2).args[0]).to.be.deep.equal([undefined, 'c']);
expect(reducer.getCall(2).args[1]).to.be.equal(undefined);
});
});
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GraphUNet
from torch_geometric.utils import dropout_adj
from .. import BaseModel, register_model
from cogdl.utils import add_remaining_self_loops
@register_model("pyg_unet")
class UNet(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--hidden-size", type=int, default=32)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--dropout", type=float, default=0.92)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args.num_features, args.hidden_size, args.num_classes, args.num_layers, args.dropout, args.num_nodes)
def __init__(self, in_feats, hidden_size, out_feats, num_layers, dropout, num_nodes):
super(UNet, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
        self.num_nodes = num_nodes
self.unet = GraphUNet(
self.in_feats, self.hidden_size, self.out_feats, depth=3, pool_ratios=[2000 / num_nodes, 0.5], act=F.elu
)
def forward(self, x, edge_index):
edge_index, _ = dropout_adj(
edge_index, p=0.2, force_undirected=True, num_nodes=x.shape[0], training=self.training
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.unet(x, edge_index)
return x
def predict(self, data):
return self.forward(data.x, data.edge_index)
|
import Vue from 'vue'
import App from './App.vue'
import router from './router'
import store from './store'
import './plugins'
import vuetify from './plugins/vuetify'
import { sync } from 'vuex-router-sync'
import CKEditor from '@ckeditor/ckeditor5-vue'
import interceptor from './interceptor'
interceptor.init()
Vue.use(CKEditor)
sync(store, router)
export const EventBus = new Vue()
Vue.config.productionTip = false
new Vue({
router,
store,
vuetify,
render: h => h(App)
}).$mount('#app')
|
from numbers import Number
from typing import Any, AnyStr, Callable, Collection, Dict, Hashable, Iterator, List, Mapping, NewType, Optional
from typing import Sequence, Tuple, TypeVar, Union
from types import SimpleNamespace
from pathlib import Path
from abc import abstractmethod, abstractproperty
from collections import abc, Counter, defaultdict, namedtuple, OrderedDict
from collections.abc import Iterable
from IPython.core.debugger import set_trace
from torch import nn, optim, as_tensor
import numpy as np
from fastprogress.fastprogress import master_bar, progress_bar
import seaborn as sns
def is_listy(x:Any)->bool: return isinstance(x, (tuple,list))
def is_tuple(x:Any)->bool: return isinstance(x, tuple)
def is_dict(x:Any)->bool: return isinstance(x, dict)
def is_pathlike(x:Any)->bool: return isinstance(x, (str,Path))
def noop(x): return x
AnnealFunc = Callable[[Number,Number,float], Number]
ListOrItem = Union[Collection[Any],int,float,str]
OptListOrItem = Optional[ListOrItem]
StartOptEnd=Union[float,Tuple[float,float]]
Floats = Union[float, Collection[float]]
OptOptimizer = Optional[optim.Optimizer]
def annealing_no(start:Number, end:Number, pct:float)->Number:
    "No annealing, always return `start`."
    return start
def annealing_linear(start:Number, end:Number, pct:float)->Number:
    "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    return start + pct * (end-start)
def annealing_exp(start:Number, end:Number, pct:float)->Number:
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def ifnone(a:Any,b:Any)->Any:
"`a` if `a` is not None, otherwise `b`."
return b if a is None else a
def listify(p:OptListOrItem=None, q:OptListOrItem=None):
"Make `p` listy and the same length as `q`."
if p is None: p=[]
elif isinstance(p, str): p = [p]
elif not isinstance(p, Iterable): p = [p]
#Rank 0 tensors in PyTorch are Iterable but don't have a length.
else:
try: a = len(p)
except: p = [p]
n = q if type(q)==int else len(p) if q is None else len(q)
if len(p)==1: p = p * n
assert len(p)==n, f'List len mismatch ({len(p)} vs {n})'
return list(p)
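# Behaviours implied by the code above, for reference:
#   listify(1, 3)              -> [1, 1, 1]    (scalar broadcast to length 3)
#   listify('ab', ['x', 'y'])  -> ['ab', 'ab'] (strings are not iterated)
#   listify(None)              -> []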
def func_args(func)->Collection[str]:
    "Return the argument names of `func` (needed by `Callback.__repr__` below)."
    code = func.__code__
    return code.co_varnames[:code.co_argcount]
class Callback(object):
"Base class for callbacks that want to record values, dynamically change learner params, etc."
_order=0
def on_train_begin(self, **kwargs:Any)->None:
"To initialize constants in the callback."
pass
def on_epoch_begin(self, **kwargs:Any)->None:
"At the beginning of each epoch."
pass
def on_batch_begin(self, **kwargs:Any)->None:
"Set HP before the output and loss are computed."
pass
def on_loss_begin(self, **kwargs:Any)->None:
"Called after forward pass but before loss has been computed."
pass
def on_backward_begin(self, **kwargs:Any)->None:
"Called after the forward pass and the loss has been computed, but before backprop."
pass
def on_backward_end(self, **kwargs:Any)->None:
"Called after backprop but before optimizer step. Useful for true weight decay in AdamW."
pass
def on_step_end(self, **kwargs:Any)->None:
"Called after the step of the optimizer but before the gradients are zeroed."
pass
def on_batch_end(self, **kwargs:Any)->None:
"Called at the end of the batch."
pass
def on_epoch_end(self, **kwargs:Any)->None:
"Called at the end of an epoch."
pass
def on_train_end(self, **kwargs:Any)->None:
"Useful for cleaning up things and saving files/models."
pass
def jump_to_epoch(self, epoch)->None:
"To resume training at `epoch` directly."
pass
def get_state(self, minimal:bool=True):
"Return the inner state of the `Callback`, `minimal` or not."
to_remove = ['exclude', 'not_min'] + getattr(self, 'exclude', []).copy()
if minimal: to_remove += getattr(self, 'not_min', []).copy()
return {k:v for k,v in self.__dict__.items() if k not in to_remove}
def __repr__(self):
attrs = func_args(self.__init__)
to_remove = getattr(self, 'exclude', [])
list_repr = [self.__class__.__name__] + [f'{k}: {getattr(self, k)}' for k in attrs if k != 'self' and k not in to_remove]
return '\n'.join(list_repr)
class Scheduler():
"Used to \"step\" from start,end (`vals`) over `n_iter` iterations on a schedule defined by `func`"
def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
self.start,self.end = (vals[0],vals[1]) if is_tuple(vals) else (vals,0)
self.n_iter = max(1,n_iter)
if func is None: self.func = annealing_linear if is_tuple(vals) else annealing_no
else: self.func = func
self.n = 0
# set_trace()
def restart(self): self.n = 0
def step(self)->Number:
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return `True` if schedule completed."
return self.n >= self.n_iter
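# Minimal Scheduler usage sketch (not in the original source):
#
#   sched = Scheduler((0.1, 0.01), n_iter=5, func=annealing_cos)
#   lrs = [sched.step() for _ in range(5)]  # eases from 0.1 down to 0.01
#   assert sched.is_done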
class OneCycleScheduler(Callback):
"Manage 1-Cycle style training as outlined in Leslie Smith's [paper](https://arxiv.org/pdf/1803.09820.pdf)."
def __init__(self, lr_max:float, num_batches:int, opt:OptOptimizer, moms:Floats=(0.95,0.85), div_factor:float=25., pct_start:float=0.3,
final_div:float=None, tot_epochs:int=None, start_epoch:int=None):
# super().__init__(learn)
self.lr_max,self.num_batches,self.opt,self.div_factor,self.pct_start,self.final_div = lr_max,num_batches,opt,div_factor,pct_start,final_div
if self.final_div is None: self.final_div = div_factor*1e4
self.moms=tuple(listify(moms,2))
if is_listy(self.lr_max): self.lr_max = np.array(self.lr_max)
self.start_epoch, self.tot_epochs = start_epoch, tot_epochs
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.tot_epochs = ifnone(self.tot_epochs, n_epochs)
n = self.num_batches * self.tot_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_cos), (a2, annealing_cos))
low_lr = self.lr_max/self.div_factor
self.lr_scheds = self.steps((low_lr, self.lr_max), (self.lr_max, self.lr_max/self.final_div))
self.mom_scheds = self.steps(self.moms, (self.moms[1], self.moms[0]))
# self.opt = self.learn.opt
lr, mom = self.lr_scheds[0].start, self.mom_scheds[0].start
for param_group in self.opt.param_groups:
param_group['lr'] = lr
# param_group['momentum'] = mom
if 'momentum' in param_group: param_group['momentum'] = mom
if 'betas' in param_group: param_group['betas'] = (mom, param_group['betas'][1])
# self.opt.lr,self.opt.mom = self.lr_scheds[0].start,self.mom_scheds[0].start
self.idx_s = 0
# set_trace()
return res
def jump_to_epoch(self, epoch:int)->None:
for _ in range(self.num_batches * epoch):
self.on_batch_end(True)
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return {'stop_training': True, 'stop_epoch': True}
# self.opt.lr = self.lr_scheds[self.idx_s].step()
# self.opt.mom = self.mom_scheds[self.idx_s].step()
lr, mom = self.lr_scheds[self.idx_s].step(), self.mom_scheds[self.idx_s].step()
for param_group in self.opt.param_groups:
param_group['lr'] = lr
if 'momentum' in param_group: param_group['momentum'] = mom
if 'betas' in param_group: param_group['betas'] = (mom, param_group['betas'][1])
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Tell Learner to stop if the cycle is finished."
if epoch > self.tot_epochs: return {'stop_training': True}
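# Wiring sketch for OneCycleScheduler (illustrative; `model` is assumed to
# be an existing nn.Module):
#
#   opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#   sched = OneCycleScheduler(lr_max=0.1, num_batches=100, opt=opt)
#   sched.on_train_begin(n_epochs=10, epoch=0)
#   for epoch in range(10):
#       for batch in range(100):
#           # ... forward / backward / opt.step() / opt.zero_grad() ...
#           sched.on_batch_end(True)   # anneal lr and momentum
#       sched.on_epoch_end(epoch)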
class FlatToAnnealingLR(Callback):
"Linearly rise from max_lr/div_factor to max_lr, then annealing_cos to final_lr."
def __init__(self, lr_max:float, num_batches:int, opt:OptOptimizer, div_factor:float=25., pct_start:float=0.3,
final_div:float=None, tot_epochs:int=None, start_epoch:int=None):
self.lr_max,self.num_batches,self.opt,self.div_factor,self.pct_start,self.final_div = lr_max,num_batches,opt,div_factor,pct_start,final_div
if self.final_div is None: self.final_div = div_factor*1e4
if is_listy(self.lr_max): self.lr_max = np.array(self.lr_max)
self.start_epoch, self.tot_epochs = start_epoch, tot_epochs
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.tot_epochs = ifnone(self.tot_epochs, n_epochs)
n = self.num_batches * self.tot_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_linear), (a2, annealing_cos))
low_lr = self.lr_max/self.div_factor
self.lr_scheds = self.steps((low_lr, self.lr_max), (self.lr_max, self.lr_max/self.final_div))
# self.opt = self.learn.opt
lr = self.lr_scheds[0].start
for param_group in self.opt.param_groups:
param_group['lr'] = lr
self.idx_s = 0
return res
def jump_to_epoch(self, epoch:int)->None:
for _ in range(self.num_batches * epoch):
self.on_batch_end(True)
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return {'stop_training': True, 'stop_epoch': True}
lr = self.lr_scheds[self.idx_s].step()
for param_group in self.opt.param_groups:
param_group['lr'] = lr
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Tell Learner to stop if the cycle is finished."
if epoch > self.tot_epochs: return {'stop_training': True}
class OneCycleSchedulerTau(Callback):
def __init__(self, lemniscate:Any, tau:Tuple, num_batches:int, div_factor:float=25., pct_start:float=0.3, final_div:float=None, tot_epochs:int=None, start_epoch:int=None):
self.lemniscate,self.tau,self.num_batches,self.div_factor,self.pct_start,self.final_div = lemniscate,tau,num_batches,div_factor,pct_start,final_div
#if self.final_div is None: self.final_div = div_factor*1e4
#if is_listy(self.tau_max): self.tau_max = np.array(self.tau_max)
self.start_epoch, self.tot_epochs = start_epoch, tot_epochs
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.tot_epochs = ifnone(self.tot_epochs, n_epochs)
n = self.num_batches * self.tot_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_cos), (a2, annealing_cos))
# low_lr = self.tau_max/self.div_factor
# self._scheds = self.steps((low_lr, self.tau_max), (self.tau_max, self.tau_max/self.final_div))
self._scheds = self.steps((self.tau[0], self.tau[1]), (self.tau[1], self.tau[0]))
val = self._scheds[0].start
self.lemniscate.params[1] = val
self.idx_s = 0
return res
def jump_to_epoch(self, epoch:int)->None:
for _ in range(self.num_batches * epoch):
self.on_batch_end(True)
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self._scheds): return {'stop_training': True, 'stop_epoch': True}
val = self._scheds[self.idx_s].step()
self.lemniscate.params[1] = val
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self._scheds[self.idx_s].is_done:
self.idx_s += 1
def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Tell Learner to stop if the cycle is finished."
if epoch > self.tot_epochs: return {'stop_training': True}
def show_tau_schedule(tau_scheduler, num_epochs, num_batches):
epochs = []
tau = []
tau_scheduler.on_train_begin(epoch=0, n_epochs=num_epochs)
mb = master_bar(range(num_epochs))
for epoch in mb:
ts = []
for batch_no in progress_bar(range(num_batches), parent=mb):
ts.append(tau_scheduler.lemniscate.params[1].cpu().numpy().tolist())
tau_scheduler.on_batch_end(True)
epochs.append(epoch)
tau.append(np.mean(ts))
ax = sns.lineplot(x="epoch", y="tau", data={"epoch":epochs ,"tau": tau})
return epochs, tau, ax
|
from .Basics import Basics
from .Interface import Interface
from .Network import NetworkData, Network
from .Router import Router
from .Scope import Scope
from .Subnet import Subnet
from .Telnet import Telnet
from .Yaml import YamlReader
from .Utils import *
|
#!/usr/bin/env python2.7
#-*- coding:utf-8 -*-
"""
grpc server for python2.7 function
"""
import argparse
import importlib
import os
import sys
import time
import grpc
import yaml
import json
import signal
from concurrent import futures
import function_pb2
import function_pb2_grpc
import logging
import logging.handlers
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class mo(function_pb2_grpc.FunctionServicer):
"""
grpc server module for python2.7 function
"""
def Load(self, conf):
"""
load config and init module
"""
self.config = yaml.load(open(conf, 'r').read())
# overwrite config from env
if 'OPENEDGE_SERVICE_INSTANCE_NAME' in os.environ:
self.config['name'] = os.environ['OPENEDGE_SERVICE_INSTANCE_NAME']
elif 'OPENEDGE_SERVICE_NAME' in os.environ: # deprecated
self.config['name'] = os.environ['OPENEDGE_SERVICE_NAME']
if 'OPENEDGE_SERVICE_INSTANCE_ADDRESS' in os.environ:
if 'server' not in self.config:
self.config['server'] = {}
self.config['server']['address'] = os.environ['OPENEDGE_SERVICE_INSTANCE_ADDRESS']
elif 'OPENEDGE_SERVICE_ADDRESS' in os.environ: # deprecated
if 'server' not in self.config:
self.config['server'] = {}
self.config['server']['address'] = os.environ['OPENEDGE_SERVICE_ADDRESS']
if 'name' not in self.config:
raise Exception, 'config invalid, missing name'
if 'server' not in self.config:
raise Exception, 'config invalid, missing server'
if 'address' not in self.config['server']:
raise Exception, 'config invalid, missing server address'
if 'functions' not in self.config:
raise Exception, 'config invalid, missing functions'
self.log = get_logger(self.config)
self.functions = get_functions(self.config['functions'])
self.server = get_grpc_server(self.config['server'])
function_pb2_grpc.add_FunctionServicer_to_server(self, self.server)
def Start(self):
"""
start module
"""
self.log.info("service starting")
self.server.start()
def Close(self):
"""
close module
"""
grace = None
if 'timeout' in self.config['server']:
grace = self.config['server']['timeout'] / 1e9
self.server.stop(grace)
self.log.info("service closed")
def Call(self, request, context):
"""
call request
"""
if request.FunctionName not in self.functions:
raise Exception, 'function not found'
ctx = {}
ctx['messageQOS'] = request.QOS
ctx['messageTopic'] = request.Topic
ctx['functionName'] = request.FunctionName
ctx['functionInvokeID'] = request.FunctionInvokeID
ctx['invokeid'] = request.FunctionInvokeID
msg = None
if request.Payload:
try:
msg = json.loads(request.Payload)
except ValueError:
msg = request.Payload # raw data, not json format
msg = self.functions[request.FunctionName](msg, ctx)
if msg is None:
request.Payload = b''
else:
request.Payload = json.dumps(msg)
return request
def Talk(self, request_iterator, context):
"""
talk request
"""
pass
def get_functions(c):
"""
get functions
"""
fs = {}
for fc in c:
if 'name' not in fc or 'handler' not in fc or 'codedir' not in fc:
raise Exception, 'config invalid, missing function name, handler or codedir'
sys.path.append(fc['codedir'])
module_handler = fc['handler'].split('.')
handler_name = module_handler.pop()
module = importlib.import_module('.'.join(module_handler))
fs[fc['name']] = getattr(module, handler_name)
return fs
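# Illustrative `functions` section of the YAML config consumed above
# (names and paths are hypothetical):
#
#   functions:
#     - name: sayhi
#       handler: index.handler        # module 'index', attribute 'handler'
#       codedir: var/db/openedge/function-sayhi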
def get_grpc_server(c):
"""
get grpc server
"""
# TODO: to test
max_workers = None
max_concurrent = None
max_message_length = 4 * 1024 * 1024
if 'workers' in c:
if 'max' in c['workers']:
max_workers = c['workers']['max']
if 'concurrent' in c:
if 'max' in c['concurrent']:
max_concurrent = c['concurrent']['max']
if 'message' in c:
if 'length' in c['message']:
if 'max' in c['message']['length']:
max_message_length = c['message']['length']['max']
ssl_ca = None
ssl_key = None
ssl_cert = None
if 'ca' in c:
with open(c['ca'], 'rb') as f:
ssl_ca = f.read()
if 'key' in c:
with open(c['key'], 'rb') as f:
ssl_key = f.read()
if 'cert' in c:
with open(c['cert'], 'rb') as f:
ssl_cert = f.read()
s = grpc.server(thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers),
options=[('grpc.max_send_message_length', max_message_length),
('grpc.max_receive_message_length', max_message_length)],
maximum_concurrent_rpcs=max_concurrent)
if ssl_key is not None and ssl_cert is not None:
credentials = grpc.ssl_server_credentials(
((ssl_key, ssl_cert),), ssl_ca, ssl_ca is not None)
s.add_secure_port(c['address'], credentials)
else:
s.add_insecure_port(c['address'])
return s
def get_logger(c):
"""
get logger
"""
logger = logging.getLogger(c['name'])
if 'logger' not in c:
return logger
if 'path' not in c['logger']:
return logger
try:
os.mkdir(os.path.dirname(c['logger']['path']))
except OSError:
pass
level = logging.INFO
if 'level' in c['logger']:
if c['logger']['level'] == 'debug':
level = logging.DEBUG
elif c['logger']['level'] == 'warn':
level = logging.WARNING
elif c['logger']['level'] == 'error':
level = logging.ERROR
interval = 15
if 'age' in c['logger'] and 'max' in c['logger']['age']:
interval = c['logger']['age']['max']
backupCount = 15
if 'backup' in c['logger'] and 'max' in c['logger']['backup']:
backupCount = c['logger']['backup']['max']
logger.setLevel(level)
# create a file handler
handler = logging.handlers.TimedRotatingFileHandler(
c['logger']['path'], when='h', interval=interval, backupCount=backupCount)
handler.setLevel(level)
# create a logging format
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='grpc server for python2.7 function')
parser.add_argument('-c',
type=str,
default=os.path.join("etc", "openedge", "service.yml"),
help='config file path (default: etc/openedge/service.yml)')
args = parser.parse_args()
m = mo()
m.Load(args.c)
m.Start()
def exit(signum, frame):
sys.exit(0)
signal.signal(signal.SIGINT, exit)
signal.signal(signal.SIGTERM, exit)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except BaseException as ex:
m.log.debug(ex)
finally:
m.Close()
|
/*
* MIT License
*
* Copyright (c) 2021 Denton Woods
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*!
* \file Sonardyne.h
 * \brief Sonardyne sound speed profile file reader
*/
#pragma once
#include <optional>
#include <string>
#include <SspCpp/Cast.h>
namespace ssp
{
std::optional<SCast> ReadSonardyne(const std::string& fileName);
}  // namespace ssp
|
/*****************************************************************************/
/* StormPort.h Copyright (c) Marko Friedemann 2001 */
/*---------------------------------------------------------------------------*/
/* Portability module for the StormLib library. Contains wrapper symbols */
/* to make compilation under Linux work */
/* */
/* Author: Marko Friedemann <marko.friedemann@bmx-chemnitz.de> */
/* Created at: Mon Jan 29 18:26:01 CEST 2001 */
/* Computer: whiplash.flachland-chemnitz.de */
/* System: Linux 2.4.0 on i686 */
/* */
/* Author: Sam Wilkins <swilkins1337@gmail.com> */
/* System: Mac OS X and port to big endian processor */
/* */
/*---------------------------------------------------------------------------*/
/* Date Ver Who Comment */
/* -------- ---- --- ------- */
/* 29.01.01 1.00 Mar Created */
/* 24.03.03 1.01 Lad Some cosmetic changes */
/* 12.11.03 1.02 Dan Macintosh compatibility */
/* 24.07.04 1.03 Sam Mac OS X compatibility */
/* 22.11.06 1.04 Sam Mac OS X compatibility (for StormLib 6.0) */
/* 31.12.06 1.05 XPinguin Full GNU/Linux compatibility */
/* 17.10.12 1.05 Lad Moved error codes so they don't overlap with errno.h */
/*****************************************************************************/
#ifndef __STORMPORT_H__
#define __STORMPORT_H__
#ifndef __cplusplus
#define bool char
#define true 1
#define false 0
#endif
// Defines for Windows
#if !defined(PLATFORM_DEFINED) && (defined(WIN32) || defined(WIN64))
// In MSVC 8.0, there are some functions declared as deprecated.
#if _MSC_VER >= 1400
#define _CRT_SECURE_NO_DEPRECATE
#define _CRT_NON_CONFORMING_SWPRINTFS
#endif
#include <tchar.h>
#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <windows.h>
#include <wininet.h>
#define PLATFORM_LITTLE_ENDIAN
#ifdef WIN64
#define PLATFORM_64BIT
#else
#define PLATFORM_32BIT
#endif
#define PLATFORM_WINDOWS
#define PLATFORM_DEFINED // The platform is known now
#endif
// Defines for Mac
#if !defined(PLATFORM_DEFINED) && defined(__APPLE__) // Mac BSD API
// Macintosh
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <errno.h>
  // Support for PowerPC on Mac OS X
#if (__ppc__ == 1) || (__POWERPC__ == 1) || (_ARCH_PPC == 1)
#include <stdint.h>
#include <CoreFoundation/CFByteOrder.h>
#endif
#define PKEXPORT
#define __SYS_ZLIB
#define __SYS_BZLIB
#ifndef __BIG_ENDIAN__
#define PLATFORM_LITTLE_ENDIAN
#endif
#define PLATFORM_MAC
#define PLATFORM_DEFINED // The platform is known now
#endif
// Assumption: we are not on Windows nor Macintosh, so this must be linux *grin*
#if !defined(PLATFORM_DEFINED)
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>
#include <errno.h>
#define PLATFORM_LITTLE_ENDIAN
#define PLATFORM_LINUX
#define PLATFORM_DEFINED
#endif
// Definition of Windows-specific structures for non-Windows platforms
#ifndef PLATFORM_WINDOWS
#if __LP64__
#define PLATFORM_64BIT
#else
#define PLATFORM_32BIT
#endif
// Typedefs for ANSI C
typedef unsigned char BYTE;
typedef unsigned short USHORT;
typedef int LONG;
typedef unsigned int DWORD;
typedef unsigned long DWORD_PTR;
typedef long LONG_PTR;
typedef long INT_PTR;
typedef long long LONGLONG;
typedef unsigned long long ULONGLONG;
typedef void * HANDLE;
typedef void * LPOVERLAPPED; // Unsupported on Linux and Mac
typedef char TCHAR;
typedef unsigned int LCID;
typedef LONG * PLONG;
typedef DWORD * LPDWORD;
typedef BYTE * LPBYTE;
#ifdef PLATFORM_32BIT
#define _LZMA_UINT32_IS_ULONG
#endif
// Some Windows-specific defines
#ifndef MAX_PATH
#define MAX_PATH 1024
#endif
#define WINAPI
#define FILE_BEGIN SEEK_SET
#define FILE_CURRENT SEEK_CUR
#define FILE_END SEEK_END
#define _T(x) x
#define _tcslen strlen
#define _tcscpy strcpy
#define _tcscat strcat
#define _tcsrchr strrchr
#define _tprintf printf
#define _stprintf sprintf
#define _tremove remove
#define _stricmp strcasecmp
#define _strnicmp strncasecmp
#define _tcsnicmp strncasecmp
#endif // !WIN32
// 64-bit calls are supplied by "normal" calls on Mac
#if defined(PLATFORM_MAC)
#define stat64 stat
#define fstat64 fstat
#define lseek64 lseek
#define off64_t off_t
#define O_LARGEFILE 0
#endif
// Platform-specific error codes for UNIX-based platforms
#if defined(PLATFORM_MAC) || defined(PLATFORM_LINUX)
#define ERROR_SUCCESS 0
#define ERROR_FILE_NOT_FOUND ENOENT
#define ERROR_ACCESS_DENIED EPERM
#define ERROR_INVALID_HANDLE EBADF
#define ERROR_NOT_ENOUGH_MEMORY ENOMEM
#define ERROR_NOT_SUPPORTED ENOTSUP
#define ERROR_INVALID_PARAMETER EINVAL
#define ERROR_DISK_FULL ENOSPC
#define ERROR_ALREADY_EXISTS EEXIST
#define ERROR_INSUFFICIENT_BUFFER ENOBUFS
#define ERROR_BAD_FORMAT 1000 // No such error code under Linux
#define ERROR_NO_MORE_FILES 1001 // No such error code under Linux
#define ERROR_HANDLE_EOF 1002 // No such error code under Linux
#define ERROR_CAN_NOT_COMPLETE 1003 // No such error code under Linux
#define ERROR_FILE_CORRUPT 1004 // No such error code under Linux
#endif
#ifdef PLATFORM_LITTLE_ENDIAN
#define BSWAP_INT16_UNSIGNED(a) (a)
#define BSWAP_INT16_SIGNED(a) (a)
#define BSWAP_INT32_UNSIGNED(a) (a)
#define BSWAP_INT32_SIGNED(a) (a)
#define BSWAP_INT64_SIGNED(a) (a)
#define BSWAP_INT64_UNSIGNED(a) (a)
#define BSWAP_ARRAY16_UNSIGNED(a,b) {}
#define BSWAP_ARRAY32_UNSIGNED(a,b) {}
#define BSWAP_ARRAY64_UNSIGNED(a,b) {}
#define BSWAP_PART_HEADER(a) {}
#define BSWAP_TMPQUSERDATA(a) {}
#define BSWAP_TMPQHEADER(a) {}
#else
#ifdef __cplusplus
extern "C" {
#endif
int16_t SwapInt16(uint16_t);
uint16_t SwapUInt16(uint16_t);
int32_t SwapInt32(uint32_t);
uint32_t SwapUInt32(uint32_t);
int64_t SwapInt64(uint64_t);
uint64_t SwapUInt64(uint64_t);
void ConvertUInt16Buffer(void * ptr, size_t length);
void ConvertUInt32Buffer(void * ptr, size_t length);
void ConvertUInt64Buffer(void * ptr, size_t length);
void ConvertPartHeader(void * partHeader);
void ConvertTMPQUserData(void *userData);
void ConvertTMPQHeader(void *header);
#ifdef __cplusplus
}
#endif
#define BSWAP_INT16_SIGNED(a) SwapInt16((a))
#define BSWAP_INT16_UNSIGNED(a) SwapUInt16((a))
#define BSWAP_INT32_SIGNED(a) SwapInt32((a))
#define BSWAP_INT32_UNSIGNED(a) SwapUInt32((a))
#define BSWAP_INT64_SIGNED(a) SwapInt64((a))
#define BSWAP_INT64_UNSIGNED(a) SwapUInt64((a))
#define BSWAP_ARRAY16_UNSIGNED(a,b) ConvertUInt16Buffer((a),(b))
#define BSWAP_ARRAY32_UNSIGNED(a,b) ConvertUInt32Buffer((a),(b))
#define BSWAP_ARRAY64_UNSIGNED(a,b) ConvertUInt64Buffer((a),(b))
#define BSWAP_PART_HEADER(a) ConvertPartHeader(a)
#define BSWAP_TMPQUSERDATA(a) ConvertTMPQUserData((a))
#define BSWAP_TMPQHEADER(a) ConvertTMPQHeader((a))
#endif
#endif // __STORMPORT_H__
|
module.exports = {
"src/**/*.ts": "eslint -c .eslintrc.js",
"*.{ts,json,md}": "prettier --write"
};
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'segmentation.views.demo', name='demo'),
# Examples:
# url(r'^$', 'example_project.views.home', name='home'),
# url(r'^example_project/', include('example_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
import unittest
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
import debt_snowball as ds
class TestWebResponses(unittest.TestCase):
def setUp(self):
self.c = Client(ds.application, BaseResponse)
def test_file_404(self):
"""Any file request should return 404"""
resp = self.c.get('/favicon.ico')
self.assertEqual(resp.status_code, 404)
def test_valid_get(self):
"""Test a valid get"""
resp = self.c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertIn('Snowball debt paydown', resp.data.decode('utf-8'))
def test_invalid_method(self):
"""Test an invalid method"""
resp = self.c.head('/')
self.assertEqual(resp.status_code, 400)
def test_empty_post(self):
"""Sent an empty post"""
resp = self.c.post('/')
self.assertEqual(resp.status_code, 200)
self.assertIn('Snowball debt paydown', resp.data.decode('utf-8'))
def test_incomplete_values(self):
"""Send a line with incomplete data"""
resp = self.c.post('/', data={'row_count': '10', 'debt_name_1': 'test_name',
'balance_1': '0', 'payment_1': '0', 'apr_1':''})
self.assertIn('All fields on a line must be filled out.', resp.data.decode('utf-8'))
def test_too_few_debts(self):
"""Send only one debt"""
resp = self.c.post('/', data={'row_count': '1', 'debt_name_1': 'test_name',
'balance_1': '0', 'payment_1': '0', 'apr_1':'5.3'})
self.assertIn('Two or more debts must be provided', resp.data.decode('utf-8'))
def test_negative_numbers(self):
"""Throw exception on negative numbers"""
resp = self.c.post('/', data={'row_count': '1', 'debt_name_1': 'test_name',
'balance_1': '1', 'payment_1': '1', 'apr_1':'-5.3'})
self.assertIn('All numbers must be positive.', resp.data.decode('utf-8'))
def test_duplicate_names(self):
"""Throw exception on duplicate debt names"""
resp = self.c.post('/', data={'row_count': '2', 'debt_name_1': 'test_name',
'balance_1': '1', 'payment_1': '1', 'apr_1':'5.3',
'debt_name_2': 'test_name', 'balance_2': '1',
'payment_2': '1', 'apr_2': '5.3'})
self.assertIn('To avoid confusion, all debts must have unique names.', resp.data.decode('utf-8'))
def test_invalid_values(self):
"""Throw an exception on non-numeric values"""
resp = self.c.post('/', data={'row_count': '1', 'debt_name_1': 'test_name',
'balance_1': 'Dog', 'payment_1': '1', 'apr_1':'-5.3'})
self.assertIn('Balance, payment, and APR must be numeric.', resp.data.decode('utf-8'))
def test_rising_balance(self):
"""Throw an exception when the debt value isn't going down"""
resp = self.c.post('/', data={'row_count': '2', 'debt_name_1': 'test_name_1',
'balance_1': '95113', 'payment_1': '100', 'apr_1':'5.375',
'debt_name_2': 'test_name_2', 'balance_2': '1',
'payment_2': '1', 'apr_2': '5.3'})
self.assertIn("Debt 'test_name_1' does", resp.data.decode('utf-8'))
def test_valid_run(self):
"""Run valid values all the way through and get a result"""
resp = self.c.post('/', data={'row_count': '3',
'debt_name_1': 'debt a', 'balance_1':'10000',
'payment_1': '200', 'apr_1': '12',
'debt_name_2': 'debt b', 'balance_2':'10000',
'payment_2': '300', 'apr_2': '12',
'debt_name_3': 'debt c', 'balance_3':'10000',
'payment_3': '150', 'apr_3': '12'})
data = resp.data.decode('utf-8')
self.assertIn('debt a', data)
self.assertIn('debt b', data)
self.assertIn('debt c', data)
self.assertIn('$222.73', data)
self.assertIn('$250.55', data)
self.assertIn('$502.83', data)
|
/**
* Latin (lingua Latina) language functions
*
* @author Santhosh Thottingal
*/
( function ( $ ) {
'use strict';
$.i18n.languages.la = $.extend( {}, $.i18n.languages['default'], {
convertGrammar: function ( word, form ) {
switch ( form ) {
case 'genitive':
// only a few declensions, and even for those mostly the singular only
word = word.replace( /u[ms]$/i, 'i' ); // 2nd declension singular
word = word.replace( /ommunia$/i, 'ommunium' ); // 3rd declension neuter plural (partly)
word = word.replace( /a$/i, 'ae' ); // 1st declension singular
word = word.replace( /libri$/i, 'librorum' ); // 2nd declension plural (partly)
word = word.replace( /nuntii$/i, 'nuntiorum' ); // 2nd declension plural (partly)
word = word.replace( /tio$/i, 'tionis' ); // 3rd declension singular (partly)
word = word.replace( /ns$/i, 'ntis' );
word = word.replace( /as$/i, 'atis' );
word = word.replace( /es$/i, 'ei' ); // 5th declension singular
break;
case 'accusative':
// only a few declensions, and even for those mostly the singular only
word = word.replace( /u[ms]$/i, 'um' ); // 2nd declension singular
			word = word.replace( /ommunia$/i, 'ommunia' ); // 3rd declension neuter plural (unchanged in the accusative)
			word = word.replace( /a$/i, 'am' ); // 1st declension singular
word = word.replace( /libri$/i, 'libros' ); // 2nd declension plural (partly)
word = word.replace( /nuntii$/i, 'nuntios' );// 2nd declension plural (partly)
word = word.replace( /tio$/i, 'tionem' ); // 3rd declension singular (partly)
word = word.replace( /ns$/i, 'ntem' );
word = word.replace( /as$/i, 'atem' );
word = word.replace( /es$/i, 'em' ); // 5th declension singular
break;
case 'ablative':
// only a few declensions, and even for those mostly the singular only
word = word.replace( /u[ms]$/i, 'o' ); // 2nd declension singular
word = word.replace( /ommunia$/i, 'ommunibus' ); // 3rd declension neuter plural (partly)
word = word.replace( /a$/i, 'a' ); // 1st declension singular
word = word.replace( /libri$/i, 'libris' ); // 2nd declension plural (partly)
word = word.replace( /nuntii$/i, 'nuntiis' ); // 2nd declension plural (partly)
word = word.replace( /tio$/i, 'tione' ); // 3rd declension singular (partly)
word = word.replace( /ns$/i, 'nte' );
word = word.replace( /as$/i, 'ate' );
word = word.replace( /es$/i, 'e' ); // 5th declension singular
break;
}
return word;
}
} );
}( jQuery ) );
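// Example (illustrative):
//   $.i18n.languages.la.convertGrammar( 'dominus', 'genitive' );   // => 'domini'
//   $.i18n.languages.la.convertGrammar( 'dominus', 'accusative' ); // => 'dominum'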
|
'''
Count the rows in a time range or on the whole table.
Tables and time range are read from a CSV file.
Outputs the results to a CSV file.
'''
from google.cloud import bigquery
import uuid
import pandas as pd
import bq_lib as bq
from config import config, load_config
from lib import make_gen_csv, log_info
import rs
#
# --> RedShift
#
def rs_configure(options):
project = options['PROJECT']
settings = config[project]['rs']
schema = settings['schema']
time_column = 'timestamp'
aws_json = '../../etc/{proj}/aws.json'.format(proj=project)
aws_config = load_config(aws_json)
engine = rs.connect(settings['game_title'], aws_config)
if options['--in']:
csv_tables = options['--in']
else:
csv_tables = "rs_{project}_minmax_day.csv".format(project=project)
if options['--out']:
csv_count = options['--out']
else:
csv_count = "rs_{project}_table_daily_rows.csv".format(project=project)
rs_query = rs.make_run(engine, schema)
count_rows = rs_make_count_rows_daily(time_column)
read_count = rs_make_count_daily(rs_query, count_rows)
gen_tables = make_gen_csv(csv_tables)
inject = {'ignore': settings['ignore_table'],
'gen_tables': gen_tables,
'read_count': read_count,
'csv_count': csv_count,
}
return inject
def rs_make_count_rows_daily(time_column):
def count_rows_daily(table, start_day, end_day):
return """
SELECT
date({timestamp}) as day_part,
count(*) as total_rows
FROM {tablename}
WHERE date({timestamp}) between '{start_day}' and '{end_day}'
GROUP by day_part
ORDER by day_part
""".format(tablename=table,
timestamp=time_column,
start_day=start_day, end_day=end_day)
return count_rows_daily
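# Example of the SQL produced by count_rows_daily (illustrative table and dates):
#   SELECT date(timestamp) as day_part, count(*) as total_rows
#   FROM events
#   WHERE date(timestamp) between '2018-01-01' and '2018-01-31'
#   GROUP by day_part
#   ORDER by day_part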
def rs_make_count_daily(rs_query, count_rows_daily):
def count_daily(table, start_day, end_day):
return rs_query(count_rows_daily(table, start_day, end_day))
return count_daily
#
# --> BigQuery
#
def bq_configure_daily(options):
project = options['PROJECT']
settings = config[project]['bq']
# project:dataset.tablename
schema = ':'.join([settings['project'], settings['dataset']])
time_column = 'timestamp'
gcp_cfg = '../../etc/{proj}/gcp.json'.format(proj=project)
gc_client = bigquery.Client.from_service_account_json(gcp_cfg)
dataset = gc_client.dataset(settings['dataset'])
if options['--in']:
csv_tables = options['--in']
else:
csv_tables = "bq_{project}_minmax_day.csv".format(project=project)
if options['--out']:
csv_count = options['--out']
else:
csv_count = "bq_{project}_table_daily_rows.csv".format(project=project)
count_rows = bq_make_count_rows_daily(time_column)
read_count = bq_make_count_daily(gc_client, schema, count_rows)
gen_tables = make_gen_csv(csv_tables)
inject = {'csv_count': csv_count,
'gen_tables': gen_tables,
'read_count': read_count,
'ignore': settings['ignore_table'],
}
return inject
def bq_configure_whole(options):
project = options['PROJECT']
settings = config[project]['bq']
# project:dataset.tablename
schema = ':'.join([settings['project'], settings['dataset']])
gcp_cfg = '../../etc/{proj}/gcp.json'.format(proj=project)
gc_client = bigquery.Client.from_service_account_json(gcp_cfg)
dataset = gc_client.dataset(settings['dataset'])
csv_tables = "bq_{project}_minmax_day.csv".format(project=project)
csv_count = "bq_{project}_table_rows.csv".format(project=project)
read_count = bq_make_whole_count(gc_client, schema)
gen_tables = make_gen_csv(csv_tables)
inject = {'csv_count': csv_count,
'gen_tables': gen_tables,
'read_count': read_count,
'ignore': settings['ignore_table'],
}
return inject
def bq_configure(options):
if options['--daily']:
return bq_configure_daily(options)
elif options['--whole']:
return bq_configure_whole(options)
else:
return bq_configure_daily(options)
def bq_run_query(client, sql):
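# Starts an asynchronous query job, blocks until it completes, then lazily yields the result rows.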
query_job = client.run_async_query(str(uuid.uuid4()), sql)
query_job.begin()
query_job.result()
destination_table = query_job.destination
destination_table.reload()
for row in destination_table.fetch_data():
yield row
def bq_make_count_rows_daily(time_column):
def count_rows_daily(table, start_day, end_day):
return """
SELECT
DATE({timestamp}) AS day_part,
COUNT({timestamp}) AS total_rows
FROM
[{table_id}]
WHERE
DATE({timestamp}) BETWEEN '{start_day}'
AND '{end_day}'
GROUP BY
day_part
ORDER BY
day_part
""".format(table_id=table,
timestamp=time_column,
start_day=start_day, end_day=end_day)
return count_rows_daily
def bq_make_count_daily(client, table_pre, count_rows_daily):
def count_daily(table_id, start_day, end_day):
table = '.'.join([table_pre, table_id])
return bq_run_query(client, count_rows_daily(table, start_day, end_day))
return count_daily
def _bq_count_whole_rows(table, column):
return """
SELECT
COUNT({count_column}) AS total_rows
FROM
[{table_id}]
""".format(table_id=table, count_column=column)
def bq_make_whole_count(client, table_prefix):
def count_whole(table_name, column):
table = '.'.join([table_prefix, table_name])
return bq_run_query(client, _bq_count_whole_rows(table, column))
return count_whole
#
# FUNCTIONALITY
#
def make_filter_tables(ignore):
log_info("skip tables {ignore}".format(ignore=','.join(ignore)))
filter_ignore = lambda tables: (table for table in tables if table not in ignore)
return filter_ignore
# differences on TABLE
# tables do not have a _timestamp_ column
# list differences on the whole table as a first test
def make_count_whole(count_rows):
def count_whole(table_info):
table, column = table_info
log_info("read row count for {table}".format(table=table))
result = count_rows(table, column)
for r in result:
yield {'tablename': table, 'total_rows': r[0]}
return count_whole
def count_rows_whole(tables, read_count, csv_out):
'''
tables
|> read_count
|> flatten
|> to_dataframe
|> to_csv
'''
results = (read_count(table) for table in tables)
part_tables = (item for result in results for item in result)
df = pd.DataFrame(part_tables)
df.to_csv(csv_out, header=False, index=False,
columns=['tablename', 'total_rows'])
# differences in DAY ranges
# tables have a _timestamp_ column
def make_count_daily(count_rows_daily):
def count_daily(table_info):
(table, start_day, end_day) = table_info
log_info("read row count per day for {table}".format(table=table))
result = count_rows_daily(table, start_day, end_day)
for r in result:
yield {'tablename': table, 'on_day': r[0], 'total_rows': r[1]}
return count_daily
def count_rows_daily(tables, read_daily_count, csv_out):
'''
tables
|> read_daily_count
|> flatten
|> to_dataframe
|> to_csv
'''
def read_from(generators):
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=8)
results = pool.map(lambda result: [r for r in result], generators)
pool.close()
pool.join()
return results
result_generators = (read_daily_count(table) for table in tables)
results = read_from(result_generators)
# flatten results
part_tables = (item for result in results for item in result)
df = pd.DataFrame(part_tables)
df.to_csv(csv_out, header=False, index=False,
columns=['tablename', 'on_day', 'total_rows'])
def main(options):
end_day = options['END_DAY']
if options['--rs']:
inject = rs_configure(options)
elif options['--bq']:
inject = bq_configure(options)
if options['--daily']:
ignore = inject['ignore']
gen_tables = inject['gen_tables']
gen_tables = ((table, start_day, end_day)
for table, start_day, _ in gen_tables
if table not in ignore and start_day)
csv_count = inject['csv_count']
f_db_daily = inject['read_count']
f_count = make_count_daily(f_db_daily)
count_rows = count_rows_daily
if options['--whole']:
gen_tables = inject['gen_tables']
csv_count = inject['csv_count']
f_db_count = inject['read_count']
f_count = make_count_whole(f_db_count)
count_rows = count_rows_whole
count_rows(gen_tables, f_count, csv_count)
# Options probably must start with a unique letter:
# --rsload and --rsunload do not work
_usage="""
Perform row count
Create csv with tablename,on_day,row_count
Usage:
db_count (--rs | --bq) (--daily [--column=<c>] | --whole) [--in=<i>] [--out=<o>] PROJECT END_DAY
Arguments:
PROJECT name of the project
END_DAY upper bound in YYYYMMDD for row count
Options:
-h --help show this
--rs use Redshift
--bq use Bigquery
--daily only tables with time-column (events, facts)
--column=<c> name of time-column
--whole only tables with no time-column (dimensions)
--in=<i> read tables from this file
--out=<o> write row counts to this file
"""
from docopt import docopt
if __name__ == '__main__':
options = docopt(_usage)
main(options)
|
"""
sites.py
Created by Daniel Magee on 2008-01-29.
Copyright (c) 2008 UCO/Lick Observatory. All rights reserved.
"""
import ephem as E
import math
class Site(object):
"""Observatory site class."""
def __init__(self, siteid):
try:
self.siteid = siteid
except KeyError:
print 'ERROR: site %s not found.' % siteid
def __setattr__(self, name, value):
if name == 'siteid':
object.__setattr__(self, name, value)
if value is not None:
for k,v in self._siteData[value].items():
object.__setattr__(self, k, v)
else:
object.__setattr__(self, name, value)
def observer(self):
"""Returns a pyephem Observer object for a site."""
obs = E.Observer()
obs.long = -1*E.degrees(self._siteData[self.siteid]['longitude'])
obs.lat = E.degrees(self._siteData[self.siteid]['latitude'])
obs.elevation = self._siteData[self.siteid]['elevation']
obs.temp = self._siteData[self.siteid]['temperature']
obs.pressure = self._siteData[self.siteid]['pressure']
obs.epoch = 2000.0
obs.horizon = -1*math.sqrt(2*obs.elevation/E.earth_radius)
return obs
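# Example usage (illustrative sketch):
#   site = Site('keck')
#   obs = site.observer()
#   print obs.lat, obs.long, obs.elevation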
def sites(self):
"""Returns a list of sites."""
sitekeys = self._siteData.keys()
sitekeys.sort()
site_names = []
for s in sitekeys:
site_names.append([s, self._siteData[s]['name']])
return site_names
_siteData = {
# longitudes +W -E, latitudes +N -S
'kpno' : {'name':'Kitt Peak National Observatory',
'longitude':'111:36.0',
'latitude':'31:57.8',
'elevation':2120.,
'temperature':15.0,
'pressure':760.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'ctio' : {'name':'Cerro Tololo Interamerican Observatory',
'longitude':'70:48:54.00',
'latitude':'-30:09:55.00',
'elevation':2215.,
'temperature':7.0,
'pressure':740.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'lasilla' : {'name':'European Southern Observatory: La Silla',
'longitude':'70:43.8',
'latitude':'-29:15.4',
'elevation':2347,
'temperature':10.0,
'pressure':770.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'paranal' : {'name':'European Southern Observatory: Paranal',
'longitude':'70:24.2',
'latitude':'-24:37.5',
'elevation':2635,
'temperature':10.0,
'pressure':750.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'lick' : {'name':'Lick Observatory',
'longitude':'121:38.2',
'latitude':'37:20.6',
'elevation':1290,
'temperature':15.0,
'pressure':1000.0,
'timezone':'US/Pacific',
'tzname':'Pacific'},
'mmto' : {'name':'MMT Observatory',
'longitude':'110:53.1',
'latitude':'31:41.3',
'elevation':2600,
'temperature':14.0,
'pressure':980.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'cfht' : {'name':'Canada-France-Hawaii Telescope',
'longitude':'155:28.3',
'latitude':'19:49.6',
'elevation':4215,
'temperature':0.0,
'pressure':615.0,
'timezone':'US/Hawaii',
'tzname':'Hawaiian'},
'lapalma' : {'name':'Roque de los Muchachos, La Palma',
'longitude':'17:52.8',
'latitude':'28:45.5',
'elevation':2327,
'temperature':8.0,
'pressure':768.0,
'timezone':'Atlantic/Canary',
'tzname':'Canary Islands'},
'mso' : {'name':'Mt. Stromlo Observatory',
'longitude':'210:58:32.4',
'latitude':'-35:19:14.34',
'elevation':767,
'temperature':15.0,
'pressure':1010.0,
'timezone':'Australia/Sydney',
'tzname':'Australian'},
'sso' : {'name':'Siding Spring Observatory',
'longitude':'210:56:19.70',
'latitude':'-31:16:24.10',
'elevation':1149,
'temperature':15.0,
'pressure':1010.0,
'timezone':'Australia/Sydney',
'tzname':'Australian'},
'aao' : {'name':'Anglo-Australian Observatory',
'longitude':'210:56:2.09',
'latitude':'-31:16:37.34',
'elevation':1164,
'temperature':15.0,
'pressure':1010.0,
'timezone':'Australia/Sydney',
'tzname':'Australian'},
'mcdonald' : {'name':'McDonald Observatory',
'longitude':'104:01:18.00',
'latitude':'30:40:18.00',
'elevation':2075,
'temperature':10.0,
'pressure':800.0,
'timezone':'US/Central',
'tzname':'Central'},
'lco' : {'name':'Las Campanas Observatory',
'longitude':'70:42.1',
'latitude':'-29:0.2',
'elevation':2282,
'temperature':7.0,
'pressure':760.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'mtbigelow' : {'name':'Catalina Observatory: 61 inch telescope',
'longitude':'110:43.9',
'latitude':'32:25.0',
'elevation':2510.,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'dao' : {'name':'Dominion Astrophysical Observatory',
'longitude':'123:25.0',
'latitude':'48:31.3',
'elevation':229,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Pacific',
'tzname':'Pacific'},
'spm' : {'name':'Observatorio Astronomico Nacional, San Pedro Martir',
'longitude':'115:29:13',
'latitude':'31:01:45',
'elevation':2830,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Pacific',
'tzname':'Pacific'},
'tona' : {'name':'Observatorio Astronomico Nacional, Tonantzintla',
'longitude':'98:18:50',
'latitude':'19:01:58',
'elevation':0,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Central',
'tzname':'Central'},
'palomar' : {'name':'The Hale Telescope',
'longitude':'116:51:46.80',
'latitude':'33:21:21.6',
'elevation':1706.,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Pacific',
'tzname':'Pacific'},
'mdm' : {'name':'Michigan-Dartmouth-MIT Observatory',
'longitude':'111:37.0',
'latitude':'31:57.0',
'elevation':1938.5,
'temperature':15.0,
'pressure':760.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'bmo' : {'name':'Black Moshannon Observatory',
'longitude':'78:00.3',
'latitude':'40:55.3',
'elevation':738,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Eastern',
'tzname':'Eastern'},
'keck' : {'name':'W. M. Keck Observatory',
'longitude':'155:28:28.11',
'latitude':'19:49:34.51',
'elevation':4160,
'temperature':0.0,
'pressure':615.0,
'timezone':'US/Hawaii',
'tzname':'Hawaiian'},
'subaru' : {'name':'Subaru Telescope',
'longitude':'155:28:33.67',
'latitude':'19:49:31.81',
'elevation':4163,
'temperature':0.0,
'pressure':615.0,
'timezone':'US/Hawaii',
'tzname':'Hawaiian'},
'apo' : {'name':'Apache Point Observatory',
'longitude':'105:49.2',
'latitude':'32:46.8',
'elevation':2798.,
'temperature':10.0,
'pressure':760.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'lowell' : {'name':'Lowell Observatory',
'longitude':'111:32.1',
'latitude':'35:05.8',
'elevation':2198.,
'temperature':15.0,
'pressure':1010.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'flwo' : {'name':'Whipple Observatory',
'longitude':'110:52:39',
'latitude':'31:40:51.4',
'elevation':2320,
'temperature':14.0,
'pressure':980.0,
'timezone':'US/Mountain',
'tzname':'Mountain'},
'irtf' : {'name':'NASA Infrared Telescope Facility',
'longitude':'155:28:19.19564',
'latitude':'19:49:34.38594',
'elevation':4168,
'temperature':0.0,
'pressure':615.0,
'timezone':'US/Hawaii',
'tzname':'Hawaiian'},
'gemini-north' : {'name':'Gemini North Observatory',
'longitude':'155:28:08.56831',
'latitude':'19:49:25.68521',
'elevation':4213.4,
'temperature':0.0,
'pressure':615.0,
'timezone':'US/Hawaii',
'tzname':'Hawaiian'},
'gemini-south' : {'name':'Gemini South Observatory',
'longitude':'70:43.4',
'latitude':'-30:13.7',
'elevation':2737.,
'temperature':5.0,
'pressure':735.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'esontt' : {'name':'European Southern Observatory, NTT, La Silla',
'longitude':'70:43:54.272',
'latitude':'-29:15:18.440',
'elevation':2375,
'temperature':10.0,
'pressure':770.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'eso36m' : {'name':'European Southern Observatory, 3.6m Telescope, La Silla',
'longitude':'70:43:46.606',
'latitude':'-29:15:25.814',
'elevation':2400,
'temperature':10.0,
'pressure':770.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'vlt' : {'name':'European Southern Observatory, VLT, Paranal',
'longitude':'70:24:10.1',
'latitude':'-24:37:31.5',
'elevation':2648,
'temperature':10.0,
'pressure':750.0,
'timezone':'Chile/Continental',
'tzname':'Chilean'},
'mgo' : {'name':'Mount Graham Observatory',
'longitude':'109:53:30',
'latitude':'32:42:06',
'elevation':3181,
'temperature':15.0,
'pressure':760.0,
'timezone':'US/Mountain',
'tzname':'Mountain'}
}
|
/*
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INK_STROKE_MODELER_INTERNAL_PREDICTION_KALMAN_FILTER_KALMAN_FILTER_H_
#define INK_STROKE_MODELER_INTERNAL_PREDICTION_KALMAN_FILTER_KALMAN_FILTER_H_
#include "ink_stroke_modeler/internal/prediction/kalman_filter/matrix.h"
namespace ink {
namespace stroke_model {
// Generates a state estimation based upon observations which can then be used
// to compute predicted values.
class KalmanFilter {
public:
KalmanFilter(const Matrix4& state_transition,
const Matrix4& process_noise_covariance,
const Vec4& measurement_vector,
double measurement_noise_variance, int min_stable_iteration);
// Get the estimation of current state.
const Vec4& GetStateEstimation() const { return state_estimation_; }
// Will return true only if the Kalman filter has seen enough data and is
// considered stable.
bool Stable() const { return iter_num_ >= min_stable_iteration_; }
// Update the observation of the system.
void Update(double observation);
void Reset();
// Returns the number of times Update() has been called since the last time
// the KalmanFilter was reset.
int NumIterations() const { return iter_num_; }
private:
void Predict();
// Estimate of the latent state
// Symbol: X
// Dimension: state_vector_dim_
Vec4 state_estimation_;
// The covariance of the difference between the prior predicted latent
// state and the posterior estimated latent state (the so-called "innovation").
// Symbol: P
Matrix4 error_covariance_matrix_;
// For position, state transition matrix is derived from basic physics:
// new_x = x + v * dt + 1/2 * a * dt^2 + 1/6 * jerk * dt^3
// new_v = v + a * dt + 1/2 * jerk * dt^2
// ...
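// For a state vector (position, velocity, acceleration, jerk) and a fixed
// timestep dt, this corresponds to (illustrative):
//   | 1  dt  dt^2/2  dt^3/6 |
//   | 0   1    dt    dt^2/2 |
//   | 0   0     1      dt   |
//   | 0   0     0       1   |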
// Matrix that transforms the current state into the next state
// Symbol: F
Matrix4 state_transition_matrix_;
// Process_noise_covariance_matrix_ is a time-varying parameter that will be
// estimated as part of the Kalman filter process.
// Symbol: Q
Matrix4 process_noise_covariance_matrix_;
// Vector to transform estimate to measurement.
// Symbol: H
const Vec4 measurement_vector_{0, 0, 0, 0};
// measurement_noise_ is a time-varying parameter that will be estimated as
// part of the Kalman filter process.
// Symbol: R
double measurement_noise_variance_;
// The first iteration at which the Kalman filter is considered stable enough
// to make a good estimate of the state.
int min_stable_iteration_;
// Tracks the number of update iterations that have occurred.
int iter_num_;
};
} // namespace stroke_model
} // namespace ink
#endif // INK_STROKE_MODELER_INTERNAL_PREDICTION_KALMAN_FILTER_KALMAN_FILTER_H_
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TriggerUpdateParameters(Model):
"""The properties for updating build triggers.
:param source_triggers: The collection of triggers based on source code
repository.
:type source_triggers:
list[~azure.mgmt.containerregistry.v2018_09_01.models.SourceTriggerUpdateParameters]
:param base_image_trigger: The trigger based on base image dependencies.
:type base_image_trigger:
~azure.mgmt.containerregistry.v2018_09_01.models.BaseImageTriggerUpdateParameters
"""
_attribute_map = {
'source_triggers': {'key': 'sourceTriggers', 'type': '[SourceTriggerUpdateParameters]'},
'base_image_trigger': {'key': 'baseImageTrigger', 'type': 'BaseImageTriggerUpdateParameters'},
}
def __init__(self, **kwargs):
super(TriggerUpdateParameters, self).__init__(**kwargs)
self.source_triggers = kwargs.get('source_triggers', None)
self.base_image_trigger = kwargs.get('base_image_trigger', None)
|
//
// HHAppStoreReceiptSqliteTool.h
// BXlive
//
// Created by bxlive on 2018/10/22.
// Copyright © 2018 cat. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "HHAppStoreReceipt.h"
@interface HHAppStoreReceiptSqliteTool : NSObject
+ (void)insertAppStoreReceipt:(HHAppStoreReceipt *)appStoreReceipt;
+ (void)deleteAppStoreReceipt:(HHAppStoreReceipt *)appStoreReceipt;
+ (void)queryAppStoreReceiptsBlock:(void(^)(NSArray *appStoreReceipts))block;
@end
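// Example usage (illustrative):
//   [HHAppStoreReceiptSqliteTool insertAppStoreReceipt:receipt];
//   [HHAppStoreReceiptSqliteTool queryAppStoreReceiptsBlock:^(NSArray *appStoreReceipts) {
//       NSLog(@"%@", appStoreReceipts);
//   }];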
|
//
// TOWebViewController.h
//
// Copyright 2013-2016 Timothy Oliver. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#import <UIKit/UIKit.h>
@interface TOWebViewController : UIViewController <UIWebViewDelegate>
/**
Initializes a new `TOWebViewController` object with the specified URL.
@param url The URL to the web page that the controller will initially display.
@return The newly initialized `TOWebViewController` object.
*/
- (instancetype)initWithURL:(NSURL *)url;
/**
Initializes a new `TOWebViewController` object with the specified URL string.
@param urlString The URL, as a string, of the web page that the controller will initially display.
@return The newly initialized `TOWebViewController` object.
*/
- (instancetype)initWithURLString:(NSString *)urlString;
/**
Get/set the current URL being displayed. (Will automatically start loading)
*/
@property (nonatomic,strong) NSURL *url;
/**
Get/set the request
*/
@property (nonatomic,strong) NSMutableURLRequest *urlRequest;
/**
The web view used to display the HTML content. You can access it through this
read-only property if you need to do anything specific, such as having it execute arbitrary JS code.
@warning Usage of the web view's delegate property is reserved by this view controller. Do not set it to another object.
*/
@property (nonatomic,readonly) UIWebView *webView;
/**
Shows a loading progress bar underneath the top navigation bar.
Default value is YES.
*/
@property (nonatomic,assign) BOOL showLoadingBar;
/**
Shows the URL of the web request currently being loaded, before the page's title attribute becomes available.
Default value is YES.
*/
@property (nonatomic,assign) BOOL showUrlWhileLoading;
/**
The tint colour of the page loading progress bar.
If not set on iOS 7 and above, the loading bar will defer to the app's global UIView tint color.
If not set on iOS 6 or below, it will default to the standard system blue tint color.
Default value is nil.
*/
@property (nonatomic,copy) UIColor *loadingBarTintColor;
/**
Hides all of the page navigation buttons, and on iPhone, hides the bottom toolbar.
Default value is NO.
*/
@property (nonatomic,assign) BOOL navigationButtonsHidden;
/**
An array of `UIBarButtonItem` objects that will be inserted alongside the default navigation
buttons.
These buttons will remain visible, even if `navigationButtonsHidden` is set to YES.
*/
@property (nonatomic,copy) NSArray *applicationBarButtonItems;
/**
Unlike `applicationBarButtonItems`, `UIBarButtonItem` objects set here
will ALWAYS remain on the left hand side of this controller's `UINavigationController`.
*/
@property (nonatomic, copy) NSArray *applicationLeftBarButtonItems;
/**
An array of `UIBarButtonItem` objects from `applicationBarButtonItems` that will
be disabled until pages are completely loaded.
*/
@property (nonatomic,copy) NSArray *loadCompletedApplicationBarButtonItems;
/**
Shows the iOS 'Activity' button which, when tapped, presents a series of actions the user may
take, including copying the page URL, tweeting the URL, or switching to Safari or Chrome.
Default value is YES.
*/
@property (nonatomic,assign) BOOL showActionButton;
/**
Shows the Done button when presented modally. When tapped, it dismisses the view controller.
Default value is YES.
*/
@property (nonatomic,assign) BOOL showDoneButton;
/**
If desired, override the title of the system 'Done' button to this string.
Default value is nil.
*/
@property (nonatomic,copy) NSString *doneButtonTitle;
/**
When web pages are loaded, the view controller's title property will be set to the page's
HTML title attribute.
Default value is YES.
*/
@property (nonatomic,assign) BOOL showPageTitles;
/**
Disables the contextual popups that can appear when the user taps and holds on a page link.
Default value is NO.
*/
@property (nonatomic,assign) BOOL disableContextualPopupMenu;
/**
Hides the default system background behind the outer bounds of the webview, and replaces it with
a background color derived from the page content currently being displayed by the web view.
Default value is NO.
*/
@property (nonatomic,assign) BOOL hideWebViewBoundaries;
/**
When the view controller is being presented as a modal popup, this block will be automatically performed
right after the view controller is dismissed.
*/
@property (nonatomic,copy) void (^modalCompletionHandler)(void);
/**
An optional block that when set, will have each incoming web load request forwarded to it, and can
determine whether to let them proceed or not.
*/
@property (nonatomic,copy) BOOL (^shouldStartLoadRequestHandler)(NSURLRequest *request, UIWebViewNavigationType navigationType);
/**
An optional block that when set, will be triggered each time the web view has finished a load operation.
*/
@property (nonatomic,copy) void (^didFinishLoadHandler)(UIWebView *webView);
/**
This can be used to override the default tint color of the navigation button icons.
This property is mainly for iOS 6 and lower. Where possible, you should use iOS 7's proper color styling
system instead.
*/
@property (nonatomic,strong) UIColor *buttonTintColor;
/**
On iOS 6 or below, this overrides the default opacity level of the bevel around the navigation buttons.
*/
@property (nonatomic,assign) CGFloat buttonBevelOpacity;
@end
|
import falcon
from peewee import IntegrityError
from aness.db import models
from aness.resources import BaseResource
from aness.schemas import UserSchema
from aness.helpers import token_required
class UserCollectionResource(BaseResource):
def on_get(self, req, resp):
# with self.db.atomic():
model_list = models.Users().select()
_schema = UserSchema(many=True)
unresult = _schema.dump(model_list)
resp.status = falcon.HTTP_200
resp.media = unresult.data
def on_post(self, req, resp):
_schema = UserSchema(many=False)
unresult = _schema.load(req.media)
user = unresult.data
try:
with self.db.atomic():
user.save()
except IntegrityError as e:
raise falcon.HTTPBadRequest('Integrity error: {}'.format(e))
resp.status = falcon.HTTP_201
resp.media = {'id': user.id}
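# Example request against UserCollectionResource (illustrative; assumes the
# resource is routed at /users and that UserSchema accepts a "name" field):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"name": "alice"}' http://localhost:8000/users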
class UserResource(BaseResource):
@token_required
def on_get(self, req, resp, id):
model_list = models.Users.get_or_none(models.Users.id == id)
_schema = UserSchema(many=False)
unresult = _schema.dump(model_list)
resp.status = falcon.HTTP_200
resp.media = unresult.data
def on_post(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = 'Server works!'
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFlexx(PythonPackage):
"""Write desktop and web apps in pure Python."""
homepage = "http://flexx.readthedocs.io"
pypi = "flexx/flexx-0.4.1.zip"
version('0.4.1', sha256='54be868f01d943018d0907821f2562f6eb31c568b3932abfd8518f75c29b8be1')
depends_on('py-setuptools', type='build')
depends_on('py-tornado', type=('build', 'run'))
|
// SPDX-License-Identifier: GPL-2.0+
//
// sy8106a-regulator.c - Regulator device driver for SY8106A
//
// Copyright (C) 2016 Ondřej Jirman <megous@megous.com>
// Copyright (c) 2017-2018 Icenowy Zheng <icenowy@aosc.io>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define SY8106A_REG_VOUT1_SEL 0x01
#define SY8106A_REG_VOUT_COM 0x02
#define SY8106A_REG_VOUT1_SEL_MASK 0x7f
#define SY8106A_DISABLE_REG BIT(0)
/*
* The I2C controlled voltage will only work when this bit is set; otherwise
* it will behave like a fixed regulator.
*/
#define SY8106A_GO_BIT BIT(7)
struct sy8106a {
struct regulator_dev *rdev;
struct regmap *regmap;
u32 fixed_voltage;
};
static const struct regmap_config sy8106a_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static const struct regulator_ops sy8106a_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
/* Enabling/disabling the regulator is not yet implemented */
};
/* Default limits measured in millivolts */
#define SY8106A_MIN_MV 680
#define SY8106A_MAX_MV 1950
#define SY8106A_STEP_MV 10
static const struct regulator_desc sy8106a_reg = {
.name = "SY8106A",
.id = 0,
.ops = &sy8106a_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = ((SY8106A_MAX_MV - SY8106A_MIN_MV) / SY8106A_STEP_MV) + 1,
.min_uV = (SY8106A_MIN_MV * 1000),
.uV_step = (SY8106A_STEP_MV * 1000),
.vsel_reg = SY8106A_REG_VOUT1_SEL,
.vsel_mask = SY8106A_REG_VOUT1_SEL_MASK,
/*
* This ramp_delay is a conservative default value which works on
* H3/H5 boards VDD-CPUX situations.
*/
.ramp_delay = 200,
.owner = THIS_MODULE,
};
/*
* I2C driver interface functions
*/
static int sy8106a_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct sy8106a *chip;
struct device *dev = &i2c->dev;
struct regulator_dev *rdev = NULL;
struct regulator_config config = { };
unsigned int reg, vsel;
int error;
chip = devm_kzalloc(&i2c->dev, sizeof(struct sy8106a), GFP_KERNEL);
if (!chip)
return -ENOMEM;
error = of_property_read_u32(dev->of_node, "silergy,fixed-microvolt",
&chip->fixed_voltage);
if (error)
return error;
if (chip->fixed_voltage < SY8106A_MIN_MV * 1000 ||
chip->fixed_voltage > SY8106A_MAX_MV * 1000)
return -EINVAL;
chip->regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
if (IS_ERR(chip->regmap)) {
error = PTR_ERR(chip->regmap);
dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
config.dev = &i2c->dev;
config.regmap = chip->regmap;
config.driver_data = chip;
config.of_node = dev->of_node;
config.init_data = of_get_regulator_init_data(dev, dev->of_node,
&sy8106a_reg);
if (!config.init_data)
return -ENOMEM;
/* Ensure GO_BIT is enabled when probing */
error = regmap_read(chip->regmap, SY8106A_REG_VOUT1_SEL, &reg);
if (error)
return error;
if (!(reg & SY8106A_GO_BIT)) {
vsel = (chip->fixed_voltage / 1000 - SY8106A_MIN_MV) /
SY8106A_STEP_MV;
error = regmap_write(chip->regmap, SY8106A_REG_VOUT1_SEL,
vsel | SY8106A_GO_BIT);
if (error)
return error;
}
/* Probe regulator */
rdev = devm_regulator_register(&i2c->dev, &sy8106a_reg, &config);
if (IS_ERR(rdev)) {
error = PTR_ERR(rdev);
dev_err(&i2c->dev, "Failed to register SY8106A regulator: %d\n", error);
return error;
}
chip->rdev = rdev;
i2c_set_clientdata(i2c, chip);
return 0;
}
static const struct of_device_id sy8106a_i2c_of_match[] = {
{ .compatible = "silergy,sy8106a" },
{ },
};
MODULE_DEVICE_TABLE(of, sy8106a_i2c_of_match);
static const struct i2c_device_id sy8106a_i2c_id[] = {
{ "sy8106a", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, sy8106a_i2c_id);
static struct i2c_driver sy8106a_regulator_driver = {
.driver = {
.name = "sy8106a",
.of_match_table = of_match_ptr(sy8106a_i2c_of_match),
},
.probe = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
module_i2c_driver(sy8106a_regulator_driver);
MODULE_AUTHOR("Ondřej Jirman <megous@megous.com>");
MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
MODULE_DESCRIPTION("Regulator device driver for Silergy SY8106A");
MODULE_LICENSE("GPL");
|
# -*- coding: utf-8 -*-
from hamcrest import *
from test.base import BaseTestCase
from test.fixtures.defaults import DEFAULT_UUID
from amplify.ext.phpfpm.objects.master import PHPFPMObject
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"
class PHPFPMObjectTestCase(BaseTestCase):
"""
Test case for PHPFPMObject (master).
"""
def test_init(self):
phpfpm_obj = PHPFPMObject(
local_id=123,
pid=2,
cmd='php-fpm: master process (/etc/php5/fpm/php-fpm.conf)',
conf_path='/etc/php5/fpm/php-fpm.conf',
workers=[3, 4]
)
assert_that(phpfpm_obj, not_none())
assert_that(phpfpm_obj.local_id_args, equal_to(
('php-fpm: master process (/etc/php5/fpm/php-fpm.conf)', '/etc/php5/fpm/php-fpm.conf')
))
assert_that(phpfpm_obj.local_id, equal_to(123))
assert_that(phpfpm_obj.definition, equal_to(
{'local_id': 123, 'type': 'phpfpm', 'root_uuid': DEFAULT_UUID}
))
assert_that(phpfpm_obj.definition_hash, equal_to(
'32e8faf1747e8fa5778388b2db268941abeba7140cd83c52712ef97eb571e6d2'
))
assert_that(phpfpm_obj.collectors, has_length(2))
def test_parse(self):
"""This test is only possible because there is a working config in the test container"""
phpfpm_obj = PHPFPMObject(
local_id=123,
pid=2,
cmd='php-fpm: master process (/etc/php5/fpm/php-fpm.conf)',
conf_path='/etc/php5/fpm/php-fpm.conf',
workers=[3, 4]
)
assert_that(phpfpm_obj, not_none())
parsed_conf = phpfpm_obj.parse()
assert_that(parsed_conf, not_none())
assert_that(parsed_conf, equal_to(
{
'pools': [
{
'status_path': '/status',
'name': 'www',
'file': '/etc/php5/fpm/pool.d/www.conf',
'listen': '/run/php/php7.0-fpm.sock'
},
{
'status_path': '/status',
'name': 'www2',
'file': '/etc/php5/fpm/pool.d/www2.conf',
'listen': '127.0.0.1:51'
}
],
'include': ['/etc/php5/fpm/pool.d/*.conf'],
'file': '/etc/php5/fpm/php-fpm.conf'
}
))
def test_properties(self):
"""
This test is meant to test some properties that have had intermittent
user bug reports.
"""
phpfpm_obj = PHPFPMObject(
pid=2,
cmd='php-fpm: master process (/etc/php5/fpm/php-fpm.conf)',
conf_path='/etc/php5/fpm/php-fpm.conf',
workers=[3, 4]
)
assert_that(phpfpm_obj, not_none())
assert_that(phpfpm_obj.local_id_args, equal_to(
(
'php-fpm: master process (/etc/php5/fpm/php-fpm.conf)',
'/etc/php5/fpm/php-fpm.conf'
)
))
assert_that(phpfpm_obj.local_id, equal_to(
'e5942daaa5bf35af722bac3b9582b17c07515f0f77936fb5c7f771c7736cc157'
))
assert_that(phpfpm_obj.definition, equal_to(
{
'local_id': 'e5942daaa5bf35af722bac3b9582b17c07515f0f77936fb5c7f771c7736cc157',
'type': 'phpfpm',
'root_uuid': DEFAULT_UUID
}
))
assert_that(phpfpm_obj.definition_hash, equal_to(
'6ee51f6b649782e5dd04db052e7a018372645756378a7a3de3356c2ae6ff3bd7'
))
|
//= link_directory ../stylesheets/glowstick .css
|
/*!
* @license base62.js Copyright(c) 2012 sasa+1
* https://github.com/sasaplus1/base62.js
* Released under the MIT license.
*/
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(global = global || self, factory(global.base62 = {}));
}(this, function (exports) { 'use strict';
var basicTable = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
/**
* create index map
*
* @param table base62 string table
*/
function createIndexMap(table) {
if (table === void 0) { table = basicTable; }
return table
.split('')
.reduce(function (result, value, index) {
result[value] = index;
return result;
}, {});
}
var basicIndexMap = createIndexMap();
/**
* decode to decimal number from base62 string
*
* @param str base62 string
* @param [baseTable=basicTable] base62 table
* @throws {TypeError} str is not a string
* @throws {Error} str is unexpected format
* @throws {Error} baseTable is not 62 in length
*/
function decode(str, baseTable) {
if (baseTable === void 0) { baseTable = basicTable; }
if (typeof str !== 'string') {
throw new TypeError("str must be a string: " + str);
}
if (!/^-?[\dA-Za-z]+$/.test(str)) {
throw new Error("unexpected format: " + str);
}
if (baseTable.length !== 62) {
throw new Error('baseTable must be 62 in length');
}
var indexMap = baseTable === basicTable ? basicIndexMap : createIndexMap(baseTable);
var isNegative = str[0] === '-';
var numbers = (isNegative ? str.slice(1) : str).split('');
var numbersLength = numbers.length;
var result = numbers.reduce(function (result, n, index) {
return result + indexMap[n] * Math.pow(62, numbersLength - index - 1);
}, 0);
return isNegative ? -result : result;
}
/**
* encode to base62 string from number
*
* @param num integer
* @param [baseTable=basicTable] base62 table
* @throws {TypeError} num is not an Integer
* @throws {Error} baseTable is not 62 in length
*/
function encode(num, baseTable) {
if (baseTable === void 0) { baseTable = basicTable; }
if (!Number.isSafeInteger(num)) {
throw new TypeError("num is must be an integer: " + num);
}
if (baseTable.length !== 62) {
throw new Error('baseTable must be 62 in length');
}
if (num === 0) {
return '0';
}
var result = [];
var n = Math.abs(num);
while (n > 0) {
result.unshift(baseTable[n % 62]);
n = Math.floor(n / 62);
}
return num < 0 ? "-" + result.join('') : result.join('');
}
exports.decode = decode;
exports.encode = encode;
Object.defineProperty(exports, '__esModule', { value: true });
}));
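// Example usage (illustrative; assumes CommonJS loading):
//   var base62 = require('./base62.legacy.js');
//   base62.encode(1000000); // => '4c92'
//   base62.decode('4c92');  // => 1000000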
//# sourceMappingURL=base62.legacy.js.map
|
"""Start Home Assistant."""
from __future__ import print_function
import argparse
import os
import platform
import subprocess
import sys
import threading
from typing import Optional, List
from homeassistant import monkey_patch
from homeassistant.const import (
__version__,
EVENT_HOMEASSISTANT_START,
REQUIRED_PYTHON_VER,
RESTART_EXIT_CODE,
)
def attempt_use_uvloop():
"""Attempt to use uvloop."""
import asyncio
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
def validate_python() -> None:
"""Validate that the right Python version is running."""
if sys.version_info[:3] < REQUIRED_PYTHON_VER:
print("Home Assistant requires at least Python {}.{}.{}".format(
*REQUIRED_PYTHON_VER))
sys.exit(1)
def ensure_config_path(config_dir: str) -> None:
"""Validate the configuration directory."""
import homeassistant.config as config_util
lib_dir = os.path.join(config_dir, 'deps')
# Test if configuration directory exists
if not os.path.isdir(config_dir):
if config_dir != config_util.get_default_config_dir():
print(('Fatal Error: Specified configuration directory does '
'not exist {} ').format(config_dir))
sys.exit(1)
try:
os.mkdir(config_dir)
except OSError:
print(('Fatal Error: Unable to create default configuration '
'directory {} ').format(config_dir))
sys.exit(1)
# Test if library directory exists
if not os.path.isdir(lib_dir):
try:
os.mkdir(lib_dir)
except OSError:
print(('Fatal Error: Unable to create library '
'directory {} ').format(lib_dir))
sys.exit(1)
def ensure_config_file(config_dir: str) -> str:
"""Ensure configuration file exists."""
import homeassistant.config as config_util
config_path = config_util.ensure_config_exists(config_dir)
if config_path is None:
print('Error getting configuration path')
sys.exit(1)
return config_path
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
import homeassistant.config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate.")
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument(
'-c', '--config',
metavar='path_to_config_dir',
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration")
parser.add_argument(
'--demo-mode',
action='store_true',
help='Start Home Assistant in demo mode')
parser.add_argument(
'--debug',
action='store_true',
help='Start Home Assistant in debug mode')
parser.add_argument(
'--open-ui',
action='store_true',
help='Open the webinterface in a browser')
parser.add_argument(
'--skip-pip',
action='store_true',
help='Skips pip install of required packages on startup')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Enable verbose logging to file.")
parser.add_argument(
'--pid-file',
metavar='path_to_pid_file',
default=None,
help='Path to PID file useful for running as daemon')
parser.add_argument(
'--log-rotate-days',
type=int,
default=None,
help='Enables daily log rotation and keeps up to the specified days')
parser.add_argument(
'--log-file',
type=str,
default=None,
help='Log file to write to. If not set, CONFIG/home-assistant.log '
'is used')
parser.add_argument(
'--log-no-color',
action='store_true',
help="Disable color logs")
parser.add_argument(
'--runner',
action='store_true',
help='On restart exit with code {}'.format(RESTART_EXIT_CODE))
parser.add_argument(
'--script',
nargs=argparse.REMAINDER,
help='Run one of the embedded scripts')
if os.name == "posix":
parser.add_argument(
'--daemon',
action='store_true',
help='Run Home Assistant as daemon')
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, 'daemon', False)
return arguments
def daemonize() -> None:
"""Move current process to daemon process."""
# Create first fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# Decouple fork
os.setsid()
# Create second fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# redirect standard file descriptors to devnull
infd = open(os.devnull, 'r')
outfd = open(os.devnull, 'a+')
sys.stdout.flush()
sys.stderr.flush()
os.dup2(infd.fileno(), sys.stdin.fileno())
os.dup2(outfd.fileno(), sys.stdout.fileno())
os.dup2(outfd.fileno(), sys.stderr.fileno())
def check_pid(pid_file: str) -> None:
"""Check that Home Assistant is not already running."""
# Check pid file
try:
with open(pid_file, 'r') as file:
pid = int(file.readline())
except IOError:
# PID File does not exist
return
# If we just restarted, we just found our own pidfile.
if pid == os.getpid():
return
try:
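# Sending signal 0 does not kill the process; it only checks that the pid exists.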
os.kill(pid, 0)
except OSError:
# PID does not exist
return
print('Fatal Error: HomeAssistant is already running.')
sys.exit(1)
def write_pid(pid_file: str) -> None:
"""Create a PID File."""
pid = os.getpid()
try:
with open(pid_file, 'w') as file:
file.write(str(pid))
except IOError:
print('Fatal Error: Unable to write pid file {}'.format(pid_file))
sys.exit(1)
def closefds_osx(min_fd: int, max_fd: int) -> None:
"""Make sure file descriptors get closed when we restart.
We cannot call close on guarded fds, and we cannot easily test which fds
are guarded. But we can set the close-on-exec flag on everything we want to
get rid of.
"""
from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC
for _fd in range(min_fd, max_fd):
try:
val = fcntl(_fd, F_GETFD)
if not val & FD_CLOEXEC:
fcntl(_fd, F_SETFD, val | FD_CLOEXEC)
except IOError:
pass
def cmdline() -> List[str]:
"""Collect path and arguments to re-execute the current hass instance."""
if os.path.basename(sys.argv[0]) == '__main__.py':
modulepath = os.path.dirname(sys.argv[0])
os.environ['PYTHONPATH'] = os.path.dirname(modulepath)
return [sys.executable] + [arg for arg in sys.argv if
arg != '--daemon']
return [arg for arg in sys.argv if arg != '--daemon']
def setup_and_run_hass(config_dir: str,
args: argparse.Namespace) -> Optional[int]:
"""Set up HASS and run."""
from homeassistant import bootstrap
# Run a simple daemon runner process on Windows to handle restarts
if os.name == 'nt' and '--runner' not in sys.argv:
nt_args = cmdline() + ['--runner']
while True:
try:
subprocess.check_call(nt_args)
sys.exit(0)
except subprocess.CalledProcessError as exc:
if exc.returncode != RESTART_EXIT_CODE:
sys.exit(exc.returncode)
if args.demo_mode:
config = {
'frontend': {},
'demo': {}
}
hass = bootstrap.from_config_dict(
config, config_dir=config_dir, verbose=args.verbose,
skip_pip=args.skip_pip, log_rotate_days=args.log_rotate_days,
log_file=args.log_file, log_no_color=args.log_no_color)
else:
config_file = ensure_config_file(config_dir)
print('Config directory:', config_dir)
hass = bootstrap.from_config_file(
config_file, verbose=args.verbose, skip_pip=args.skip_pip,
log_rotate_days=args.log_rotate_days, log_file=args.log_file,
log_no_color=args.log_no_color)
if hass is None:
return None
if args.open_ui:
# Imported here to avoid importing asyncio before monkey patch
from homeassistant.util.async_ import run_callback_threadsafe
def open_browser(event):
"""Open the webinterface in a browser."""
if hass.config.api is not None:
import webbrowser
webbrowser.open(hass.config.api.base_url)
run_callback_threadsafe(
hass.loop,
hass.bus.async_listen_once,
EVENT_HOMEASSISTANT_START, open_browser
)
return hass.start()
def try_to_restart() -> None:
"""Attempt to clean up state and start a new Home Assistant instance."""
# Things should be mostly shut down already at this point, now just try
# to clean up things that may have been left behind.
sys.stderr.write('Home Assistant attempting to restart.\n')
# Count remaining threads, ideally there should only be one non-daemonized
# thread left (which is us). Nothing we really do with it, but it might be
# useful when debugging shutdown/restart issues.
try:
nthreads = sum(thread.is_alive() and not thread.daemon
for thread in threading.enumerate())
if nthreads > 1:
sys.stderr.write(
"Found {} non-daemonic threads.\n".format(nthreads))
# Somehow we sometimes seem to trigger an assertion in the python threading
# module. It seems we find threads that have no associated OS level thread
# which are not marked as stopped at the python level.
except AssertionError:
sys.stderr.write("Failed to count non-daemonic threads.\n")
# Try to not leave behind open filedescriptors with the emphasis on try.
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError:
max_fd = 256
if platform.system() == 'Darwin':
closefds_osx(3, max_fd)
else:
os.closerange(3, max_fd)
# Now launch into a new instance of Home Assistant. If this fails we
# fall through and exit with error 100 (RESTART_EXIT_CODE) in which case
# systemd will restart us when RestartForceExitStatus=100 is set in the
# systemd.service file.
sys.stderr.write("Restarting Home Assistant\n")
args = cmdline()
os.execv(args[0], args)
def main() -> int:
"""Start Home Assistant."""
validate_python()
monkey_patch_needed = sys.version_info[:3] < (3, 6, 3)
if monkey_patch_needed and os.environ.get('HASS_NO_MONKEY') != '1':
if sys.version_info[:2] >= (3, 6):
monkey_patch.disable_c_asyncio()
monkey_patch.patch_weakref_tasks()
attempt_use_uvloop()
args = get_arguments()
if args.script is not None:
from homeassistant import scripts
return scripts.run(args.script)
config_dir = os.path.join(os.getcwd(), args.config)
ensure_config_path(config_dir)
# Daemon functions
if args.pid_file:
check_pid(args.pid_file)
if args.daemon:
daemonize()
if args.pid_file:
write_pid(args.pid_file)
exit_code = setup_and_run_hass(config_dir, args)
if exit_code == RESTART_EXIT_CODE and not args.runner:
try_to_restart()
return exit_code
if __name__ == "__main__":
sys.exit(main())
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _wepy = require('./../npm/wepy/lib/wepy.js');
var _wepy2 = _interopRequireDefault(_wepy);
var _http = require('./../mixins/http.js');
var _http2 = _interopRequireDefault(_http);
var _base = require('./../mixins/base.js');
var _base2 = _interopRequireDefault(_base);
var _cart = require('./../mixins/cart.js');
var _cart2 = _interopRequireDefault(_cart);
var _user = require('./../mixins/user.js');
var _user2 = _interopRequireDefault(_user);
var _swiper = require('./../components/swiper.js');
var _swiper2 = _interopRequireDefault(_swiper);
var _screen = require('./../components/screen.js');
var _screen2 = _interopRequireDefault(_screen);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var Detail = function (_wepy$page) {
_inherits(Detail, _wepy$page);
function Detail() {
var _ref;
var _temp, _this, _ret;
_classCallCheck(this, Detail);
for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
return _ret = (_temp = (_this = _possibleConstructorReturn(this, (_ref = Detail.__proto__ || Object.getPrototypeOf(Detail)).call.apply(_ref, [this].concat(args))), _this), _this.config = {
navigationBarTitleText: '详情'
}, _this.$repeat = {}, _this.$props = { "Swiper": { "xmlns:v-bind": "", "v-bind:list.sync": "swipers", "height": "280" }, "Screen": { "class": "fixed-bottom" } }, _this.$events = {}, _this.components = {
Swiper: _swiper2.default,
Screen: _screen2.default
}, _this.mixins = [_base2.default, _http2.default, _cart2.default, _user2.default], _this.data = {
isEnabled: true,
img_banner: '/images/swiper.png',
icon_star: '/images/icon/icon-star@2x.png',
icon_star_active: '/images/icon/icon-star-active@2x.png',
icon_shelf: '/images/tabbars/icon-shelf@2x.png',
swipers: [{ image: 'https://233.calamus.xyz/2.jpg', url: '' }, { image: 'https://233.calamus.xyz/3.jpg', url: '' }],
anime: {
'name': '命运石之门',
'image': ['http://233.calamus.xyz/image/233/%E5%91%BD%E8%BF%90%E7%9F%B3%E4%B9%8B%E9%97%A80.jpeg', '', ''],
'description': '一切都是命运石之门的选择,石头门,一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门一切都是命运石之门的选择,石头门',
'content': [{ 'type': 'text', 'value': '' }, { 'type': 'text', 'value': '' }, { 'type': 'image', 'value': 'http://233.calamus.xyz/image/233/%E5%91%BD%E8%BF%90%E7%9F%B3%E4%B9%8B%E9%97%A80.jpeg' }],
'tag': ['致郁', '时间穿越', '神作'],
'stars': '5',
'createTime': '2013-04-20',
'flag': '1',
'author': '',
'type': 'game',
'company': '',
'cvs': '宫野真守,花泽香菜',
'id': '',
'download': ''
}
}, _this.computed = {}, _this.methods = {
addAnime: function addAnime() {
console.log("isEnabled", this.isEnabled);
this.isEnabled = this.isEnabled == false ? true : false;
}
}, _this.events = {}, _temp), _possibleConstructorReturn(_this, _ret);
}
_createClass(Detail, [{
key: 'onLoad',
value: function onLoad() {}
}]);
return Detail;
}(_wepy2.default.page);
Page(require('./../npm/wepy/lib/wepy.js').default.$createPage(Detail , 'pages/detail'));
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Extracts annotations from a PDF file in markdown format for use in reviewing.
"""
import argparse
import io
import sys
import os
import textwrap
from collections import defaultdict
from PyPDF2 import PdfFileReader
import pdfminer.pdftypes as pdftypes
import pdfminer.settings
import pdfminer.utils
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams, LTContainer, LTAnno, LTChar, LTTextBox
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.psparser import PSLiteralTable, PSLiteral
pdfminer.settings.STRICT = False
SUBSTITUTIONS = {
u'ff': 'ff',
u'fi': 'fi',
u'fl': 'fl',
u'ffi': 'ffi',
u'ffl': 'ffl',
u'‘': "'",
u'’': "'",
u'“': '"',
u'”': '"',
u'…': '...',
}
ANNOT_SUBTYPES = frozenset(
{'Text', 'Highlight', 'Squiggly', 'StrikeOut', 'Underline'})
COLUMNS_PER_PAGE = 2 # default only, changed via a command-line parameter
DEBUG_BOXHIT = False
def boxhit(item, box):
(x0, y0, x1, y1) = box
assert item.x0 <= item.x1 and item.y0 <= item.y1
assert x0 <= x1 and y0 <= y1
# does most of the item area overlap the box?
# http://math.stackexchange.com/questions/99565/simplest-way-to-calculate-the-intersect-area-of-two-rectangles
x_overlap = max(0, min(item.x1, x1) - max(item.x0, x0))
y_overlap = max(0, min(item.y1, y1) - max(item.y0, y0))
overlap_area = x_overlap * y_overlap
item_area = (item.x1 - item.x0) * (item.y1 - item.y0)
assert overlap_area <= item_area
if DEBUG_BOXHIT and overlap_area != 0:
print(
"'%s' %f-%f,%f-%f in %f-%f,%f-%f %2.0f%%" %
(item.get_text(),
item.x0,
item.x1,
item.y0,
item.y1,
x0,
x1,
y0,
y1,
100 *
overlap_area /
item_area))
if item_area == 0:
return False
else:
return overlap_area >= 0.5 * item_area
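# Worked example of the overlap test above (illustrative, not from the original
# tool): an item spanning x 0-10, y 0-10 tested against box (0, 0, 5, 10) gives
# x_overlap = 5 and y_overlap = 10, so overlap_area = 50 vs item_area = 100;
# 50% coverage satisfies `overlap_area >= 0.5 * item_area` and counts as a hit.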
class RectExtractor(TextConverter):
def __init__(self, rsrcmgr, codec='utf-8', pageno=1, laparams=None):
dummy = io.StringIO()
TextConverter.__init__(
self,
rsrcmgr,
outfp=dummy,
codec=codec,
pageno=pageno,
laparams=laparams)
self.annots = set()
def setannots(self, annots):
self.annots = {a for a in annots if a.boxes}
# main callback from parent PDFConverter
def receive_layout(self, ltpage):
self._lasthit = frozenset()
self._curline = set()
self.render(ltpage)
def testboxes(self, item):
hits = frozenset({a for a in self.annots if any(
{boxhit(item, b) for b in a.boxes})})
self._lasthit = hits
self._curline.update(hits)
return hits
# "broadcast" newlines to _all_ annotations that received any text on the
# current line, in case they see more text on the next line, even if the
# most recent character was not covered.
def capture_newline(self):
for a in self._curline:
a.capture('\n')
self._curline = set()
def render(self, item):
# If it's a container, recurse on nested items.
if isinstance(item, LTContainer):
for child in item:
self.render(child)
# Text boxes are a subclass of container, and somehow encode newlines
# (this weird logic is derived from pdfminer.converter.TextConverter)
if isinstance(item, LTTextBox):
self.testboxes(item)
self.capture_newline()
# Each character is represented by one LTChar, and we must handle
# individual characters (not higher-level objects like LTTextLine)
# so that we can capture only those covered by the annotation boxes.
elif isinstance(item, LTChar):
for a in self.testboxes(item):
a.capture(item.get_text())
# Annotations capture whitespace not explicitly encoded in
# the text. They don't have an (X,Y) position, so we need some
# heuristics to match them to the nearby annotations.
elif isinstance(item, LTAnno):
text = item.get_text()
if text == '\n':
self.capture_newline()
else:
for a in self._lasthit:
a.capture(text)
class Page:
def __init__(self, pageno, mediabox):
self.pageno = pageno
self.mediabox = mediabox
self.annots = []
def __eq__(self, other):
return self.pageno == other.pageno
def __lt__(self, other):
return self.pageno < other.pageno
class Annotation:
def __init__(
self,
page,
tagname,
coords=None,
rect=None,
contents=None,
author=None):
self.page = page
self.tagname = tagname
if contents == '':
self.contents = None
else:
self.contents = contents
self.rect = rect
self.author = author
self.text = ''
if coords is None:
self.boxes = None
else:
assert len(coords) % 8 == 0
self.boxes = []
while coords != []:
(x0, y0, x1, y1, x2, y2, x3, y3) = coords[:8]
coords = coords[8:]
xvals = [x0, x1, x2, x3]
yvals = [y0, y1, y2, y3]
box = (min(xvals), min(yvals), max(xvals), max(yvals))
self.boxes.append(box)
def capture(self, text):
if text == '\n':
# Kludge for latex: elide hyphens
if self.text.endswith('-'):
self.text = self.text[:-1]
            # Join lines, treating newlines as space, while ignoring successive
            # newlines. This makes it easier for the renderer to "broadcast"
            # LTAnno newlines to active annotations regardless of box hits.
            # (Detecting paragraph breaks is tricky anyway, and left for
            # future work!)
elif not self.text.endswith(' '):
self.text += ' '
else:
self.text += text
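    # Illustrative trace of the hyphen kludge above: capturing "exa-", then an
    # LTAnno '\n', then "mple" yields self.text == "example" -- the trailing
    # '-' is dropped and, because of the if/elif, no space is inserted.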
def gettext(self):
if self.boxes:
if self.text:
# replace tex ligatures (and other common odd characters)
return ''.join([SUBSTITUTIONS.get(c, c)
for c in self.text.strip()])
else:
# something's strange -- we have boxes but no text for them
return "(XXX: missing text!)"
else:
return None
def getstartpos(self):
if self.rect:
(x0, y0, x1, y1) = self.rect
elif self.boxes:
(x0, y0, x1, y1) = self.boxes[0]
else:
return None
# XXX: assume left-to-right top-to-bottom text
return Pos(self.page, min(x0, x1), max(y0, y1))
# custom < operator for sorting
def __lt__(self, other):
return self.getstartpos() < other.getstartpos()
class Pos:
def __init__(self, page, x, y):
self.page = page
self.x = x
self.y = y
def __lt__(self, other):
if self.page < other.page:
return True
elif self.page == other.page:
assert self.page is other.page
# XXX: assume left-to-right top-to-bottom documents
(sx, sy) = self.normalise_to_mediabox()
(ox, oy) = other.normalise_to_mediabox()
(x0, y0, x1, y1) = self.page.mediabox
colwidth = (x1 - x0) / COLUMNS_PER_PAGE
self_col = (sx - x0) // colwidth
other_col = (ox - x0) // colwidth
return self_col < other_col or (self_col == other_col and sy > oy)
else:
return False
def normalise_to_mediabox(self):
x, y = self.x, self.y
(x0, y0, x1, y1) = self.page.mediabox
if x < x0:
x = x0
elif x > x1:
x = x1
if y < y0:
y = y0
elif y > y1:
y = y1
return (x, y)
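# Ordering example (illustrative): with mediabox (0, 0, 600, 800) and
# COLUMNS_PER_PAGE = 2, colwidth is 300, so x=100 falls in column 0 and x=400
# in column 1; within a column the larger y sorts first, since PDF y
# coordinates grow upwards (the top of the page has the larger y).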
def getannots(pdfannots, page):
annots = []
for pa in pdfannots:
subtype = pa.get('Subtype')
if subtype is not None and subtype.name not in ANNOT_SUBTYPES:
continue
contents = pa.get('Contents')
if contents is not None:
# decode as string, normalise line endings, replace special
# characters
contents = pdfminer.utils.decode_text(contents)
contents = contents.replace('\r\n', '\n').replace('\r', '\n')
contents = ''.join([SUBSTITUTIONS.get(c, c) for c in contents])
coords = pdftypes.resolve1(pa.get('QuadPoints'))
rect = pdftypes.resolve1(pa.get('Rect'))
author = pdftypes.resolve1(pa.get('T'))
if author is not None:
author = pdfminer.utils.decode_text(author)
a = Annotation(
page,
subtype.name,
coords,
rect,
contents,
author=author)
annots.append(a)
return annots
class OrgPrinter:
"""
OrgPrinter is used to extract annotations in org-mode format
"""
def __init__(self, outlines, wrapcol, outfile):
"""
outlines List of outlines
wrapcol If not None, specifies the column at which output is word-wrapped
"""
self.outlines = outlines
self.wrapcol = wrapcol
self.outfile = outfile
self.annot_nits = frozenset({'Squiggly', 'StrikeOut', 'Underline'})
self.INDENT = " "
if wrapcol:
self.text_wrapper = textwrap.TextWrapper(
width=wrapcol,
initial_indent=self.INDENT * 2,
subsequent_indent=self.INDENT * 2
)
self.indent_wrapper = textwrap.TextWrapper(
width=wrapcol,
initial_indent=self.INDENT,
subsequent_indent=self.INDENT
)
def nearest_outline(self, pos):
prev = None
for o in self.outlines:
if o.pos < pos:
prev = o
else:
break
return prev
def format_pos(self, annot):
apos = annot.getstartpos()
o = self.nearest_outline(apos) if apos else None
return f"** {annot.page.pageno + 1}" + (f" {o.title}" if o else "")
def format_bullet(self, paras, quotepos=None, quotelen=None):
if quotepos:
assert quotepos > 0 and quotelen > 0 and quotepos + \
quotelen <= len(paras)
# emit the first paragraph with the bullet
# if self.wrapcol:
# ret = self.text_wrapper.fill(paras[0])
# else:
# ret = self.INDENT + paras[0]
ret = ""
page_number = paras[0]
# emit subsequent paragraphs
npara = 1
for para in paras[1:]:
# are we in a blockquote?
inquote = quotepos and npara >= quotepos and npara < quotepos + quotelen
# emit a paragraph break
# if we're going straight to a quote, we don't need an extra
# newline
ret = ret + ('\n' if npara == quotepos else '\n\n')
if self.wrapcol:
tw = self.text_wrapper if inquote else self.indent_wrapper
ret = ret + tw.fill(para)
else:
# indent = self.HEADER_INDENT * 2 + self.INDENT if inquote else self.INDENT
indent = self.INDENT * 2
ret = ret + indent + para
npara += 1
return page_number, ret
def format_annot(self, annot, extra=None):
# capture item text and contents (i.e. the comment), and split each
# into paragraphs
rawtext = annot.gettext()
text = [l for l in rawtext.strip().splitlines()
if l] if rawtext else []
comment = [l for l in annot.contents.splitlines()
if l] if annot.contents else []
# we are either printing: item text and item contents, or one of the two
# if we see an annotation with neither, something has gone wrong
assert text or comment
# compute the formatted position (and extra bit if needed) as a label
label = self.format_pos(
annot) + (f":PROPERTIES:\n:Extra: {extra}\n:END:\n" if extra else "")
# If we have short (single-paragraph, few words) text with a short or no
# comment, and the text contains no embedded full stops or quotes, then
# we'll just put quotation marks around the text and merge the two into
# a single paragraph.
# if (text and len(text) == 1 and len(text[0].split()) <= 10 # words
# and all([x not in text[0] for x in ['"', '. ']])
# and (not comment or len(comment) == 1)):
# msg = label + ' "' + text[0] + '"'
# if comment:
# msg = msg + ' -- ' + comment[0]
# return self.format_bullet([msg]) + "\n"
# If there is no text and a single-paragraph comment, it also goes on
# one line.
# if comment and not text and len(comment) == 1:
# msg = label + " " + comment[0]
# return self.format_bullet([msg]) + "\n"
# Otherwise, text (if any) turns into a blockquote, and the comment (if
# any) into subsequent paragraphs.
# else:
msgparas = [label] + text + comment
quotepos = 1 if text else None
quotelen = len(text) if text else None
return self.format_bullet(msgparas, quotepos, quotelen)
def printall(self, annots):
for a in annots:
print(self.format_annot(a, a.tagname), file=self.outfile)
def printall_grouped(self, sections, annots):
"""
sections controls the order of sections output
e.g.: ["highlights", "comments", "nits"]
"""
self._printheader_called = False
def printheader(name):
# emit blank separator line if needed
if self._printheader_called:
print("", file=self.outfile)
else:
self._printheader_called = True
print(f"* {name}\n", file=self.outfile)
highlights = [a for a in annots if a.tagname ==
'Highlight' and a.contents is None]
comments = [
a for a in annots if a.tagname not in self.annot_nits and a.contents]
nits = [a for a in annots if a.tagname in self.annot_nits]
page_highlights, page_comments, page_nits = defaultdict(
str), defaultdict(str), defaultdict(str)
for section_name in sections:
if highlights and section_name == 'highlights':
printheader("Highlights")
for a in highlights:
ph = self.format_annot(a)
page_highlights[ph[0]] += ph[1] + '\n'
for k, v in page_highlights.items():
print(k + v, file=self.outfile)
if comments and section_name == 'comments':
printheader("Detailed comments")
for a in comments:
ps = self.format_annot(a)
page_comments[ps[0]] = ps[1]
for k, v in page_comments.items():
print(k + v, file=self.outfile)
if nits and section_name == 'nits':
printheader("Nits")
for a in nits:
if a.tagname == 'StrikeOut':
extra = "delete"
else:
extra = None
pn = self.format_annot(a, extra)
page_nits[pn[0]] = pn[1]
for k, v in page_nits.items():
print(k + v, file=self.outfile)
def resolve_dest(doc, dest):
if isinstance(dest, bytes):
dest = pdftypes.resolve1(doc.get_dest(dest))
elif isinstance(dest, PSLiteral):
dest = pdftypes.resolve1(doc.get_dest(dest.name))
if isinstance(dest, dict):
dest = dest['D']
return dest
class Outline:
def __init__(self, title, dest, pos):
self.title = title
self.dest = dest
self.pos = pos
def get_outlines(doc, pageslist, pagesdict):
result = []
for (_, title, destname, actionref, _) in doc.get_outlines():
if destname is None and actionref:
action = pdftypes.resolve1(actionref)
if isinstance(action, dict):
subtype = action.get('S')
if subtype is PSLiteralTable.intern('GoTo'):
destname = action.get('D')
if destname is None:
continue
dest = resolve_dest(doc, destname)
# consider targets of the form [page /XYZ left top zoom]
if dest[1] is PSLiteralTable.intern('XYZ'):
(pageref, _, targetx, targety) = dest[:4]
if isinstance(pageref, int):
page = pageslist[pageref]
elif isinstance(pageref, pdftypes.PDFObjRef):
page = pagesdict[pageref.objid]
else:
sys.stderr.write(
'Warning: unsupported pageref in outline: %s\n' %
pageref)
page = None
if page:
pos = Pos(page, targetx, targety)
result.append(Outline(title, destname, pos))
return result
def pdftitle(fh):
pdf_reader = PdfFileReader(fh)
docinfo = pdf_reader.getDocumentInfo()
return docinfo.title if (docinfo and docinfo.title) else ''
def process_file(fh, emit_progress):
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = RectExtractor(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
parser = PDFParser(fh)
doc = PDFDocument(parser)
pageslist = [] # pages in page order
pagesdict = {} # map from PDF page object ID to Page object
allannots = []
for (pageno, pdfpage) in enumerate(PDFPage.create_pages(doc)):
page = Page(pageno, pdfpage.mediabox)
pageslist.append(page)
pagesdict[pdfpage.pageid] = page
if pdfpage.annots:
# emit progress indicator
if emit_progress:
sys.stderr.write(
(" " if pageno > 0 else "") + "%d" %
(pageno + 1))
sys.stderr.flush()
pdfannots = []
for a in pdftypes.resolve1(pdfpage.annots):
if isinstance(a, pdftypes.PDFObjRef):
pdfannots.append(a.resolve())
else:
sys.stderr.write('Warning: unknown annotation: %s\n' % a)
page.annots = getannots(pdfannots, page)
page.annots.sort()
device.setannots(page.annots)
interpreter.process_page(pdfpage)
allannots.extend(page.annots)
if emit_progress:
sys.stderr.write("\n")
outlines = []
try:
outlines = get_outlines(doc, pageslist, pagesdict)
except PDFNoOutlines:
if emit_progress:
sys.stderr.write(
"Document doesn't include outlines (\"bookmarks\")\n")
except Exception as ex:
sys.stderr.write("Warning: failed to retrieve outlines: %s\n" % ex)
device.close()
return allannots, outlines
def parse_args():
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("input", metavar="INFILE", type=argparse.FileType("rb"),
help="PDF files to process", nargs='+')
g = p.add_argument_group('Basic options')
g.add_argument("-p", "--progress", default=False, action="store_true",
help="emit progress information")
g.add_argument(
"-n",
"--cols",
default=2,
type=int,
metavar="COLS",
dest="cols",
help="number of columns per page in the document (default: 2)")
g = p.add_argument_group('Options controlling output format')
allsects = ["highlights", "comments", "nits"]
g.add_argument(
"-s",
"--sections",
metavar="SEC",
nargs="*",
choices=allsects,
default=allsects,
help=(
"sections to emit (default: %s)" %
', '.join(allsects)))
g.add_argument(
"--no-group",
dest="group",
default=True,
action="store_false",
help="emit annotations in order, don't group into sections")
g.add_argument(
"--print-filename",
dest="printfilename",
default=False,
action="store_true",
help="print the filename when it has annotations")
g.add_argument("-w", "--wrap", metavar="COLS", type=int,
help="wrap text at this many output columns")
return p.parse_args()
def main():
args = parse_args()
global COLUMNS_PER_PAGE
COLUMNS_PER_PAGE = args.cols
for file in args.input:
(annots, outlines) = process_file(file, args.progress)
orgfilename = os.path.splitext(os.path.basename(file.name))[0]
orgfile = open(orgfilename + '.org', 'w')
op = OrgPrinter(outlines, args.wrap, orgfile)
title = pdftitle(file)
if args.printfilename and annots:
print(f"#+Title: {title if title else file.name}\n", file=orgfile)
if args.group:
op.printall_grouped(args.sections, annots)
else:
op.printall(annots)
orgfile.close()
return 0
if __name__ == "__main__":
sys.exit(main())
|
import Index from './SkeletonLoader.vue';
export default Index;
|
function oddOrEven(number) {
let num = Number(number);
  if (num % 2 === 0) {
console.log("even");
} else {
console.log("odd");
}
}
oddOrEven("1024");
|
const toString = Object.prototype.toString
export const checkType = (val) => Object.prototype.toString.call(val).slice(8, -1)
export const toKebabCase = (str) => str.replace(/[A-Z]/g, (letter) => `-${letter.toLowerCase()}`).replace(/^-/, '')
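// Usage examples (illustrative): checkType([]) === 'Array', checkType(null) === 'Null';
// toKebabCase('fontSize') === 'font-size', toKebabCase('WebkitTransform') === 'webkit-transform'.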
export const clone = (object, deep) => {
if (object === null || typeof object !== 'object') {
return object
}
deep = deep || false
const result = new object.constructor()
for (const propertyName in object) {
if (Object.prototype.hasOwnProperty.call(object, propertyName)) {
let value = object[propertyName]
if (deep) {
value = clone(value, deep)
}
result[propertyName] = value
}
}
return result
}
export function getString (arrayBuffer, encoding) {
if (!(arrayBuffer instanceof Uint8Array) && !(arrayBuffer instanceof ArrayBuffer) && arrayBuffer.buffer) {
arrayBuffer = arrayBuffer.buffer
}
const decoder = new TextDecoder(encoding)
const decodedText = decoder.decode(arrayBuffer, { stream: true })
return decodedText
}
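// Example (illustrative): getString(new Uint8Array([104, 101, 108, 108, 111]), 'utf-8')
// returns 'hello'; a plain ArrayBuffer, or any typed-array view exposing `.buffer`, also works.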
export function isEmptyObj (o) {
if (isUndefined(o)) {
return true
}
if (o instanceof Element) {
return false
}
const arr = Object.keys(o)
return arr.length === 0
}
/**
 * Get a DOM element from a collection by its class name.
 * @param {Array<Element>} htmlCollection Collection of DOM elements.
 * @param {String} className Class name to match.
*/
export const getDocumentByClassName = (htmlCollection, className) => {
let temp
const BreakException = {}
try {
Array.prototype.slice.call(htmlCollection).forEach((element) => {
if (element.className === className) {
temp = element
throw BreakException
}
})
} catch (e) {
if (e !== BreakException) throw e
}
return temp
}
/**
* Determine if a value is an Array
*
* @param {Object} val The value to test
* @returns {boolean} True if value is an Array, otherwise false
*/
export function isArray (val) {
return toString.call(val) === '[object Array]'
}
/**
* Determine if a value is an Object
*
* @param {Object} val The value to test
* @returns {boolean} True if value is an Object, otherwise false
*/
export function isObject (val) {
return val !== null && typeof val === 'object'
}
/**
* Determine if a value is a String
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a String, otherwise false
*/
export function isString (val) {
return typeof val === 'string'
}
/**
* Determine if a value is a Number
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Number, otherwise false
*/
export function isNumber (val) {
return typeof val === 'number'
}
/**
* Determine if a value is undefined
*
* @param {Object} val The value to test
* @returns {boolean} True if the value is undefined, otherwise false
*/
export function isUndefined (val) {
return typeof val === 'undefined'
}
/**
* Determine if a value is a Function
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Function, otherwise false
*/
export function isFunction (val) {
return toString.call(val) === '[object Function]'
}
/**
 * Validate whether the given values form a valid longitude/latitude pair.
* @param {Number} longitude
* @param {Number} latitude
* @returns {Boolean}
*/
export function lnglatValidator (longitude, latitude) {
  // Longitude: integer part 0-180, up to 15 decimal places (per the regex below)
const longreg = /^(-|\+)?(((\d|[1-9]\d|1[0-7]\d|0{1,3})\.\d{0,15})|(\d|[1-9]\d|1[0-7]\d|0{1,3})|180\.0{0,15}|180)$/
if (!longreg.test(longitude)) {
return false
  } // Latitude: integer part 0-90, up to 15 decimal places
const latreg = /^(-|\+)?([0-8]?\d{1}\.\d{0,15}|90\.0{0,15}|[0-8]?\d{1}|90)$/
if (!latreg.test(latitude)) {
return false
}
return true
}
export function dirname (path) {
if (typeof path !== 'string') path = path + ''
if (path.length === 0) return '.'
let code = path.charCodeAt(0)
const hasRoot = code === 47 /* / */
let end = -1
let matchedSlash = true
for (let i = path.length - 1; i >= 1; --i) {
code = path.charCodeAt(i)
if (code === 47 /* / */) {
if (!matchedSlash) {
end = i
break
}
} else {
// We saw the first non-path separator
matchedSlash = false
}
}
if (end === -1) return hasRoot ? '/' : '.'
if (hasRoot && end === 1) {
// return '//';
// Backwards-compat fix:
return '/'
}
return path.slice(0, end)
}
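// Examples (illustrative): dirname('/foo/bar') === '/foo', dirname('/foo') === '/',
// dirname('foo/bar') === 'foo', dirname('foo') === '.', dirname('') === '.'.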
export function Platform () {
const ua = navigator.userAgent
const isWindowsPhone = /(?:Windows Phone)/.test(ua)
const isSymbian = /(?:SymbianOS)/.test(ua) || isWindowsPhone
const isAndroid = /(?:Android)/.test(ua)
const isFireFox = /(?:Firefox)/.test(ua)
const isChrome = /(?:Chrome|CriOS)/.test(ua)
const isTablet = /(?:iPad|PlayBook)/.test(ua) || (isAndroid && !/(?:Mobile)/.test(ua)) || (isFireFox && /(?:Tablet)/.test(ua))
const isPhone = /(?:iPhone)/.test(ua) && !isTablet
const isPc = !isPhone && !isAndroid && !isSymbian
return {
isTablet: isTablet,
isPhone: isPhone,
isAndroid: isAndroid,
isPc: isPc,
isChrome: isChrome
}
}
export function captureScreenshot (viewer, showSplitter = false) {
const { when } = Cesium
const deferred = when.defer()
const scene = viewer.scene
const removeCallback = scene.postRender.addEventListener(function () {
removeCallback()
try {
const cesiumCanvas = viewer.scene.canvas
// If we're using the splitter, draw the split position as a vertical white line.
const canvas = cesiumCanvas
// if (showSplitter) {
// canvas = document.createElement('canvas')
// canvas.width = cesiumCanvas.width
// canvas.height = cesiumCanvas.height
// const context = canvas.getContext('2d')
// context.drawImage(cesiumCanvas, 0, 0)
// const x = viewer.splitPosition * cesiumCanvas.width
// context.strokeStyle = this.terria.baseMapContrastColor
// context.beginPath()
// context.moveTo(x, 0)
// context.lineTo(x, cesiumCanvas.height)
// context.stroke()
// }
deferred.resolve(canvas.toDataURL('image/png'))
} catch (e) {
deferred.reject(e)
}
}, this)
scene.render(viewer.clock.currentTime)
return deferred.promise
}
export function getAllAttribution (viewer) {
const credits = viewer.scene.frameState.creditDisplay._currentFrameCredits.screenCredits.values.concat(
viewer.scene.frameState.creditDisplay._currentFrameCredits.lightboxCredits.values
)
return credits.map((credit) => credit.html)
}
export function drawTriangle (options) {
if (!options) {
throw new Error('options is required')
}
if (!options.width) {
throw new Error('options.width is required')
}
if (!options.height) {
throw new Error('options.height is required')
}
options.backgroundColor = options.backgroundColor || 'black'
options.borderColor = options.borderColor || 'orange'
options.borderWidth = options.borderWidth || 1
const cv = document.createElement('canvas')
cv.width = options.width
cv.height = options.height
const ctx = cv.getContext('2d')
ctx.beginPath()
if (options.direction === 1) {
// left
ctx.moveTo(cv.width, 0)
ctx.lineTo(0, cv.height / 2)
ctx.lineTo(cv.width, cv.height)
} else if (options.direction === 2) {
// top
ctx.moveTo(0, cv.height)
ctx.lineTo(cv.width / 2, 0)
ctx.lineTo(cv.width, cv.height)
} else if (options.direction === 3) {
// right
ctx.moveTo(0, cv.height)
ctx.lineTo(cv.width, cv.height / 2)
ctx.lineTo(0, 0)
} else {
// bottom
ctx.moveTo(0, 0)
ctx.lineTo(cv.width / 2, cv.height)
ctx.lineTo(cv.width, 0)
}
  ctx.lineJoin = 'round' // corner style where two lines meet ('miter' sharp, the default; 'bevel' flat; 'round' rounded)
if (options.backgroundColor) {
ctx.fillStyle = options.backgroundColor.toCssColorString()
ctx.fill()
}
if (options.border) {
ctx.lineWidth = options.borderWidth
ctx.strokeStyle = options.borderColor.toCssColorString()
ctx.stroke()
}
return cv
}
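// Usage sketch (assumes Cesium Color objects, since the fill/stroke values are
// passed through toCssColorString()): drawTriangle({ width: 16, height: 16,
// direction: 2, backgroundColor: Cesium.Color.BLACK, border: true,
// borderColor: Cesium.Color.ORANGE, borderWidth: 1 }) returns a 16x16 canvas
// holding an upward-pointing triangle.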
export function drawText (text, options) {
options = options || {
font: '20px sans-serif'
}
const backcolor = options.backgroundColor
  const padding = options.padding || 0
delete options.backgroundColor
delete options.padding
const lines = text.split(/[\r]?\n+/)
const lineImgs = []
let w = 0
let h = 0
for (let i = 0; i < lines.length; i++) {
const tempCv = Cesium.writeTextToCanvas(lines[i], options)
if (tempCv) {
lineImgs.push(tempCv)
h += tempCv.height
w = Math.max(w, tempCv.width)
}
}
options.backgroundColor = backcolor
options.padding = padding
let cv = options.canvas
if (!cv) {
w += padding * 2
h += padding * 2.25
cv = document.createElement('canvas')
cv.width = w
cv.height = h
}
const ctx = cv.getContext('2d')
if (backcolor) {
ctx.fillStyle = backcolor.toCssColorString()
} else {
ctx.fillStyle = undefined
}
if (options.border) {
ctx.lineWidth = options.borderWidth
ctx.strokeStyle = options.borderColor.toCssColorString()
}
if (!options.borderRadius) {
if (backcolor) {
ctx.fillRect(0, 0, cv.width, cv.height)
}
if (options.border) {
ctx.strokeRect(0, 0, cv.width, cv.height)
}
} else {
drawRoundedRect(
{
x: 0,
y: 0,
width: cv.width,
height: cv.height
},
options.borderRadius,
ctx
)
}
delete ctx.strokeStyle
delete ctx.fillStyle
let y = 0
for (let i = 0; i < lineImgs.length; i++) {
ctx.drawImage(lineImgs[i], 0 + padding, y + padding)
y += lineImgs[i].height
}
return cv
}
function drawRoundedRect (rect, r, ctx) {
ctx.beginPath()
ctx.moveTo(rect.x + r, rect.y)
ctx.arcTo(rect.x + rect.width, rect.y, rect.x + rect.width, rect.y + rect.height, r)
ctx.arcTo(rect.x + rect.width, rect.y + rect.height, rect.x, rect.y + rect.height, r)
ctx.arcTo(rect.x, rect.y + rect.height, rect.x, rect.y, r)
ctx.arcTo(rect.x, rect.y, rect.x + r, rect.y, r)
ctx.fill()
ctx.stroke()
}
export function getExtension (fileName) {
const start = fileName.lastIndexOf('.')
if (start >= 0) {
return fileName.substring(start, fileName.length)
}
return ''
}
export function changeExtension (fname, newExt) {
return fname.replace(getExtension(fname), newExt)
}
export function readAsArrayBuffer (file) {
const promise = Cesium.when.defer()
const fr = new FileReader()
fr.onload = function (e) {
promise.resolve(e.target.result)
}
fr.onprogress = function (e) {
promise.progress(e.target.result)
}
fr.onerror = function (e) {
promise.reject(e.error)
}
fr.readAsArrayBuffer(file)
return promise
}
export function readAsText (file) {
const promise = Cesium.when.defer()
const fr = new FileReader()
fr.onload = function (e) {
promise.resolve(e.target.result)
}
fr.onprogress = function (e) {
promise.progress(e.target.result)
}
fr.onerror = function (e) {
promise.reject(e.error)
}
fr.readAsText(file)
return promise
}
export function readAllBytes (file) {
const promise = Cesium.when.defer()
const fr = new FileReader()
fr.onload = function (e) {
promise.resolve(new Uint8Array(e.target.result))
}
fr.onprogress = function (e) {
promise.progress(e.target.result)
}
fr.onerror = function (e) {
promise.reject(e.error)
}
fr.readAsArrayBuffer(file)
return promise
}
|
import json
import numpy as np
import argparse
from random import randint
import cv2
with open('../infos/directory.json') as fp: all_data_dir = json.load(fp)
ANN_FILE_train = all_data_dir + 'Annotations_hico/train_annotations_quattro.json'
ANN_FILE_test = all_data_dir + 'Annotations_hico/test_annotations.json'
with open(ANN_FILE_train) as fp:
ANNOTATIONS_train = json.load(fp)
with open(ANN_FILE_test) as fp:
ANNOTATIONS_test = json.load(fp)
OBJ_PATH_train_s = all_data_dir + 'Object_Detections_hico/train/'
OBJ_PATH_test_s = all_data_dir + 'Object_Detections_hico/test/'
with open(all_data_dir + 'hico_infos/hico_list_obj.json') as fp:
list_obj = json.load(fp)
obj_list = dict([(value, int(key) - 1) for key, value in list_obj.items()])
VERB2ID = {u'adjust': 0,
u'assemble': 1,
u'block': 2,
u'blow': 3,
u'board': 4,
u'break': 5,
u'brush_with': 6,
u'buy': 7,
u'carry': 8,
u'catch': 9,
u'chase': 10,
u'check': 11,
u'clean': 12,
u'control': 13,
u'cook': 14,
u'cut': 15,
u'cut_with': 16,
u'direct': 17,
u'drag': 18,
u'dribble': 19,
u'drink_with': 20,
u'drive': 21,
u'dry': 22,
u'eat': 23,
u'eat_at': 24,
u'exit': 25,
u'feed': 26,
u'fill': 27,
u'flip': 28,
u'flush': 29,
u'fly': 30,
u'greet': 31,
u'grind': 32,
u'groom': 33,
u'herd': 34,
u'hit': 35,
u'hold': 36,
u'hop_on': 37,
u'hose': 38,
u'hug': 39,
u'hunt': 40,
u'inspect': 41,
u'install': 42,
u'jump': 43,
u'kick': 44,
u'kiss': 45,
u'lasso': 46,
u'launch': 47,
u'lick': 48,
u'lie_on': 49,
u'lift': 50,
u'light': 51,
u'load': 52,
u'lose': 53,
u'make': 54,
u'milk': 55,
u'move': 56,
u'no_interaction': 57,
u'open': 58,
u'operate': 59,
u'pack': 60,
u'paint': 61,
u'park': 62,
u'pay': 63,
u'peel': 64,
u'pet': 65,
u'pick': 66,
u'pick_up': 67,
u'point': 68,
u'pour': 69,
u'pull': 70,
u'push': 71,
u'race': 72,
u'read': 73,
u'release': 74,
u'repair': 75,
u'ride': 76,
u'row': 77,
u'run': 78,
u'sail': 79,
u'scratch': 80,
u'serve': 81,
u'set': 82,
u'shear': 83,
u'sign': 84,
u'sip': 85,
u'sit_at': 86,
u'sit_on': 87,
u'slide': 88,
u'smell': 89,
u'spin': 90,
u'squeeze': 91,
u'stab': 92,
u'stand_on': 93,
u'stand_under': 94,
u'stick': 95,
u'stir': 96,
u'stop_at': 97,
u'straddle': 98,
u'swing': 99,
u'tag': 100,
u'talk_on': 101,
u'teach': 102,
u'text_on': 103,
u'throw': 104,
u'tie': 105,
u'toast': 106,
u'train': 107,
u'turn': 108,
u'type_on': 109,
u'walk': 110,
u'wash': 111,
u'watch': 112,
u'wave': 113,
u'wear': 114,
u'wield': 115,
u'zip': 116}
MATCHING_IOU = .5
NO_VERBS = 117
def get_detections(segment_key, flag):
if flag == 'train':
key_ann = '%.8i' % (segment_key)
annotation = ANNOTATIONS_train[key_ann]
cur_obj_path_s = OBJ_PATH_train_s + "HICO_train2015_%.8i.json" % (segment_key)
SCORE_TH = 0.6
SCORE_OBJ = 0.3
select_threshold = 15
elif flag == 'test':
key_ann = '%.8i' % (segment_key)
annotation = ANNOTATIONS_test[key_ann]
cur_obj_path_s = OBJ_PATH_test_s + "HICO_test2015_%.8i.json" % (segment_key)
SCORE_TH = 0.6
SCORE_OBJ = 0.3
select_threshold = 15
annotation = clean_up_annotation(annotation)
with open(cur_obj_path_s) as fp:
detections = json.load(fp)
img_H = detections['H']
img_W = detections['W']
shape = [img_W, img_H]
persons_d, objects_d = analyze_detections(detections, SCORE_TH, SCORE_OBJ)
d_p_boxes, scores_persons, class_id_humans = get_boxes_det(persons_d, img_H, img_W)
d_o_boxes, scores_objects, class_id_objects = get_boxes_det(objects_d, img_H, img_W)
    # plain list concatenation also handles the case where one list is empty,
    # where np.concatenate would raise on mismatched shapes
    d_o_boxes = d_o_boxes + d_p_boxes
class_id_objects = np.concatenate([class_id_objects, class_id_humans]).tolist()
scores_objects = np.concatenate([scores_objects, scores_persons]).tolist()
if len(d_p_boxes) > select_threshold:
d_p_boxes, scores_persons, class_id_humans = d_p_boxes[0:select_threshold], scores_persons[
0:select_threshold], class_id_humans[
0:select_threshold]
if len(d_o_boxes) > select_threshold:
d_o_boxes, scores_objects, class_id_objects = d_o_boxes[0:select_threshold], scores_objects[
0:select_threshold], class_id_objects[
0:select_threshold]
# scores_objects.insert(0,1)
return d_p_boxes, d_o_boxes, scores_persons, scores_objects, class_id_humans, class_id_objects, annotation, shape
def get_compact_detections(segment_key, flag):
d_p_boxes, d_o_boxes, scores_persons, scores_objects, class_id_humans, class_id_objects, annotation, shape = get_detections(
segment_key, flag)
img_W = shape[0]
img_H = shape[1]
no_person_dets = len(d_p_boxes)
no_object_dets = len(d_o_boxes)
persons_np = np.zeros([no_person_dets, 4], np.float32)
objects_np = np.zeros([no_object_dets, 4], np.float32)
if no_person_dets != 0:
persons_np = np.array(d_p_boxes, np.float32)
objects_np = np.array(d_o_boxes, np.float32)
persons_np = persons_np / np.array([img_W, img_H, img_W, img_H])
objects_np = objects_np / np.array([img_W, img_H, img_W, img_H])
return {'person_bbx': persons_np, 'objects_bbx': objects_np,
'person_bbx_score': scores_persons, 'objects_bbx_score': scores_objects,
'class_id_objects': class_id_objects}
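# Note (illustrative): boxes leave this function normalised to [0, 1] by the
# image width/height, which is exactly what union_BOX below assumes when it
# rescales them onto the 64x64 attention grid.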
def get_attention_maps(segment_key, flag):
compact_detections = get_compact_detections(segment_key, flag)
persons_np, objects_np = compact_detections['person_bbx'], compact_detections['objects_bbx']
union_box = []
no_person_dets = len(persons_np)
no_object_dets = len(objects_np)
for dd_i in range(no_person_dets):
for do_i in range(len(objects_np)):
union_box.append(union_BOX(persons_np[dd_i], objects_np[do_i], segment_key))
return np.concatenate(union_box)
def get_compact_label(segment_key, flag):
d_p_boxes, d_o_boxes, scores_persons, scores_objects, class_id_humans, class_id_objects, annotation, shape = get_detections(
segment_key, flag)
no_person_dets = len(d_p_boxes)
no_object_dets = len(d_o_boxes)
labels_np = np.zeros([no_person_dets, no_object_dets, NO_VERBS], np.int32)
a_p_boxes = [ann['person_box'] for ann in annotation]
iou_mtx = get_iou_mtx(a_p_boxes, d_p_boxes)
if no_person_dets != 0 and len(a_p_boxes) != 0:
max_iou_for_each_det = np.max(iou_mtx, axis=0)
index_for_each_det = np.argmax(iou_mtx, axis=0)
for dd in range(no_person_dets):
cur_max_iou = max_iou_for_each_det[dd]
if cur_max_iou < MATCHING_IOU:
continue
matched_ann = annotation[index_for_each_det[dd]]
hoi_anns = matched_ann['hois']
            # verbs paired with annotated object boxes
object_hois = [oi for oi in hoi_anns if len(oi['obj_box']) != 0]
a_o_boxes = [oi['obj_box'] for oi in object_hois]
iou_mtx_o = get_iou_mtx(a_o_boxes, d_o_boxes)
if a_o_boxes and d_o_boxes:
for do in range(len(d_o_boxes)):
for ao in range(len(a_o_boxes)):
cur_iou = iou_mtx_o[ao, do]
# enough iou
if cur_iou < MATCHING_IOU:
continue
current_hoi = object_hois[ao]
verb_idx = VERB2ID[current_hoi['verb']]
labels_np[dd, do, verb_idx] = 1
comp_labels = labels_np.reshape(no_person_dets * (no_object_dets), NO_VERBS)
        labels_single = np.array([1 if i.any() else 0 for i in comp_labels])
labels_single = labels_single.reshape(np.shape(labels_single)[0], 1)
return {'labels_all': labels_np, 'labels_single': labels_single}
else:
comp_labels = labels_np.reshape(no_person_dets * (no_object_dets + 1), NO_VERBS)
        labels_single = np.array([1 if i.any() else 0 for i in comp_labels])
labels_single = labels_single.reshape(np.shape(labels_single)[0], 1)
return {'labels_all': labels_np, 'labels_single': labels_single}
def get_bad_detections(segment_key, flag): # Detections Without any person#
labels_all = get_compact_label(segment_key, flag)['labels_all']
if labels_all.size == 0:
return True
else:
return False
def union_BOX(roi_pers, roi_objs, segment_key, H=64, W=64):
assert H == W
roi_pers = np.array(roi_pers * H, dtype=int)
roi_objs = np.array(roi_objs * H, dtype=int)
sample_box = np.zeros([1, 2, H, W])
sample_box[0, 0, roi_pers[1]:roi_pers[3] + 1, roi_pers[0]:roi_pers[2] + 1] = 100
sample_box[0, 1, roi_objs[1]:roi_objs[3] + 1, roi_objs[0]:roi_objs[2] + 1] = 100
return sample_box
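# Illustrative shape note: union_BOX turns one normalised person/object box pair
# into a [1, 2, 64, 64] map, e.g. a person box of [0.0, 0.0, 0.5, 0.5] fills the
# top-left 33x33 patch of channel 0 with the value 100; get_attention_maps then
# concatenates one such map per person/object pair.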
def clean_up_annotation(annotation):
persons_dict = {}
person_list = []
object_list = []
for hoi in annotation:
box = hoi['person_bbx']
box = [int(coord) for coord in box]
box = clean_person(person_list, np.asarray(box))
dkey = tuple(box)
objects = hoi['object']
if len(objects['obj_bbx']) == 0: # no obj case
cur_oi = {'verb': hoi['Verbs'],
'obj_box': [],
'obj_str': '',
'obj_id': ''
}
else:
cur_obj = clean_object(object_list, np.asarray(hoi['object']['obj_bbx']))
cur_oi = {'verb': hoi['Verbs'],
# 'obj_box':[int(coord) for coord in hoi['object']['obj_bbx']],
'obj_box': cur_obj,
'obj_str': hoi['object']['obj_name'],
'obj_id': int(obj_list[hoi['object']['obj_name']])
}
if dkey in persons_dict:
persons_dict[dkey]['hois'].append(cur_oi)
else:
persons_dict[dkey] = {'person_box': box, 'hois': [cur_oi]}
pers_list = []
for dkey in persons_dict:
pers_list.append(persons_dict[dkey])
return pers_list
def clean_person(person_list, box):
if len(person_list) == 0:
person_list.append(box)
return box.tolist()
else:
for person in person_list:
if len(box) == 0:
if len(person) == 0:
return person.tolist()
elif len(person) == 0:
continue
else:
IOU = IoU_box(person, box)
if IOU >= 0.5:
return person.tolist()
person_list.append(box)
return box.tolist()
def clean_object(object_list, box):
if len(object_list) == 0:
object_list.append(box)
return box.tolist()
else:
for object in object_list:
if len(box) == 0:
if len(object) == 0:
return object.tolist()
elif len(object) == 0:
continue
else:
IOU = IoU_box(object, box)
if IOU >= 0.5:
return object.tolist()
object_list.append(box)
return box.tolist()
def get_boxes_det(dets, img_H, img_W):
boxes = []
scores = []
class_no = []
for det in dets:
top, left, bottom, right = det['box_coords']
scores.append(det['score'])
        if len(det['class_str'].split()) == 2:
            # join two-word class names with an underscore to match obj_list keys
            obj_name = det['class_str'].split()[0] + '_' + det['class_str'].split()[1]
        else:
            obj_name = det['class_str']
        class_no.append(int(obj_list[obj_name]))
        # left, top, right, bottom = left*img_W, top*img_H, right*img_W, bottom*img_H
boxes.append([left, top, right, bottom])
return boxes, scores, class_no
def get_iou_mtx(anns, dets):
no_gt = len(anns)
no_dt = len(dets)
iou_mtx = np.zeros([no_gt, no_dt])
for gg in range(no_gt):
gt_box = anns[gg]
for dd in range(no_dt):
dt_box = dets[dd]
iou_mtx[gg, dd] = IoU_box(gt_box, dt_box)
return iou_mtx
def analyze_detections(detections, SCORE_TH, SCORE_OBJ):
persons = []
objects = []
for det in detections['detections']:
if det['class_str'] == 'person':
if det['score'] < SCORE_TH:
continue
persons.append(det)
else:
if det['score'] < SCORE_OBJ:
continue
objects.append(det)
return persons, objects
def IoU_box(box1, box2):
    '''
    box1, box2: (left, top, right, bottom)
    returns intersection over union
    '''
    try:
        left1, top1, right1, bottom1 = box1
        left2, top2, right2, bottom2 = box2
    except (TypeError, ValueError):
        # malformed or empty box: treat as zero overlap
        return 0
left_int = max(left1, left2)
top_int = max(top1, top2)
right_int = min(right1, right2)
bottom_int = min(bottom1, bottom2)
areaIntersection = max(0, right_int - left_int) * max(0, bottom_int - top_int)
area1 = (right1 - left1) * (bottom1 - top1)
area2 = (right2 - left2) * (bottom2 - top2)
IoU = areaIntersection / float(area1 + area2 - areaIntersection)
return IoU
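# Worked example (illustrative): IoU_box((0, 0, 10, 10), (5, 0, 15, 10)) has an
# intersection of 5 * 10 = 50 and a union of 100 + 100 - 50 = 150, so IoU = 1/3,
# which is below MATCHING_IOU = 0.5 and would not count as a match.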
def dry_run():
print("Doing a test run to detect bad detections\n")
with open(all_data_dir + 'bad_detections_hico/bad_detections_train.json') as fp:
bad_detections_train = json.load(fp)
print("In training set object detector failed to detect any person in {} images".format(len(bad_detections_train)))
with open(all_data_dir + 'bad_detections_hico/bad_detections_test.json') as fp:
bad_detections_test = json.load(fp)
print("In testing set object detector failed to detect any person in {} images".format(len(bad_detections_test)))
return bad_detections_train, bad_detections_test
if __name__ == "__main__":
new_anns = {}
compact_dets = {}
att_maps = {}
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type_of_data', type=str, required=False, default='train', help="type_of_data")
args = parser.parse_args()
flag = args.type_of_data
bad_detections_train, bad_detections_test = dry_run()
b_d_tr, b_d_test = [int(l) for l in bad_detections_train], [int(l) for l in bad_detections_test]
phases = ['train', 'test']
from tqdm import tqdm
for flag in phases:
if flag == 'train':
ALL_SEGS = ANNOTATIONS_train.keys()
elif flag == 'test':
ALL_SEGS = ANNOTATIONS_test.keys()
ALL_SEGS = [int(v) for v in ALL_SEGS]
ALL_SEGS.sort()
for segkey in tqdm(ALL_SEGS):
if segkey in (b_d_tr + b_d_test):
new_anns[segkey] = get_compact_label(segkey, flag)
compact_dets[segkey] = get_compact_detections(segkey, flag)
att_maps[segkey] = get_attention_maps(segkey, flag)
|
const router = require("koa-router")();
const uploadCtl = require("./../controllers/upload");
const uploadRouter = router.post("/ApplyUploadSign", uploadCtl.applyUploadSign);
module.exports = uploadRouter;
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0016_sitesettings_context_processor_code'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='is_featured',
field=models.BooleanField(default=False, help_text='Korostetut postaukset n\xe4kyv\xe4t sit\xe4 tukevilla sivustoilla n\xe4ytt\xe4v\xe4mmin.', verbose_name='Korosta postausta'),
),
]
|
"""
Still a cache server. This does not interact with the client.
"""
import pickle
import socket
from distcache import configure as config
from distcache import utils
from distcache.lru_cache import LRUCache
config = config.config()
class CacheClient:
def __init__(self, capacity=100):
"""
:param capacity: capacity of the cache in MBs
"""
self.cache = LRUCache(capacity)
# Communication configurations
self.FORMAT = config.FORMAT
self.HEADER_LENGTH = config.HEADER_LENGTH
        # Start the connection with the server via socket.connect()
self.server_address = (config.IP, config.PORT)
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.id = self.register()  # registered with the server; it now knows your IP and port
print("About: ", self.client_socket.getsockname(), self.id)
def register(self):
"""
Just try connecting to the server. And it will register you.
:return:
"""
self.client_socket.connect(self.server_address)
print("Client connected at address {}:{}".format(*self.server_address))
# TODO: Make sure a proper client_id is always received.
return utils.receive_message(self.client_socket, self.HEADER_LENGTH, self.FORMAT)
def execute_query(self, message):
response = self.parse_message(message)
utils.send_message(response, self.client_socket, self.HEADER_LENGTH, self.FORMAT)
return
def monitor(self):
"""
A client has a few things to listen for.
The server may ping to monitor your health.
The server may request for key, value pair
The server can request you to store key, value pair
The server can request you to delete key from cache
:return:
"""
print("Monitoring queries from server and responding...")
self.client_socket.settimeout(30) # TODO: Increasing timeout is not the solution.
while True:
response = self.client_socket.recv(config.HEADER_LENGTH)
if not response:
continue
message_length = int(response.decode(config.FORMAT))
message = self.client_socket.recv(message_length)
self.execute_query(message) # TODO: Should ultimately be an async operation
def parse_message(self, message):
"""
Parse and execute the command
:param message: the message sent by the cache_server
:return: depends on the operation that was carried out after parsing message
"""
# This should run in a separate thread
message = pickle.loads(message)
if message[0] == "set":
print("set ", message[1:])
return self.cache.set(message[1], message[2])
elif message[0] == "del":
print("delete ", message[1:])
return self.cache.delete(message[1])
elif message[0] == "get":
print("get ", message[1:])
return self.cache.get(message[1])
elif message[0] == "add":
print("get ", message[1:])
return self.cache.add(message[1], message[2])
else:
print("Only these keywords are supported: get, set, delete")
return message
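# Illustrative message format (an assumption inferred from parse_message and
# monitor above): the server sends a HEADER_LENGTH-byte length prefix followed
# by a pickled tuple such as pickle.dumps(("set", "key", "value")) or
# pickle.dumps(("get", "key")).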
if __name__ == '__main__':
client = CacheClient()
client.monitor()
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class ConversationMessageDeliveryChannelEventAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'payload': 'ConversationMessageDeliveryPayload'
}
attribute_map = {
'payload': 'payload'
}
nulls = set()
def __init__(self, payload=None, local_vars_configuration=None): # noqa: E501
"""ConversationMessageDeliveryChannelEventAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._payload = None
self.discriminator = None
if payload is not None:
self.payload = payload
@property
def payload(self):
"""Gets the payload of this ConversationMessageDeliveryChannelEventAllOf. # noqa: E501
:return: The payload of this ConversationMessageDeliveryChannelEventAllOf. # noqa: E501
:rtype: ConversationMessageDeliveryPayload
"""
return self._payload
@payload.setter
def payload(self, payload):
"""Sets the payload of this ConversationMessageDeliveryChannelEventAllOf.
:param payload: The payload of this ConversationMessageDeliveryChannelEventAllOf. # noqa: E501
:type: ConversationMessageDeliveryPayload
"""
self._payload = payload
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConversationMessageDeliveryChannelEventAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConversationMessageDeliveryChannelEventAllOf):
return True
return self.to_dict() != other.to_dict()
|
/* eslint camelcase: off */
module.exports = {
communes: [
{name: 'id', type: 'character', length: 5},
{name: 'nom', type: 'character', length: 80},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
sections: [
{name: 'id', type: 'character', length: 10},
{name: 'commune', type: 'character', length: 5},
{name: 'prefixe', type: 'character', length: 3},
{name: 'code', type: 'character', length: 2},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
feuilles: [
{name: 'id', type: 'character', length: 12},
{name: 'commune', type: 'character', length: 5},
{name: 'prefixe', type: 'character', length: 3},
{name: 'section', type: 'character', length: 2},
{name: 'numero', type: 'character', length: 2},
{name: 'qualite', type: 'character', length: 2},
{name: 'modeConfection', type: 'character', length: 2},
{name: 'echelle', type: 'number', length: 6, precision: 0},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
parcelles: [
{name: 'id', type: 'character', length: 14},
{name: 'commune', type: 'character', length: 5},
{name: 'prefixe', type: 'character', length: 3},
{name: 'section', type: 'character', length: 2},
{name: 'numero', type: 'character', length: 4},
{name: 'contenance', type: 'number', length: 12, precision: 0},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
batiments: [
{name: 'commune', type: 'character', length: 5},
{name: 'nom', type: 'character', length: 80},
{name: 'type', type: 'character', length: 2},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
lieux_dits: [
{name: 'commune', type: 'character', length: 5},
{name: 'nom', type: 'character', length: 80},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
subdivisions_fiscales: [
{name: 'parcelle', type: 'character', length: 14},
{name: 'lettre', type: 'character', length: 1},
{name: 'created', type: 'date'},
{name: 'updated', type: 'date'}
],
prefixes_sections: [
{name: 'id', type: 'character', length: 6},
{name: 'commune', type: 'character', length: 5},
{name: 'prefixe', type: 'character', length: 3},
{name: 'ancienne', type: 'character', length: 5},
{name: 'nom', type: 'character', length: 80}
]
}
|
// server.js
// set up ======================
var express = require('express');
var mongoose = require('mongoose');
var bodyParser = require('body-parser');
var methodOverride = require('method-override');
var morgan = require('morgan');
var app = express();
// configuration ================
var config = require('./config/config');
mongoose.connect(config.mongourl); // connect to mongoDB database on modulus.io
app.use(express.static(__dirname + '/public'));
app.use(bodyParser.urlencoded({'extended':'true'}));
app.use(bodyParser.json());
app.use(bodyParser.json({ type: 'application/vnd.api+json' }));
app.use(methodOverride());
app.use(morgan('dev'));
// define task model =============
var Task = mongoose.model('Task', {
    text : String,
    done : Boolean // set by POST /api/tasks; must be in the schema to persist
});
// routes ========================
// get
app.get('/api/tasks', function(req, res) {
Task.find(function(err, tasks) {
if (err){
            return res.send(err);
}
res.json(tasks);
});
});
// create task
app.post('/api/tasks', function(req, res) {
Task.create({
text : req.body.text,
done : false
}, function(err, task) {
if (err){
            return res.send(err);
}
Task.find(function(err, tasks) {
if (err){
                return res.send(err);
}
res.json(tasks);
});
});
});
// delete a task
app.delete('/api/tasks/:task_id', function(req, res) {
Task.remove({
_id : req.params.task_id
}, function(err, task) {
if (err){
            return res.send(err);
}
Task.find(function(err, tasks) {
if (err){
                return res.send(err);
}
res.json(tasks);
});
});
});
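// Example requests (illustrative, assuming the default port configured below):
//   curl http://localhost:8081/api/tasks
//   curl -X POST -H "Content-Type: application/json" -d '{"text":"buy milk"}' http://localhost:8081/api/tasks
//   curl -X DELETE http://localhost:8081/api/tasks/<task_id>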
// application ===================
app.get('*', function(req, res) {
res.sendfile('./public/index.html');
});
// listen ========================
var port = process.env.PORT || 8081;
app.listen(port);
console.log("Magic happens on port " + port);
|
/**
* React Starter Kit (https://www.reactstarterkit.com/)
*
* Copyright © 2014-present Kriasoft, LLC. All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE.txt file in the root directory of this source tree.
*/
export function updateTag(tagName, keyName, keyValue, attrName, attrValue) {
const node = document.head.querySelector(
`${tagName}[${keyName}="${keyValue}"]`,
);
if (node && node.getAttribute(attrName) === attrValue) return;
// Remove and create a new tag in order to make it work with bookmarks in Safari
if (node) {
node.parentNode.removeChild(node);
}
if (typeof attrValue === 'string') {
const nextNode = document.createElement(tagName);
nextNode.setAttribute(keyName, keyValue);
nextNode.setAttribute(attrName, attrValue);
document.head.appendChild(nextNode);
}
}
export function updateMeta(name, content) {
updateTag('meta', 'name', name, 'content', content);
}
export function updateCustomMeta(property, content) {
updateTag('meta', 'property', property, 'content', content);
}
export function updateLink(rel, href) {
updateTag('link', 'rel', rel, 'href', href);
}
|
import WeightFirebase from './weightFirebase'
import { toFirebaseTimestamp, pastDays, dateDiff, toDate, nextDay } from '../utils/date-utils'
import { avgHome } from '../stat/StatHelper'
import FirebaseException from '../utils/FirebaseException'
class WeightAPI {
getAll = async uid => {
try {
return await WeightFirebase.getAll(uid)
} catch(error) {
throw new FirebaseException(error)
}
}
getWeightListBtwDates = (uid, beginDate, endDate) => {
const beginTimestamp = toFirebaseTimestamp(beginDate)
const endTimestamp = toFirebaseTimestamp(endDate)
return WeightFirebase.getWeightListBtwDates(uid, beginTimestamp, endTimestamp)
}
addWeight = async (nbPers, startDate, endDate, recycled, norecycled) => {
// calculate average weight
const diff = dateDiff(startDate, endDate)
const avgNb = avgHome(recycled, norecycled, nbPers, diff)
// get days list (we also include endDate)
const dayList = pastDays(diff, endDate)
dayList.push(endDate)
// generate dynamic weight to insert
const insertList = dayList.map(d => {
return convertToWeight(nbPers, d, d, avgNb.recycled, avgNb.norecycled)
})
if (insertList == null || insertList.length === 0) {
return
}
// call db
try {
await WeightFirebase.addWeightBatch(insertList)
} catch(error) {
throw new FirebaseException(error)
}
}
removeAllWeight = async () => {
try {
      await WeightFirebase.removeAllWeight()
} catch(error) {
throw new FirebaseException(error)
}
}
getLastWeight = async uid => {
try {
const w = await WeightFirebase.getLastWeight(uid)
return (w != null) ? convertFromWeight(w) : null
} catch(error) {
throw new FirebaseException(error)
}
}
getLastStartDate = async uid => {
const w = await this.getLastWeight(uid)
if (w != null) {
return nextDay(w.startDate)
}
}
}
const convertToWeight = (nbPers, startDate, endDate, recycled, norecycled) => {
const startTimestamp = toFirebaseTimestamp(startDate)
const endTimestamp = toFirebaseTimestamp(endDate)
const nowTimestamp = toFirebaseTimestamp(new Date())
return {
nbPers: nbPers,
recycled: recycled,
norecycled: norecycled,
startDate: startTimestamp,
endDate: endTimestamp,
recordedDate: nowTimestamp,
}
}
const convertFromWeight = w => {
const startDate = toDate(w.startDate)
const endDate = toDate(w.endDate)
const recordedDate = toDate(w.recordedDate)
return {
nbPers: w.nbPers,
recycled: w.recycled,
norecycled: w.norecycled,
startDate,
endDate,
recordedDate,
}
}
export default new WeightAPI()
|
// Redux
import { createStore, applyMiddleware } from "redux";
import { composeWithDevTools } from "redux-devtools-extension";
// Middlewares
import { createLogger } from "redux-logger";
import thunkMiddleware from "redux-thunk";
// Reducers
import rootReducer from "./redux/index";
const configureStore = () => {
const store = createStore(
rootReducer,
composeWithDevTools(
applyMiddleware(thunkMiddleware, createLogger({ collapsed: true }))
)
);
if (module.hot) {
module.hot.accept("./index.js", () => {
// eslint-disable-next-line global-require
const nextRootReducer = require("./index.js");
store.replaceReducer(nextRootReducer);
});
}
return store;
};
export default configureStore;
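// Usage sketch (illustrative): build the store once at startup and hand it to
// react-redux, e.g. `const store = configureStore();` and then
// `<Provider store={store}>...</Provider>` at the application root.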
|
import sys
import unittest
from unittest import mock
from bpython.curtsiesfrontend.coderunner import CodeRunner, FakeOutput
class TestCodeRunner(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
def test_simple(self):
c = CodeRunner(
request_refresh=lambda: self.orig_stdout.flush()
or self.orig_stderr.flush()
)
stdout = FakeOutput(c, lambda *args, **kwargs: None, None)
stderr = FakeOutput(c, lambda *args, **kwargs: None, None)
sys.stdout = stdout
        sys.stderr = stderr
c.load_code("1 + 1")
c.run_code()
c.run_code()
c.run_code()
def test_exception(self):
c = CodeRunner(
request_refresh=lambda: self.orig_stdout.flush()
or self.orig_stderr.flush()
)
def ctrlc():
raise KeyboardInterrupt()
stdout = FakeOutput(c, lambda x: ctrlc(), None)
stderr = FakeOutput(c, lambda *args, **kwargs: None, None)
sys.stdout = stdout
sys.stderr = stderr
c.load_code("1 + 1")
c.run_code()
class TestFakeOutput(unittest.TestCase):
def assert_unicode(self, s):
self.assertIsInstance(s, str)
def test_bytes(self):
out = FakeOutput(mock.Mock(), self.assert_unicode, None)
out.write("native string type")
|
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import CommentList from '../components/Article/CommentSection/CommentList';
import * as CommentListActions from '../actions/commentList';
const mapStateToProps = ({ commentList: { comments } }) => ({
comments,
});
const mapDispatchToProps = dispatch => bindActionCreators(CommentListActions, dispatch);
export default connect(mapStateToProps, mapDispatchToProps)(CommentList);
|
# Solution of;
# Project Euler Problem 512: Sums of totients of powers
# https://projecteuler.net/problem=512
#
# Let $\varphi(n)$ be Euler's totient function. Let
# $f(n)=(\sum_{i=1}^{n}\varphi(n^i)) \text{ mod } (n+1)$. Let
# $g(n)=\sum_{i=1}^{n} f(i)$. $g(100)=2007$. Find $g(5 \times 10^8)$.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
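# A brute-force sketch for small n (added for illustration, not part of the
# timed harness). It uses the identity phi(n**i) = n**(i - 1) * phi(n) for
# n >= 2, and assumes sympy is available for the totient.
from sympy import totient

def f(n):
    if n == 1:
        return 1 % 2
    phi = int(totient(n))
    return sum(phi * n ** (i - 1) for i in range(1, n + 1)) % (n + 1)

def g(n):
    return sum(f(i) for i in range(1, n + 1))
# g(100) == 2007, matching the value given in the problem statement.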
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 512
timed.caller(dummy, n, i, prob_id)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvoiceAmountLimitDTO(object):
def __init__(self):
self._amount_limit = None
self._invoice_kind = None
self._month_amount_limit = None
@property
def amount_limit(self):
return self._amount_limit
@amount_limit.setter
def amount_limit(self, value):
self._amount_limit = value
@property
def invoice_kind(self):
return self._invoice_kind
@invoice_kind.setter
def invoice_kind(self, value):
self._invoice_kind = value
@property
def month_amount_limit(self):
return self._month_amount_limit
@month_amount_limit.setter
def month_amount_limit(self, value):
self._month_amount_limit = value
def to_alipay_dict(self):
params = dict()
if self.amount_limit:
if hasattr(self.amount_limit, 'to_alipay_dict'):
params['amount_limit'] = self.amount_limit.to_alipay_dict()
else:
params['amount_limit'] = self.amount_limit
if self.invoice_kind:
if hasattr(self.invoice_kind, 'to_alipay_dict'):
params['invoice_kind'] = self.invoice_kind.to_alipay_dict()
else:
params['invoice_kind'] = self.invoice_kind
if self.month_amount_limit:
if hasattr(self.month_amount_limit, 'to_alipay_dict'):
params['month_amount_limit'] = self.month_amount_limit.to_alipay_dict()
else:
params['month_amount_limit'] = self.month_amount_limit
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InvoiceAmountLimitDTO()
if 'amount_limit' in d:
o.amount_limit = d['amount_limit']
if 'invoice_kind' in d:
o.invoice_kind = d['invoice_kind']
if 'month_amount_limit' in d:
o.month_amount_limit = d['month_amount_limit']
return o
|
#ifndef SLP_H /* include guard added; the macro name here is illustrative */
#define SLP_H
#include <sys/time.h> /* for struct timeval used by time_difference() */
#include <myBgn.h>
#define NO_OF_QUERIES 1
typedef struct SLP1_key_t{
BGN_PK_t * bgnPk;
BGN_SK_t * bgnSk;
int keyPerm;
//permutation key
}SLP1_KEY_t;
typedef struct trapdoor_t{
int pos_i; //Should be changed
int rand_s;
}TRAPDOOR_t;
typedef struct lp_q_res_t{
element_t * ciphertexts;
element_t * nhds;
}LP_Q_RES_t;
typedef struct lp2_q_res_t{
element_t * ciphertexts;
element_t * m;
}LP2_Q_RES_t;
typedef struct slp1_res_t{
int maxIndex;
mpz_t maxScore;
}SLP1_RES_t;
typedef struct slp2_res_t{
mpz_t * scores;
int * indices;
int maxIndex;
mpz_t maxScore;
}SLP2_RES_t;
extern int ** read_matrix_from_file(char * , int );
extern int ** Allocate_2D_int(int , int );
extern element_t ** Allocate_2D_element(int , int );
extern SLP1_KEY_t * SLP1_Key_Gen( int );
extern element_t ** SLP1_Encrypt_Matrix( int ** , BGN_PK_t * , int );
extern void SLP1_Trapdoor_Gen(TRAPDOOR_t *, int , int );
extern void SLP1_LinkPred_Query(LP_Q_RES_t *, TRAPDOOR_t *, element_t ** ,BGN_PK_t * , int );
extern void SLP1_Find_Max_Vertex(SLP1_RES_t * , SLP1_KEY_t *, LP_Q_RES_t *, int );
extern void SLP1_print_slp1_res(SLP1_RES_t *);
extern double time_difference(struct timeval * , struct timeval * );
extern void SLP1_print_times(int ,int, double, double, int, double, double, double );
extern void SLP1_Clear_LPQRes(LP_Q_RES_t *, int );
extern void SLP1_Clear_All(int **AdjMatrix, element_t **,BGN_PK_t *, int );
extern int ** SLP2_contruct_b_matrix(int **, int );
extern element_t ** SLP2_Encrypt_Matrix( int ** , BGN_PK_t * , int );
extern void SLP2_LinkPred_Query( LP2_Q_RES_t *,TRAPDOOR_t *, element_t ** , element_t ** , BGN_PK_t * , int);
extern void SLP2_Find_Max_Vertex(SLP2_RES_t * , SLP1_KEY_t *, LP2_Q_RES_t *, int );
extern void SLP2_sort(mpz_t * , int * , int );
extern void SLP2_final_score(SLP1_KEY_t *, element_t *, SLP2_RES_t *,int);
extern void SLP2_print_slp2_res(SLP2_RES_t *);
extern void SLP2_Clear_All(int **, element_t **,int **, element_t **, BGN_PK_t *, int );
#endif /* SLP_H */
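/*
 * Typical SLP1 call order as suggested by the declarations above (a sketch;
 * the exact argument semantics are assumptions, not taken from the .c files):
 *
 *   SLP1_KEY_t *key = SLP1_Key_Gen(sec_param);
 *   element_t **encA = SLP1_Encrypt_Matrix(adj_matrix, key->bgnPk, n);
 *   TRAPDOOR_t td; SLP1_Trapdoor_Gen(&td, query_vertex, n);
 *   LP_Q_RES_t res; SLP1_LinkPred_Query(&res, &td, encA, key->bgnPk, n);
 *   SLP1_RES_t out; SLP1_Find_Max_Vertex(&out, key, &res, n);
 *   SLP1_print_slp1_res(&out);
 */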
|
# Databricks notebook source
# MAGIC %run ./_databricks-academy-helper $lesson="1.3.2L"
# COMMAND ----------
DA.cleanup()
DA.init()
DA.conclude_setup()
|
/**
* freee API
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* OpenAPI spec version: v1.0
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
*
* Swagger Codegen version: 2.4.0-SNAPSHOT
*
* Do not edit the class manually.
*
*/
(function(root, factory) {
if (typeof define === 'function' && define.amd) {
// AMD.
define(['expect.js', '../../src/index'], factory);
} else if (typeof module === 'object' && module.exports) {
// CommonJS-like environments that support module.exports, like Node.
factory(require('expect.js'), require('../../src/index'));
} else {
// Browser globals (root is window)
factory(root.expect, root.FreeeAccountingClient);
}
}(this, function(expect, FreeeAccountingClient) {
'use strict';
var instance;
beforeEach(function() {
instance = new FreeeAccountingClient.SectionsApi();
});
var getProperty = function(object, getter, property) {
// Use getter method if present; otherwise, get the property directly.
if (typeof object[getter] === 'function')
return object[getter]();
else
return object[property];
}
var setProperty = function(object, setter, property, value) {
// Use setter method if present; otherwise, set the property directly.
if (typeof object[setter] === 'function')
object[setter](value);
else
object[property] = value;
}
describe('SectionsApi', function() {
describe('createSection', function() {
it('should call createSection successfully', function(done) {
//uncomment below and update the code to test createSection
//instance.createSection(function(error) {
// if (error) throw error;
//expect().to.be();
//});
done();
});
});
describe('destroySection', function() {
it('should call destroySection successfully', function(done) {
//uncomment below and update the code to test destroySection
//instance.destroySection(function(error) {
// if (error) throw error;
//expect().to.be();
//});
done();
});
});
describe('getSections', function() {
it('should call getSections successfully', function(done) {
//uncomment below and update the code to test getSections
//instance.getSections(function(error) {
// if (error) throw error;
//expect().to.be();
//});
done();
});
});
describe('updateSection', function() {
it('should call updateSection successfully', function(done) {
//uncomment below and update the code to test updateSection
//instance.updateSection(function(error) {
// if (error) throw error;
//expect().to.be();
//});
done();
});
});
});
}));
|
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
#import "NSSecureCoding.h"
@class ASDBetaAppDisplayNames, ASDBetaAppVersion, NSDate, NSDictionary, NSString;
@interface ASDBetaAppLaunchInfo : NSObject <NSSecureCoding>
{
BOOL _feedbackEnabled;
BOOL _launchScreenEnabled;
BOOL _sharedFeedback;
NSString *_artistName;
ASDBetaAppDisplayNames *_displayNames;
NSDate *_expirationDate;
NSString *_iconURLTemplate;
NSDate *_lastWelcomeScreenViewDate;
NSDictionary *_localizedTestNotes;
NSString *_testerEmail;
ASDBetaAppVersion *_version;
}
+ (BOOL)supportsSecureCoding;
@property(copy) ASDBetaAppVersion *version; // @synthesize version=_version;
@property(copy) NSString *testerEmail; // @synthesize testerEmail=_testerEmail;
@property(getter=hasSharedFeedback) BOOL sharedFeedback; // @synthesize sharedFeedback=_sharedFeedback;
@property(copy) NSDictionary *localizedTestNotes; // @synthesize localizedTestNotes=_localizedTestNotes;
@property(getter=isLaunchScreenEnabled) BOOL launchScreenEnabled; // @synthesize launchScreenEnabled=_launchScreenEnabled;
@property(copy) NSDate *lastWelcomeScreenViewDate; // @synthesize lastWelcomeScreenViewDate=_lastWelcomeScreenViewDate;
@property(copy) NSString *iconURLTemplate; // @synthesize iconURLTemplate=_iconURLTemplate;
@property(getter=isFeedbackEnabled) BOOL feedbackEnabled; // @synthesize feedbackEnabled=_feedbackEnabled;
@property(copy) NSDate *expirationDate; // @synthesize expirationDate=_expirationDate;
@property(copy) ASDBetaAppDisplayNames *displayNames; // @synthesize displayNames=_displayNames;
@property(copy) NSString *artistName; // @synthesize artistName=_artistName;
- (void).cxx_destruct;
- (id)initWithCoder:(id)arg1;
- (void)encodeWithCoder:(id)arg1;
- (id)init;
@end
|
# -*- coding: utf-8 -*-
"""Version-check script."""
import os
import sys
import codecs
from art.art_param import *
Failed = 0
VERSION = "4.9"
README_ITEMS = ['<td align="center">{0}</td>'.format(str(FONT_COUNTER)),
'<img src="https://img.shields.io/badge/Art List-{0}-orange.svg">'.format(str(ART_COUNTER)),
'<img src="https://img.shields.io/badge/Font List-{0}-blue.svg">'.format(str(FONT_COUNTER)),
'<td align="center">{0}</td>'.format(str(ART_COUNTER)),
'<td align="center">{0}</td>'.format(str(DECORATION_COUNTER)),
'<img src="https://img.shields.io/badge/Decor List-{0}-green.svg">'.format(str(DECORATION_COUNTER))]
SETUP_ITEMS = [
"version='{0}'"]
INSTALL_ITEMS = [
"[Version {0}](https://github.com/sepandhaghighi/art/archive/v{0}.zip)",
"pip install art=={0}",
"pip3 install art=={0}",
'easy_install "art=={0}"']
CHANGELOG_ITEMS = [
"## [{0}]",
"https://github.com/sepandhaghighi/art/compare/v{0}...dev",
"[{0}]:"]
ART_LIST_ITEMS = ["### Version : {0}"]
FONT_LIST_ITEMS = ["### Version : {0}"]
PARAMS_ITEMS = ['ART_VERSION = "{0}"']
FILES = {
"setup.py": SETUP_ITEMS,
"INSTALL.md": INSTALL_ITEMS,
"CHANGELOG.md": CHANGELOG_ITEMS,
"FontList.ipynb": FONT_LIST_ITEMS,
"ArtList.ipynb": ART_LIST_ITEMS,
os.path.join(
"art",
"art_param.py"): PARAMS_ITEMS}
TEST_NUMBER = len(FILES.keys()) + 1
def print_result(failed=False):
"""
Print final result.
:param failed: failed flag
:type failed: bool
:return: None
"""
message = "Version/Counter tag tests "
if not failed:
print("\n" + message + "passed!")
else:
print("\n" + message + "failed!")
print("Passed : " + str(TEST_NUMBER - Failed) + "/" + str(TEST_NUMBER))
if __name__ == "__main__":
for file_name in FILES.keys():
try:
file_content = codecs.open(
file_name, "r", "utf-8", "ignore").read()
for test_item in FILES[file_name]:
if file_content.find(test_item.format(VERSION)) == -1:
print("Incorrect version tag in " + file_name)
Failed += 1
break
except Exception as e:
Failed += 1
print("Error in " + file_name + "\n" + "Message : " + str(e))
try:
readme_file_content = codecs.open(
"README.md", "r", "utf-8", "ignore").read()
for test_item in README_ITEMS:
if readme_file_content.find(
test_item) == -1:
print("Incorrect counter in " + "README.md")
Failed += 1
break
except Exception as e:
Failed += 1
print("Error in " + file_name + "\n" + "Message : " + str(e))
if Failed == 0:
print_result(False)
sys.exit(0)
else:
print_result(True)
sys.exit(1)
|
#!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
import ray
from ray.tune import Trainable, run, sample_from
from ray.tune.schedulers import HyperBandScheduler
class MyTrainableClass(Trainable):
"""Example agent whose learning curve is a random sigmoid.
The dummy hyperparameters "width" and "height" determine the slope and
maximum reward value reached.
"""
def _setup(self, config):
self.timestep = 0
def _train(self):
self.timestep += 1
v = np.tanh(float(self.timestep) / self.config.get("width", 1))
v *= self.config.get("height", 1)
# Here we use `episode_reward_mean`, but you can also report other
# objectives such as loss or accuracy.
return {"episode_reward_mean": v}
def _save(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w") as f:
f.write(json.dumps({"timestep": self.timestep}))
return path
def _restore(self, checkpoint_path):
with open(checkpoint_path) as f:
self.timestep = json.loads(f.read())["timestep"]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init(num_cpus=4 if args.smoke_test else None)
# Hyperband early stopping, configured with `episode_reward_mean` as the
# objective and `training_iteration` as the time unit,
# which is automatically filled by Tune.
hyperband = HyperBandScheduler(
time_attr="training_iteration",
metric="episode_reward_mean",
mode="max",
max_t=200)
run(MyTrainableClass,
name="hyperband_test",
num_samples=20,
stop={"training_iteration": 1 if args.smoke_test else 99999},
config={
"width": sample_from(lambda spec: 10 + int(90 * random.random())),
"height": sample_from(lambda spec: int(100 * random.random()))
},
scheduler=hyperband,
fail_fast=True)
|
const utility = {}
utility.types = {
  media: ['mp4', 'mkv'],
  archives: ['zip', '7z', 'rar', 'tar', 'gz', 'ar', 'iso', 'xz'],
  documents: ['docx', 'doc', 'pdf', 'xlsx', 'xls', 'odt', 'ods', 'odp', 'odg', 'odf', 'txt', 'ps', 'tex'],
  app: ['exe', 'dmg', 'pkg', 'deb']
}
module.exports = utility;
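// Usage sketch (added for illustration): map a file extension to its category.
//   const categoryOf = ext =>
//     Object.keys(utility.types).find(k => utility.types[k].includes(ext)) || 'other';
//   categoryOf('pdf'); // -> 'documents'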
|
import sys
from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
from antlr_py.ChatLexer import ChatLexer
from antlr_py.ChatParser import ChatParser
from antlr_py.listeners import HtmlChatListener
def main(argv):
input_ = FileStream(argv[1])
lexer = ChatLexer(input_)
parser = ChatParser(CommonTokenStream(lexer))
tree = parser.chat()
with open("output.html", "w") as output:
chat_listener = HtmlChatListener(output)
walker = ParseTreeWalker()
walker.walk(chat_listener, tree)
if __name__ == '__main__':
main(sys.argv)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
import collections.abc
from lxml import etree as et
import decimal
import datetime
import urllib.request, urllib.parse, urllib.error
from . import utilities
from .base import Base, MalformedPageError, InvalidBaseError, loadable
class MalformedMediaListPageError(MalformedPageError):
pass
class InvalidMediaListError(InvalidBaseError):
pass
class MediaList(Base, collections.abc.Mapping, metaclass=abc.ABCMeta):
__id_attribute = "username"
def __getitem__(self, media):
return self.list[media]
def __contains__(self, media):
return media in self.list
def __len__(self):
return len(self.list)
def __iter__(self):
for media in self.list:
yield media
def __init__(self, session, user_name):
super(MediaList, self).__init__(session)
self.username = user_name
if not isinstance(self.username, str) or len(self.username) < 1:
raise InvalidMediaListError(self.username)
self._list = None
self._stats = None
# subclasses must define a list type, ala "anime" or "manga"
@abc.abstractmethod
def type(self):
pass
# a list verb ala "watch", "read", etc
@abc.abstractmethod
def verb(self):
pass
# a list with status ints as indices and status texts as values.
@property
def user_status_terms(self):
statuses = collections.defaultdict(lambda: 'Unknown')
statuses[1] = self.verb.capitalize() + 'ing'
statuses[2] = 'Completed'
statuses[3] = 'On-Hold'
statuses[4] = 'Dropped'
statuses[6] = 'Plan to ' + self.verb.capitalize()
return statuses
def parse_entry_media_attributes(self, soup):
"""
Args:
soup: a lxml.html.HtmlElement containing a row from the current media list
Return a dict of attributes of the media the row is about.
"""
row_info = {}
try:
start = utilities.parse_profile_date(soup.find('.//series_start').text)
except ValueError:
start = None
except:
if not self.session.suppress_parse_exceptions:
raise
if start is not None:
try:
row_info['aired'] = (start, utilities.parse_profile_date(soup.find('.//series_end').text))
except ValueError:
row_info['aired'] = (start, None)
except:
if not self.session.suppress_parse_exceptions:
raise
# look up the given media type's status terms.
status_terms = getattr(self.session, self.type)(1)._status_terms
try:
row_info['id'] = int(soup.find('.//series_' + self.type + 'db_id').text)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['title'] = soup.find('.//series_title').text
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['status'] = status_terms[int(soup.find('.//series_status').text)]
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['picture'] = soup.find('.//series_image').text
except:
if not self.session.suppress_parse_exceptions:
raise
return row_info
def parse_entry(self, soup):
"""
Given:
soup: a lxml.html.HtmlElement containing a row from the current media list
Return a tuple:
(media object, dict of this row's parseable attributes)
"""
# parse the media object first.
media_attrs = self.parse_entry_media_attributes(soup)
media_id = media_attrs['id']
del media_attrs['id']
media = getattr(self.session, self.type)(media_id).set(media_attrs)
entry_info = {}
try:
entry_info['started'] = utilities.parse_profile_date(soup.find('.//my_start_date').text)
except ValueError:
entry_info['started'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['finished'] = utilities.parse_profile_date(soup.find('.//my_finish_date').text)
except ValueError:
entry_info['finished'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['status'] = self.user_status_terms[int(soup.find('.//my_status').text)]
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['score'] = int(soup.find('.//my_score').text)
# if user hasn't set a score, set it to None to indicate as such.
if entry_info['score'] == 0:
entry_info['score'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['last_updated'] = datetime.datetime.fromtimestamp(int(soup.find('.//my_last_updated').text))
except:
if not self.session.suppress_parse_exceptions:
raise
return media, entry_info
def parse_stats(self, soup):
"""
Given:
soup: a lxml.etree element containing the current media list's stats
Return a dict of this media list's stats.
"""
stats = {}
for row in soup.getchildren():
try:
key = row.tag.replace('user_', '')
if key == 'id':
stats[key] = int(row.text)
elif key == 'name':
stats[key] = row.text
elif key == self.verb + 'ing':
try:
stats[key] = int(row.text)
except ValueError:
stats[key] = 0
elif key == 'completed':
try:
stats[key] = int(row.text)
except ValueError:
stats[key] = 0
                # on parse failure, fall back to the same normalized key used on success
                elif key == 'onhold':
                    try:
                        stats['on_hold'] = int(row.text)
                    except ValueError:
                        stats['on_hold'] = 0
                elif key == 'dropped':
                    try:
                        stats[key] = int(row.text)
                    except ValueError:
                        stats[key] = 0
                elif key == 'planto' + self.verb:
                    try:
                        stats['plan_to_' + self.verb] = int(row.text)
                    except ValueError:
                        stats['plan_to_' + self.verb] = 0
                # for some reason, MAL doesn't substitute 'read' in for manga for the verb here
                elif key == 'days_spent_watching':
                    try:
                        stats['days_spent'] = decimal.Decimal(row.text)
                    except decimal.InvalidOperation:
                        stats['days_spent'] = decimal.Decimal(0)
except:
if not self.session.suppress_parse_exceptions:
raise
return stats
def parse(self, xml):
list_info = {}
list_page = et.fromstring(xml.encode())
primary_elt = list_page
if primary_elt is None:
raise MalformedMediaListPageError(self.username, xml,
message="Could not find root XML element in " + self.type + " list")
bad_username_elt = list_page.find('.//error')
if bad_username_elt is not None:
raise InvalidMediaListError(self.username, message="Invalid username when fetching " + self.type + " list")
stats_elt = list_page.find('.//myinfo')
if stats_elt is None and not utilities.check_if_mal_response_is_empty(list_page):
raise MalformedMediaListPageError(self.username, xml,
message="Could not find stats element in " + self.type + " list")
if utilities.check_if_mal_response_is_empty(list_page):
raise InvalidMediaListError(self.username, message="Empty result set when fetching " + self.type + " list")
list_info['stats'] = self.parse_stats(stats_elt)
list_info['list'] = {}
for row in list_page.findall(".//%s" % self.type):
(media, entry) = self.parse_entry(row)
list_info['list'][media] = entry
return list_info
def load(self):
media_list = self.session.session.get('https://myanimelist.net/malappinfo.php?' + urllib.parse.urlencode(
{'u': self.username, 'status': 'all', 'type': self.type})).text
self.set(self.parse(media_list))
return self
@property
@loadable('load')
def list(self):
return self._list
@property
@loadable('load')
def stats(self):
return self._stats
def section(self, status):
return {k: self.list[k] for k in self.list if self.list[k]['status'] == status}
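# A minimal subclass sketch (added for illustration; the real subclasses live
# elsewhere in this package). Concrete lists supply `type` and `verb`, which
# drive the request URL, the XML tag names and the status labels above:
#
#     class AnimeList(MediaList):
#         @property
#         def type(self):
#             return 'anime'
#         @property
#         def verb(self):
#             return 'watch'
#
#     AnimeList(session, 'some_username').load().stats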
|
#!/usr/bin/env python
# ========================================================================== #
#
# Create links to the parameter files in the data tree template
#
# ========================================================================== #
from __future__ import print_function
import os
import sys
from optparse import OptionParser
import subprocess
def main():
global options
# parse the command line
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option('--debug',
dest='debug', default=False,
action="store_true",
help='Set debugging on')
parser.add_option('--verbose',
dest='verbose', default=False,
action="store_true",
help='Set verbose debugging on')
parser.add_option('--templateDir',
dest='templateDir', default="/tmp/templateDir",
help='Path of template - i.e. the directory tree template')
parser.add_option('--installDir',
dest='installDir', default="/tmp/installDir",
help='Where the tree will be installed')
(options, args) = parser.parse_args()
if (options.verbose):
options.debug = True
# debug print
if (options.debug):
print("Running script: ", os.path.basename(__file__), file=sys.stderr)
print(" Options:", file=sys.stderr)
print(" Debug: ", options.debug, file=sys.stderr)
print(" Verbose: ", options.verbose, file=sys.stderr)
print(" Template dir: ", options.templateDir, file=sys.stderr)
print(" Install dir: ", options.installDir, file=sys.stderr)
# make the install dir
try:
os.makedirs(options.installDir)
except OSError as exc:
if (options.verbose):
print("WARNING: trying to create install dir", file=sys.stderr)
print(" ", exc, file=sys.stderr)
# Walk the template directory tree
for dirPath, subDirList, fileList in os.walk(options.templateDir):
for fileName in fileList:
if (fileName[0] == '_'):
handleParamFile(dirPath, fileName)
sys.exit(0)
########################################################################
# Handle a parameter file entry
def handleParamFile(dirPath, paramFileName):
if (options.debug):
print("Handling param file, dir, paramFile: ", \
dirPath, ", ", paramFileName, file=sys.stderr)
# compute sub dir
subDir = dirPath[len(options.templateDir):]
# compute install sub dir
installSubDir = options.installDir + subDir
if (options.debug):
print("subDir: ", subDir, file=sys.stderr)
print("installSubDir: ", installSubDir, file=sys.stderr)
# make the install sub dir and go there
try:
os.makedirs(installSubDir)
except OSError as exc:
pass
if (options.debug):
print("os.chdir: ", installSubDir, file=sys.stderr)
os.chdir(installSubDir)
# remove the link if it exists
if (os.path.exists(paramFileName)):
os.remove(paramFileName)
# create the link
paramFilePath = os.path.join(options.templateDir + subDir, paramFileName)
cmd = "ln -s " + paramFilePath
runCommand(cmd)
return
########################################################################
# Run a command in a shell, wait for it to complete
def runCommand(cmd):
if (options.verbose == True):
print("running cmd:",cmd, file=sys.stderr)
try:
retcode = subprocess.call(cmd, shell=True)
if retcode < 0:
print("Child was terminated by signal: ", -retcode, file=sys.stderr)
else:
if (options.verbose == True):
print("Child returned code: ", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
########################################################################
# Run - entry point
if __name__ == "__main__":
main()
|
import React, { Component } from 'react';
import axios from "axios";
import { withRouter } from "react-router-dom" ;
import Footer from './Footer';
import { storage } from "./firebase.js";
import Navbar_Login from "./Navbar_Login"
class AddItems extends Component {
constructor(props) {
super(props)
        // Bind event handlers so that "this" refers to the component instance
this.onChangeItemName = this.onChangeItemName.bind(this);
this.onChangeCategory = this.onChangeCategory.bind(this);
this.onChangeDescription = this.onChangeDescription.bind(this);
this.handleChangeImage = this.handleChangeImage.bind(this);
this.onChangetype = this.onChangetype.bind(this);
this.onSubmit = this.onSubmit.bind(this);
this.state = {
itemName: "",
category : "Women",
description: "",
type:"Jacket",
image:null,
url :'',
progress:0,
phone:'',
}
}
    // On mount, fetch all users and look up this user's phone number
componentDidMount() {
axios.get("http://localhost:3000/addUser/")
.then( res => {
// this.setState({phone :res.data.phone})
var phones=0
for (var i = 0 ; i< res.data.length;i++){
if (res.data[i].username=== localStorage.getItem('username')){
phones=res.data[i].phone
}
}
this.setState({phone: phones})
})
.catch((error) => {
console.log(error);
})
}
    // Event handlers:
onChangeItemName(e) {
this.setState({
itemName: e.target.value
});
}
onChangeCategory(e) {
const { value } = e.target
this.setState({
category : value
});
}
onChangetype(e){
const { value } = e.target
this.setState({
type: value
});
}
onChangeDescription(e) {
this.setState({
description: e.target.value
});
}
    // Stores the selected image file in state so it can be uploaded to Firebase
handleChangeImage(e) {
if (e.target.files[0]) {
this.setState({
image: e.target.files[0]
})
}
}
    // Uploads the image to Firebase storage and saves its download URL in state
handleUpload (e) {
e.preventDefault();
var uploadTask = storage.ref(`images/${this.state.image.name}`).put(this.state.image);
uploadTask.on(
"state_changed",
snapshot => {
var progress = Math.round(
(snapshot.bytesTransferred / snapshot.totalBytes) * 100
);
this.setState({
progress:progress})
},
error => {
console.log(error);
},
() => {
storage
.ref("images")
.child(this.state.image.name)
.getDownloadURL()
.then(url => {
this.setState({
url : url
})
});
}
);
}
onSubmit(e) {
e.preventDefault();
const item = {
userName:localStorage.getItem('username'),
itemName: this.state.itemName,
category: this.state.category,
phonenumber:this.state.phone,
description: this.state.description,
type:this.state.type,
image: this.state.url,
}
        console.log(item);
        axios.post("http://localhost:3000/addItems/add", item)
            .then(res => {
                console.log(res.data);
                // navigate only after the POST completes so the request is not cancelled
                window.location = '/ItemsList2'
            });
}
render() {
return (
<div>
<Navbar_Login/>
<br />
<div className = "container">
<form className="text-center border border-light p-9" action="#!" >
<h3> "Only by giving are you able to receive more than you already have." -Jim Rohn </h3>
<p className="h4 mb-4">Donate Your Item</p>
<div className="col">
<label>Item Name</label>
<input
required="true"
type = "text"
className = "form-control"
value = {this.state.itemName}
onChange = {this.onChangeItemName}
text-align = "center"
placeholder = "Insert Item Name"/>
</div>
<br />
<div className="col">
<label>Select Category </label>
<select
ref = "userInput"
required="true"
className = "form-control"
value = {this.state.category}
onChange = {this.onChangeCategory}
>
<option value = "Women">Women</option>
<option value = "Men">Men</option>
<option value = "Kids">Kids</option>
</select>
</div>
<br />
<div className = "col">
<label>Select Type </label>
<select
ref = "userInput"
required="true"
className = "form-control"
value = {this.state.type}
onChange = {this.onChangetype}
>
<option value = "Shoes">Shoes</option>
<option value = "Dress">Dress</option>
<option value = "Jacket">Jacket</option>
<option value = "Blouse">Blouse</option>
<option value = "Gloves">Gloves</option>
<option value = "Hat">Hat</option>
<option value = "Scarf">Scarf</option>
</select>
</div>
<br />
<div className = "col">
<label>Description </label>
<input
type = "text"
required="true"
className = "form-control"
value = {this.state.description}
onChange = {this.onChangeDescription}
placeholder = "Please insert a description of your item and add its current condition"/>
</div>
<br />
<div className = "col">
<label>Image</label>
                        <div id='image'> <img src={this.state.url || "https://via.placeholder.com/50x50"} alt="item preview"
                        /></div>
<input type="file" onChange={this.handleChangeImage.bind(this)} className="btn btn-deep-orange darken-4" />
<button onClick={this.handleUpload.bind(this)} className="btn btn-deep-orange darken-4">Upload</button>
</div>
<br />
<br />
<div>
<button type="submit" onClick= {this.onSubmit} className="btn btn-deep-orange darken-4">Submit</button>
</div>
</form>
</div>
<Footer />
</div>
)
}
}
export default withRouter(AddItems)
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshwallshearrate.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.6 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: full tensor WSR computation was contributed by
## Ulf Shiller & Mehrdad Yousefi, Clemson University
## Mehdi Najafi, University of Toronto
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
from vmtk import vtkvmtk
import vtk
import sys
from vmtk import pypes
class vmtkMeshWallShearRate(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Mesh = None
self.Surface = None
self.VelocityArrayName = None
self.WallShearRateArrayName = 'WallShearRate'
self.ConvergenceTolerance = 1E-6
self.QuadratureOrder = 3
self.UseFullStrainRateTensor = 0
self.SetScriptName('vmtkmeshwallshearrate')
self.SetScriptDoc('compute wall shear rate from a velocity field, producing a surface in output')
self.SetInputMembers([
['Mesh','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader'],
['VelocityArrayName','velocityarray','str',1,'',''],
['WallShearRateArrayName','wsrarray','str',1,'',''],
['ConvergenceTolerance','tolerance','float',1,'',''],
['UseFullStrainRateTensor','fulltensor','bool',1,'',''],
['QuadratureOrder','quadratureorder','int',1,'','']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']
])
def Execute(self):
        if self.Mesh is None:
self.PrintError('Error: no Mesh.')
wallShearRateFilter = vtkvmtk.vtkvmtkMeshWallShearRate()
wallShearRateFilter.SetInputData(self.Mesh)
wallShearRateFilter.SetVelocityArrayName(self.VelocityArrayName)
wallShearRateFilter.SetWallShearRateArrayName(self.WallShearRateArrayName)
wallShearRateFilter.SetConvergenceTolerance(self.ConvergenceTolerance)
wallShearRateFilter.SetQuadratureOrder(self.QuadratureOrder)
wallShearRateFilter.SetUseFullStrainRateTensor(self.UseFullStrainRateTensor)
wallShearRateFilter.ComputeIndividualPartialDerivativesOn()
wallShearRateFilter.Update()
self.Surface = wallShearRateFilter.GetOutput()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
|
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten
from keras.applications.mobilenet import MobileNet
def reg_mobilenet(input_shape):
base_model = MobileNet(input_shape=input_shape, include_top=False, weights=None)
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.2))
top_model.add(Dense(1, activation='linear'))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
    # accuracy is not meaningful for a linear-output regression head; track MAE instead
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae'])
return model
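# Usage sketch (added; the input shape and training data are assumptions):
#   model = reg_mobilenet((224, 224, 3))
#   model.fit(x_train, y_train, epochs=10, batch_size=32)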
|
(function(window){function ismdef(n){for(var t=0;t<n.length;t++)if(isdef(n[t])==!1)return!1;return!0}function isdef(v){return v==""?!1:eval("typeof "+v)!=undef}function createjs(n){var t=document.createElement("script");return t.setAttribute("type","text/javascript"),t.setAttribute("src",n),t}function includejs(n){var t=document.getElementsByTagName("head")[0];t?t.appendChild(createjs(n)):document.body.appendChild(createjs(n))}function importJS(n,t,i,r){isdef(t)==!1?(includejs(n),wait_load(t,function(){i&&i(),r&&r()})):r&&r()}function wait_load(n,t){var i=setInterval(function(){isdef(n)&&(clearInterval(i),t())},50)}function registerclick(){var n=0;jQuery(document).mousedown(function(t){var i,r,u;try{if(n==0)n=new Date;else{if(i=new Date,i-n<1e3)return;n=i}ismdef(["window.scupioec","window.scupioec.rec","window.scupioec.rec[0]","window.scupioec.rec[0].c","window.scupioec.rec[0].c.mid"])&&window.scupioec.rec[0].c.mid=="2"&&(r=jQuery(window),u=jQuery(document),window.scupioec.call({mid:window.scupioec.rec[0].c.mid,act:"mc",mc:t.clientX+","+t.clientY+";"+t.screenX+","+t.screenY+";"+t.pageX+","+t.pageY+";"+screen.width+","+screen.height+";"+screen.availWidth+","+screen.availHeight+";"+r.scrollLeft()+","+r.scrollTop()+";"+u.width()+","+u.height()+";1"}))}catch(f){}}),jQuery(document).ready(function(){if(ismdef(["window.scupioec","window.scupioec.rec","window.scupioec.rec[0]","window.scupioec.rec[0].c","window.scupioec.rec[0].c.mid"])&&window.scupioec.rec[0].c.mid=="2"){var n=[],t=(new Date).getTime(),i,r,u,f,e,o;jQuery(document).mousemove(function(n){i=n.clientX,r=n.clientY,u=n.screenX,f=n.screenY,e=n.pageX,o=n.pageY}),jQuery("iframe").each(function(i,r){var u=t+"sc"+i;jQuery(r).addClass(u),n[i]=!1,jQuery("."+t+"sc"+i).mouseover(function(){n[i]=!0}).mouseout(function(){n[i]=!1})}),jQuery(window).bind("blur",function(){for(frame=0;frame<n.length;frame++)if(n[frame]){var t=jQuery(window),s=jQuery(document);window.scupioec.call({mid:window.scupioec.rec[0].c.mid,act:"mc",mc:i+","+r+";"+u+","+f+";"+e+","+o+";"+screen.width+","+screen.height+";"+screen.availWidth+","+screen.availHeight+";"+t.scrollLeft()+","+t.scrollTop()+";"+s.width()+","+s.height()+";2;"+frame+";"+jQuery("iframe").get(frame).id+";"+jQuery("iframe").get(frame).src})}})}})}function now(){return+new Date}function ck(){for(var t="",u=document.cookie.split(";"),n,i=0;i<u.length;i++){if(n=jQuery.trim(u[i]),n.indexOf("SID=")==0&&(t=t+";"+n),n.indexOf("__utmz=")==0){var f=n.match(/\.utmcsr\=([^|]+)/),e=n.match(/utmccn\=([^|]+)/),o=n.match(/utmcmd\=([^|]+)/),r="";r+=f!=null?f[1]+"|":"|",r+=e!=null?e[1]+"|":"|",o!=null&&(r+=o[1]),t=t+";ga="+r}n.indexOf("ScupioOrETU=")==0&&(t=t+";"+n)}return t}function ShouldQU(){for(var t=document.cookie.split(";"),i,n=0;n<t.length;n++)if(i=jQuery.trim(t[n]),i.indexOf("eupkg=full")==0)return!1;return!0}function ifOldIE(){try{if($.browser.msie){var n=parseFloat($.browser.version);if(n<9)return!0}}catch(t){}return!1}function GetCurrentURL(){try{var n=top.location.href;if(n&&n!="about:blank")return n}catch(t){}try{return location.href}catch(t){}return""}function GetReferrerURL(){try{var n=top.document.referrer;if(n)return n}catch(t){}return""}function CheckIfQU(){var r,u,n,i,s,f,h;if(ifOldIE())return"0";if(r=document.location.href,u=getParam(r,"ref"),u.length<=5)return"-2";n=[],n.push(u),n.push(r),n.push(GetCurrentURL()),n.push(GetReferrerURL());var t="bwrectarea",e="bwrectitem",o=jQuery("#"+t);for(o.length==0&&(jQuery("body").append('<style type="text/css">.bwrect A:link {text-decoration: none;color:#FF0000;} 
.bwrect A:visited {text-decoration: none;color: #0000FF;}<\/style><div id="'+t+'" class="bwrect" style="display:none"><\/div>'),o=jQuery("#"+t)),i=0;i<n.length;i++)if(s=n[i],jQuery("#"+t).html('<a id="'+e+'" href="'+s+'">test<\/a>'),f=jQuery("#"+e).css("color"),h=f=="rgb(0, 0, 255)"||f=="#0000ff",h)return"0";return"-1"}function getParam(n,t){try{var i=RegExp("[?|&]"+t+"=(.+?)(&|$)").exec(n);if(i)return decodeURIComponent(i[1])}catch(r){}return""}function bwRecEn(){for(var i="BWRecEn=",r="",u=document.cookie.split(";"),t,n=0;n<u.length;n++)t=jQuery.trim(u[n]),t.indexOf(i)==0&&(r=t.substring(i.length,t.length));return unescape(r)}function setCookie(n,t,i,r,u,f){n&&(n=n+"="+escape(t)+";",i=i?"expires="+new Date((new Date).getTime()+i*864e5).toGMTString()+";":"",r=r?"path="+r+";":"path=/;",u=u?"domain="+u+";":"",f=f?"secure=true;":"",document.cookie=[n,i,r,u,f].join(""))}function initscupioec(){return window.scupioec||(window.scupioec={vt:now()+"_"+Math.floor(Math.random()*1e4),u:GetCurrentURL(),r:GetReferrerURL(),msi:ck(),bw:bwRecEn(),rec:[],SetCookie:function(n,t,i,r,u,f){setCookie(n,t,i,r,u,f)},flush:function(){for(window.__scupio_r=window.__scupio_r||[];window.__scupio_r.length;)window.scupioec.call(window.__scupio_r.shift())},call:function(n){var i=initscupioec(),t={};t.c=n,t.n=null,i.rec.push(t),callaspx(i)}}),window.scupioec}function scupioec(){var executed="executed",ec=initscupioec(),srcregex=new RegExp("rec.scupio.com/recweb/js/rec([^.]*).js","i"),scripts=jQuery("script"),i,scriptnode,recnode;if(scripts!=null)for(i=0;i<scripts.length;i++)if(srcregex.test(scripts[i].src)&&(scriptnode=jQuery(scripts[i]),!(scripts[i].className.indexOf(executed)>-1)&&(scriptnode.addClass(executed),scriptnode.html().length>0)))return eval("var config="+scriptnode.html()+";"),recnode={},recnode.c=config,recnode.n=scriptnode,ec.rec.push(recnode),ec;return null}function DoJqueryNoConflict(){if(typeof window.$.blockUI!="function"){var n=window.$;jQuery.noConflict(),typeof window.$=="undefined"?window.$=n:typeof window.$.BlockUI=="function"&&(jQuery=window.$)}}function jqueryReady(n){var t="jQuery";importJS(schemevar+"code.jquery.com/jquery-1.7.2.min.js",t,function(){DoJqueryNoConflict()},function(){importJS(schemevar+"rec.scupio.com/recweb/js/jquery.js",t,function(){DoJqueryNoConflict()},n)})}function hasstr(n){return typeof n=="string"?n.length>0:!1}function umallfix(ecobj){var jscontents,i,idxbegin,dstr,ritemtxt,ii;if(document.location.href.indexOf("u-mall.com.tw")!=-1&&(jscontents=jQuery("script"),jscontents!=null))for(i=0;i<jscontents.length;i++){var scriptnode=jQuery(jscontents[i]),txt=scriptnode.html(),idxend=txt.indexOf('jQuery("#ca_top_board").tmpl(data.adv).appendTo("#ca_top_board_main");');if(idxend!=-1&&(idxbegin=txt.indexOf("var data"),dstr=txt.substring(idxbegin,idxend),eval(dstr),eval("typeof data")=="object")){for(ritemtxt="",ii=0;ii<data.adv.length;ii++)ritemtxt+=data.adv[ii].topSaleCode+",";ecobj.ritem=ritemtxt;return}}}function obdesignbuyfix(n){var r,t,i;if(document.location.href.indexOf("obdesign.com.tw")!=-1){r=[];try{if(t=jQuery("#Label訂單編號1").text(),t==""||t==null||t==undefined)return;r.push("order:"+t),i="",jQuery.each(jQuery("#shop_list > tbody").find("tr"),function(n){var f,t;if(n>0){var 
e=jQuery(this).find("td").eq(0).find("img").attr("src"),r=e.split(/catalog\/[^/]+\/([^\.]+)\.jpg/ig),o=r[1].substring(0,r[1].length-1),s=jQuery(this).find("td").eq(4).text(),u=jQuery(this).find("td").eq(5).text();i+=o+","+u+","+s+",",f=jQuery(this).find("td").eq(6).text(),t=parseInt(f)-parseInt(u),t<0&&(i+="__discount,"+t+",1,")}});var e=jQuery("#Label商品金額總計1").text(),u=jQuery("#shop_list > tfoot").find("tr").eq(1).find("td").eq(1).text(),f=jQuery("#Label購物金額總計1").text();i+=u==""||u==null?"__sum,"+f+",1,":"__shipping,"+u+",1,__sum,"+f+",1,",r.push("bitem:"+i),n.pmitem=r.join("^")}catch(o){}return}}function obdesigncartfix(n){if(document.location.href.indexOf("obdesign.com.tw")!=-1){var t=[];try{jQuery.each(jQuery("#shop_list > tbody").find("tr"),function(){var f=jQuery(this).find("td").eq(1).find("a").attr("href"),i=f.split(/product.aspx\?seriesID=([^&]+)/ig),n,r,u;i.length>1&&(n=i[1],n!=""&&n!=null&&n!=undefined&&(n.match(/\&no=/)||(r=jQuery(this).find("td").eq(4).find("select option:selected").text(),u=jQuery(this).find("td").eq(5).text(),t.push(n+","+u+","+r))))}),n.pmitem="cart:"+t.join(",")}catch(i){}return}}function callaspx(ecobj){var datafunc,func,datastr,qustr;if(ecobj!=null){var ec_rec_index=ecobj.rec.length-1,ec_rec=ecobj.rec[ec_rec_index],ec_config=ec_rec.c,ec_node=ec_rec.n;if(ec_config.vidx=ec_rec_index,ec_config.vt=ecobj.vt,ec_config.mid!="none"){if(typeof ec_config.mid!=undef){if(!(disableLocalStoragePublisher[ec_config.mid]===!0)&&!hasstr(ecobj.ubi)){ecobj.ubi=getID();try{getIDFromIframe(setlsid,1e3)}catch(ignore){}}typeof ec_config.pid!=undef&&(ec_config.mid==48&&ec_config.pid.indexOf("cat")==0?umallfix(ec_config):ec_config.mid==5659&&(ec_config.pid=="transaction"?obdesignbuyfix(ec_config):ec_config.pid=="action"&&obdesigncartfix(ec_config)))}if(typeof ec_config.qu!=undef){if(ShouldQU()==!1)return;ec_config.qu=="0:0"&&(ec_config.qu="0:"+CheckIfQU())}hasstr(ecobj.u)&&(ec_config.u=ecobj.u,ec_config.u.length>1e3&&(ec_config.u=ec_config.u.substr(0,1e3)+"__T"),hasstr(ec_config.pid)&&ec_config.pid=="buy"&&delete ec_config.u),hasstr(ecobj.r)&&(ec_config.r=ecobj.r,ec_config.r.length>1e3&&(ec_config.r=ec_config.r.substr(0,1e3)+"__T")),hasstr(ecobj.msi)&&(ec_config.msi=ecobj.msi),hasstr(ecobj.ubi)&&(ec_config.ubi=ecobj.ubi),datafunc=function(a){eval("var data="+a+";"),typeof data!=undef&&data.length>0&&jQuery(ec_node).after(data)},typeof ec_config.success=="function"&&(datafunc=ec_config.success,delete ec_config.success),func=function(a,b){function IsVisited(n){jQuery("#"+taid).html('<a id="'+tiid+'" href="'+n+'">test<\/a>');var t=jQuery("#"+tiid).css("color");return t=="rgb(0, 0, 255)"||t=="#0000ff"}var taid="bwrectarea",tiid="bwrectitem",tresult,qrvalue,tarea,qurl,qset,qrstr,qr;if(datafunc(a),tresult={},eval("var qurls="+b+";"),typeof qurls!=undef&&(qrvalue=qurls.r,typeof qrvalue!=undef)){qurls.r=undef,tarea=jQuery("#"+taid),tarea.length==0&&(jQuery("body").append('<style type="text/css">.bwrect A:link {text-decoration: none;color:#FF0000;} .bwrect A:visited {text-decoration: none;color: #0000FF;}<\/style><div id="'+taid+'" class="bwrect" style="display:none"><\/div>'),tarea=jQuery("#"+taid));for(qurl in qurls)qset=qurls[qurl],tresult[qset]!=1&&IsVisited(qurl)&&(tresult[qset]=1);qrstr="";for(qr in tresult)qrstr+=qr+",";window.scupioec.call({qu:qrvalue+":"+qrstr})}},typeof ecobj.layout!=undef&&ecobj.layout=="none"&&(func=function(){}),typeof ec_config.carts=="string"&&typeof ec_config.pid==undef&&ec_config.carts.length==0&&(ec_config.pid="cart");var 
jshead=schemevar+"rec.scupio.com/recweb/rec.aspx?",jssrc=jshead+jQuery.param(ec_config),testlen=2e3;jssrc.length>testlen&&(hasstr(ec_config.r)&&(ec_config.r="__T",jssrc=jshead+jQuery.param(ec_config)),jssrc.length>testlen&&(hasstr(ec_config.u)&&(ec_config.u="__T"),jssrc=jshead+jQuery.param(ec_config))),datastr="window.scupioec.rec["+ec_rec_index+"].data",qustr="window.scupioec.rec["+ec_rec_index+"].qurls",importJS(jssrc,datastr,null,function(){func(datastr,qustr)})}}}function getID(){var n,t="";try{n=window.localStorage,typeof n=="object"&&typeof n.getItem=="function"&&(t=n.getItem("sclsid"),typeof t!="string"&&(t=""))}catch(i){}return t}function getIDFromIframe(n,t){function v(n,t,i){t.addEventListener?t.addEventListener(n,i,!1):t.attachEvent&&t.attachEvent("on"+n,i)}function y(n,t,i){t.removeEventListener?t.removeEventListener(n,i,!1):t.detachEvent&&t.detachEvent("on"+n,i)}function f(t){o||(o=!0,n(t),clearTimeout(s))}function h(n){if(n&&typeof n.origin=="string"&&n.origin.indexOf("//img.scupio.com")>=0){var t=n.data||n.message;y("message",window,h),f(t)}}var o=!1,s;if(typeof window.postMessage=="function"&&typeof window.localStorage=="object"){v("message",window,h);var c=window.document,r=c.createElement("iframe"),i=r.style,l="0",u="1",a="px",e;r.src="//img.scupio.com/test/lsid/ls.html?v=0.1",r.width=u,r.height=u,r.frameborder=l,r.allowtransparency="true",r.scrolling="no",i.position="absolute",i.width=u+a,i.height=u+a,i.bottom=i.left=i.margin=i.padding=i.border=l,i.overflow="hidden",e=c.getElementsByTagName("body")[0],e&&e.appendChild(r)}else f(null);s=setTimeout(f,t)}function setlsid(n){var t;try{t=window.localStorage,typeof t=="object"&&typeof t.setItem=="function"&&typeof n=="string"&&n!==""&&t.setItem("sclsid",n)}catch(i){}}var undef="undefined",httpvar="http://",httpsvar="https://",autoschemevar="//",schemevar="http://",disableLocalStoragePublisher;document.location.href.indexOf(httpvar)!=-1?schemevar=autoschemevar:document.location.href.indexOf(httpsvar)!=-1&&(schemevar=autoschemevar),disableLocalStoragePublisher={49:!0,63:!0,75:!0},jqueryReady(function(){registerclick();var n=scupioec();n!=null?(callaspx(n),window.scupioec.flush()):jQuery(document).ready(function(){callaspx(scupioec()),window.scupioec.flush()})})})(window)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI RBAC TEST DEFINITIONS
import json
import os
import tempfile
import time
import datetime
import mock
import unittest
from azure_devtools.scenario_tests import AllowLargeResponse, record_only
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.testsdk import ScenarioTest, LiveScenarioTest, ResourceGroupPreparer, KeyVaultPreparer
from ..util import retry
class RoleScenarioTest(ScenarioTest):
def run_under_service_principal(self):
account_info = self.cmd('account show').get_output_in_json()
return account_info['user']['type'] == 'servicePrincipal'
class RbacSPSecretScenarioTest(RoleScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_minimal')
def test_create_for_rbac_with_secret_no_assignment(self, resource_group):
self.kwargs['display_name'] = resource_group
try:
result = self.cmd('ad sp create-for-rbac -n {display_name} --skip-assignment',
checks=self.check('displayName', '{display_name}')).get_output_in_json()
self.kwargs['app_id'] = result['appId']
finally:
self.cmd('ad app delete --id {app_id}')
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_with_password')
def test_create_for_rbac_with_secret_with_assignment(self, resource_group):
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'display_name': resource_group
})
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac -n {display_name} --scopes {scope} {scope}/resourceGroups/{rg}',
checks=self.check('displayName', '{display_name}')).get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('role assignment list --assignee {app_id} --scope {scope}',
checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {app_id} -g {rg}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {app_id} -g {rg}',
checks=self.is_empty())
self.cmd('role assignment delete --assignee {app_id}',
checks=self.is_empty())
finally:
self.cmd('ad app delete --id {app_id}')
class RbacSPCertScenarioTest(RoleScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_with_cert')
def test_create_for_rbac_with_cert_with_assignment(self, resource_group):
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'display_name': resource_group
})
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac -n {display_name} --scopes {scope} {scope}/resourceGroups/{rg} --create-cert',
checks=self.check('displayName', '{display_name}')).get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.assertTrue(result['fileWithCertAndPrivateKey'].endswith('.pem'))
os.remove(result['fileWithCertAndPrivateKey'])
result = self.cmd('ad sp credential reset -n {app_id} --create-cert',
checks=self.check('name', '{app_id}')).get_output_in_json()
self.assertTrue(result['fileWithCertAndPrivateKey'].endswith('.pem'))
os.remove(result['fileWithCertAndPrivateKey'])
finally:
self.cmd('ad app delete --id {app_id}',
checks=self.is_empty())
class RbacSPKeyVaultScenarioTest2(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sp_with_kv_new_cert')
@KeyVaultPreparer(name_prefix='test-rbac-new-kv')
def test_create_for_rbac_with_new_kv_cert(self, resource_group, key_vault):
KeyVaultErrorException = get_sdk(self.cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_error#KeyVaultErrorException')
subscription_id = self.get_subscription_id()
self.kwargs.update({
'display_name': resource_group,
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'cert': 'cert1',
'kv': key_vault
})
        time.sleep(5)  # wait briefly so a newly created vault does not return 504 errors
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
try:
result = self.cmd('ad sp create-for-rbac --scopes {scope}/resourceGroups/{rg} --create-cert '
'--keyvault {kv} --cert {cert} -n {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
except KeyVaultErrorException:
if not self.is_live and not self.in_recording:
                        pass  # temporary workaround: keyvault challenge handling is ignored under playback
else:
raise
cer1 = self.cmd('keyvault certificate show --vault-name {kv} -n {cert}').get_output_in_json()['cer']
self.cmd('ad sp credential reset -n {app_id} --create-cert --keyvault {kv} --cert {cert}')
cer2 = self.cmd('keyvault certificate show --vault-name {kv} -n {cert}').get_output_in_json()['cer']
self.assertTrue(cer1 != cer2)
finally:
self.cmd('ad app delete --id {app_id}')
class RbacSPKeyVaultScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sp_with_kv_existing_cert')
@KeyVaultPreparer(name_prefix='test-rbac-exist-kv')
def test_create_for_rbac_with_existing_kv_cert(self, resource_group, key_vault):
subscription_id = self.get_subscription_id()
self.kwargs.update({
'display_name': resource_group,
'display_name2': resource_group + '2',
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'cert': 'cert1',
'kv': key_vault
})
        time.sleep(5)  # wait briefly so a newly created vault does not return 504 errors
# test with valid length cert
try:
self.kwargs['policy'] = self.cmd('keyvault certificate get-default-policy').get_output_in_json()
self.cmd('keyvault certificate create --vault-name {kv} -n {cert} -p "{policy}" --validity 24')
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac -n {display_name} --keyvault {kv} '
'--cert {cert} --scopes {scope}/resourceGroups/{rg}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('ad sp credential reset -n {app_id} --keyvault {kv} --cert {cert}')
finally:
try:
self.cmd('ad app delete --id {app_id}')
except:
# Mute the exception, otherwise the exception thrown in the `try` clause will be hidden
pass
# test with cert that has too short a validity
try:
self.cmd('keyvault certificate create --vault-name {kv} -n {cert} -p "{policy}" --validity 6')
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac --scopes {scope}/resourceGroups/{rg} --keyvault {kv} '
'--cert {cert} -n {display_name2}').get_output_in_json()
self.kwargs['app_id2'] = result['appId']
self.cmd('ad sp credential reset -n {app_id2} --keyvault {kv} --cert {cert}')
finally:
try:
self.cmd('ad app delete --id {app_id2}')
except:
pass
class RoleCreateScenarioTest(RoleScenarioTest):
@AllowLargeResponse()
def test_role_create_scenario(self):
subscription_id = self.get_subscription_id()
role_name = self.create_random_name('cli-test-role', 20)
template = {
"Name": role_name,
"Description": "Can monitor compute, network and storage, and restart virtual machines",
"Actions": ["Microsoft.Compute/*/read",
"Microsoft.Compute/virtualMachines/start/action",
"Microsoft.Compute/virtualMachines/restart/action",
"Microsoft.Network/*/read",
"Microsoft.Storage/*/read",
"Microsoft.Authorization/*/read",
"Microsoft.Resources/subscriptions/resourceGroups/read",
"Microsoft.Resources/subscriptions/resourceGroups/resources/read",
"Microsoft.Insights/alertRules/*"],
"AssignableScopes": ["/subscriptions/{}".format(subscription_id)]
}
_, temp_file = tempfile.mkstemp()
with open(temp_file, 'w') as f:
json.dump(template, f)
self.kwargs.update({
'sub': subscription_id,
'role': role_name,
'template': temp_file.replace('\\', '\\\\')
})
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
retry(lambda: self.cmd('role definition create --role-definition {template}', checks=[
self.check('permissions[0].actions[0]', 'Microsoft.Compute/*/read')]))
retry(lambda: self.cmd('role definition list -n {role}', checks=self.check('[0].roleName', '{role}')))
# verify we can update
template['Actions'].append('Microsoft.Support/*')
with open(temp_file, 'w') as f:
json.dump(template, f)
retry(lambda: self.cmd('role definition update --role-definition {template}',
checks=self.check('permissions[0].actions[-1]', 'Microsoft.Support/*')))
retry(lambda: self.cmd('role definition delete -n {role}', checks=self.is_empty()))
class RoleAssignmentScenarioTest(RoleScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_role_assign')
@AllowLargeResponse()
def test_role_assignment_e2e(self, resource_group):
if self.run_under_service_principal():
            return  # this test deletes users, which is beyond a service principal's permissions, so skip
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
'nsg': 'nsg1'
})
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
            time.sleep(15)  # by design, it takes some time for the RBAC system to pick up graph object changes
try:
self.cmd('network nsg create -n {nsg} -g {rg}')
result = self.cmd('network nsg show -n {nsg} -g {rg}').get_output_in_json()
self.kwargs['nsg_id'] = result['id']
# test role assignments on a resource group
self.cmd('role assignment create --assignee {upn} --role contributor -g {rg}')
self.cmd('role assignment list -g {rg}',
checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {upn} --role contributor -g {rg}', checks=[
self.check("length([])", 1),
self.check("[0].properties.principalName", self.kwargs["upn"])
])
# test couple of more general filters
result = self.cmd('role assignment list -g {rg} --include-inherited').get_output_in_json()
self.assertTrue(len(result) >= 1)
result = self.cmd('role assignment list --all').get_output_in_json()
self.assertTrue(len(result) >= 1)
self.cmd('role assignment delete --assignee {upn} --role contributor -g {rg}')
self.cmd('role assignment list -g {rg}',
checks=self.is_empty())
# test role assignments on a resource
self.cmd('role assignment create --assignee {upn} --role contributor --scope {nsg_id}')
self.cmd('role assignment list --assignee {upn} --role contributor --scope {nsg_id}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {upn} --role contributor --scope {nsg_id}')
self.cmd('role assignment list --scope {nsg_id}',
checks=self.is_empty())
# test role assignment on subscription level
self.cmd('role assignment create --assignee {upn} --role reader')
self.cmd('role assignment list --assignee {upn} --role reader',
checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {upn}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {upn} --role reader')
finally:
self.cmd('ad user delete --upn-or-object-id {upn}')
class RoleAssignmentListScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_assignments_for_coadmins')
@AllowLargeResponse()
def test_assignments_for_co_admins(self, resource_group):
result = self.cmd('role assignment list --include-classic-administrator').get_output_in_json()
self.assertTrue([x for x in result if x['properties']['roleDefinitionName'] in ['CoAdministrator', 'AccountAdministrator']])
self.cmd('role assignment list -g {}'.format(resource_group), checks=[
self.check("length([])", 0)
])
result = self.cmd('role assignment list -g {} --include-classic-administrator'.format(resource_group)).get_output_in_json()
self.assertTrue([x for x in result if x['properties']['roleDefinitionName'] in ['CoAdministrator', 'AccountAdministrator']])
if __name__ == '__main__':
unittest.main()
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
type='FSAF',
bbox_head=dict(
type='FSAFHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
reg_decoded_bbox=True,
# Only the anchor-free branch is implemented. The anchor generator
# generates just one anchor at each feature point, as a substitute for
# the grid of features.
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
_delete_=True,
type='IoULoss',
eps=1e-6,
loss_weight=1.0,
reduction='none'),
))
# training and testing settings
train_cfg = dict(
assigner=dict(
_delete_=True,
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=10, norm_type=2))
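# Training sketch (illustrative; assumes a standard mmdetection checkout and that
# this config is saved as configs/fsaf/fsaf_r50_fpn_1x_coco.py):
# python tools/train.py configs/fsaf/fsaf_r50_fpn_1x_coco.py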
|
/* memoizer :: (Any -> Any) -> Any -> Any
@arg: a unary function
@return: the corresponding memoized function
*/
const memoizer = (f) => {
let cache = {}
return function (n) {
if (cache[n] !== undefined) {
return cache[n]
} else {
let res = f(n)
cache[n] = res
return res
}
}
}
const compose = (...fns) => x =>
fns.reduceRight(
(v, f) => f(v),
x
)
export {
memoizer,
compose
}
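// Usage sketch (illustrative, not part of the module):
// const inc = n => n + 1
// const double = n => n * 2
// const fast = memoizer(double)      // repeat calls with the same n hit the cache
// const pipeline = compose(fast, inc) // reduceRight applies inc first, then fast
// pipeline(2) // => 6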
|
numero = int(input('Enter a number: '))
ant = numero - 1
sus = numero + 1
print('The predecessor of {} is {} and the successor is {}.'.format(numero, ant, sus))
|
"""
Config options.
"""
from .lib import arg, o
def help():
"""
Define options.
"""
h = arg
return [
h("bin min size =len**b", b=.5),
h("what columns to while tree building", c=["x", "y"]),
h("use at most 'd' rows for distance calcs", d=256),
h("merge ranges whose scores differ by less that F", e=0.05),
h("separation of poles (f=1 means 'max distance')", f=.9),
h("decision list: minimum leaf size", M=10),
h("decision list: maximum height", H=4),
h("decision list: ratio of negative examples", N=4),
h("coefficient for distance", p=2),
h("random number seed", r=1),
h("tree leaves must be at least n**s in size", s=0.5),
h("stats: Cliff's Delta 'dull'", Sdull=[.147, .33, .474]),
h("stats: Coehn 'd'", Scohen=0.2),
h("stats: number of boostrap samples", Sb=500),
h("stats: bootstrap confidences", Sconf=0.01),
h("training data (arff format", train="train.csv"),
h("testing data (csv format)", test="test.csv"),
h("List all tests", L=False),
h("Run all tests", T=False),
h("Verbose mode", V=False),
h("Run just the tests with names matching 'S'", t="")
]
my = o(**{k: d for k, d, _ in help()})
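# Usage sketch (illustrative; assumes `o` exposes the keyword defaults as attributes):
# my.d -> 256  (use at most 'd' rows for distance calcs)
# my.s -> 0.5  (tree leaves must be at least n**s in size)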
|
# -*- coding: utf-8 -*-
'''
@author: look
@copyright: 1999-2020 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: 390125133@qq.com
'''
'''Logcat monitor.
'''
import os,sys,csv
import re
import time
BaseDir=os.path.dirname(__file__)
sys.path.append(os.path.join(BaseDir,'../..'))
from mobileperf.android.tools.androiddevice import AndroidDevice
from mobileperf.common.basemonitor import Monitor
from mobileperf.common.utils import TimeUtils,FileUtils
from mobileperf.common.utils import ms2s
from mobileperf.common.log import logger
from mobileperf.android.globaldata import RuntimeData
class LogcatMonitor(Monitor):
'''Logcat monitor.
'''
def __init__(self, device_id, package=None, **regx_config):
'''Constructor.
:param str device_id: device id
:param list package: list of processes to monitor; when empty, all processes are monitored
:param dict regx_config: log-matching config items {conf_id = regx}, e.g. AutoMonitor=ur'AutoMonitor.*:(.*), cost=(\d+)'
'''
super(LogcatMonitor, self).__init__(**regx_config)
self.package = package # list of processes to monitor
self.device_id = device_id
self.device = AndroidDevice(device_id) # the device
self.running = False # running state of the logcat monitor (started/stopped)
self.launchtime = LaunchTime(self.device_id, self.package)
self.exception_log_list = []
self.start_time = None
self.append_log_line_num = 0
self.file_log_line_num = 0
self.log_file_create_time = None
def start(self, start_time):
'''Start the logcat log monitor.
'''
self.start_time = start_time
# register handle_launchtime as the launch-time log handler callback
self.add_log_handle(self.launchtime.handle_launchtime)
logger.debug("logcatmonitor start...")
# capture logs from all processes
# https://developer.android.com/studio/command-line/logcat #alternativeBuffers
# the default buffers are main, system and crash; output all buffers
if not self.running:
self.device.adb.start_logcat(RuntimeData.package_save_path, [], ' -b all')
time.sleep(1)
self.running = True
def stop(self):
'''Stop the logcat log monitor.
'''
logger.debug("logcat monitor: stop...")
self.remove_log_handle(self.launchtime.handle_launchtime) # remove the callback
logger.debug("logcat monitor: stopped")
if self.exception_log_list:
self.remove_log_handle(self.handle_exception)
self.device.adb.stop_logcat()
self.running = False
def parse(self, file_path):
pass
def set_exception_list(self,exception_log_list):
self.exception_log_list = exception_log_list
def add_log_handle(self, handle):
'''Add a real-time log handler; handle is invoked once for every log line produced.
'''
self.device.adb._logcat_handle.append(handle)
def remove_log_handle(self, handle):
'''Remove a real-time log handler.
'''
self.device.adb._logcat_handle.remove(handle)
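# Handler usage sketch (illustrative): a handler receives each raw logcat line, e.g.
# def my_handle(log_line):
#     logger.debug("got: " + log_line)
# monitor.add_log_handle(my_handle)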
def handle_exception(self, log_line):
'''
Called back every time a new log line arrives.
:param log_line: content of the most recent log line
Matching exception logs are written to a separate file.
:return: void
'''
for tag in self.exception_log_list:
if tag in log_line:
logger.debug("exception Info: " + log_line)
tmp_file = os.path.join(RuntimeData.package_save_path, 'exception.log')
with open(tmp_file, 'a+') as f:
f.write(log_line + '\n')
# spaces in this path can cause problems
process_stack_log_file = os.path.join(RuntimeData.package_save_path, 'process_stack_%s_%s.log' % (
self.package, TimeUtils.getCurrentTimeUnderline()))
# if the process died its pid changes, and a stack for the post-change pid is meaningless
# self.logmonitor.device.adb.get_process_stack(self.package,process_stack_log_file)
if RuntimeData.old_pid:
self.device.adb.get_process_stack_from_pid(RuntimeData.old_pid, process_stack_log_file)
class LaunchTime(object):
def __init__(self,deviceid, packagename = ""):
# list capacity should not be a concern; it depends somewhat on the system, but storing a few hundred thousand entries is generally fine
self.launch_list = [("datetime","packagename/activity","this_time(s)","total_time(s)","launchtype")]
self.packagename = packagename
def handle_launchtime(self, log_line):
'''
Called back each time a launch-time log line is produced.
:param log_line: content of the most recent log line
:param tag: launch type, either a normal launch or a custom one: fullydrawnlaunch
# if a "fully drawn" log is seen, prefer it: it measures the launch time up to a custom-defined start screen
:return: void
'''
# logger.debug(log_line)
# add begin by liurui
# 08-28 10:57:30.229 18882 19137 D IC5: CLogProducer == > code = 0, uuid = 4FE71E350379C64611CCD905938C10CA, eventType = performance, eventName = am_activity_launch_timeme, \
# log_time = 2019-08-28 10:57:30.229, contextInfo = {"tag": "am_activity_launch_time", "start_time": "2019-08-28 10:57:16",
# "activity_name_original": "com.android.settings\/.FallbackHome",
# "activity_name": "com.android.settings#com.android.settings.FallbackHome",
# "this_time": "916", "total_time": "916", "start_type": "code_start",
# "gmt_create": "2019-08-28 10:57:16.742", "uploadtime": "2019-08-28 10:57:30.173",
# "boottime": "2019-08-28 10:57:18.502", "firstupload": "2019-08-28 10:57:25.733"}
ltag = ""
if ("am_activity_launch_time" in log_line or "am_activity_fully_drawn_time" in log_line):
# if the newest line is a launch-time log, call back all registered _handle functions
if "am_activity_launch_time" in log_line:
ltag = "normal launch"
elif "am_activity_fully_drawn_time" in log_line:
ltag = "fullydrawn launch"
logger.debug("launchtime log:"+log_line)
if ltag:
content = []
timestamp = time.time()
content.append(TimeUtils.formatTimeStamp(timestamp))
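# The eventlog payload is a bracketed tuple at the end of the line, e.g. (illustrative):
# "... am_activity_launch_time: [0,123456,com.example/.MainActivity,916,916]"
# so fields [2:5] below are the activity name, this_time and total_time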
temp_list = log_line.split()[-1].replace("[", "").replace("]", "").split(',')[2:5]
for i in range(len(temp_list)):
content.append(temp_list[i])
content.append(ltag)
logger.debug("Launch Info: "+str(content))
if len(content) == 5:
content = self.trim_value(content)
if content:
self.update_launch_list(content,timestamp)
def trim_value(self, content):
try:
content[2] = ms2s(float(content[2])) # convert this_time to seconds
content[3] = ms2s(float(content[3])) # convert total_time to seconds
except Exception as e:
logger.error(e)
return []
return content
def update_launch_list(self, content,timestamp):
# if self.packagename in content[1]:
self.launch_list.append(content)
tmp_file = os.path.join(RuntimeData.package_save_path, 'launch_logcat.csv')
perf_data = {"task_id":"",'launch_time':[],'cpu':[],"mem":[],
'traffic':[], "fluency":[],'power':[],}
dic = {"time": timestamp,
"act_name": content[1],
"this_time": content[2],
"total_time": content[3],
"launch_type": content[4]}
perf_data['launch_time'].append(dic)
# perf_queue.put(perf_data)
with open(tmp_file,"a+") as f:
csvwriter = csv.writer(f, lineterminator='\n') # this avoids blank rows in the csv
logger.debug("save launchtime data to csv: " + str(self.launch_list))
csvwriter.writerows(self.launch_list)
del self.launch_list[:]
if __name__ == '__main__':
logcat_monitor = LogcatMonitor("85I7UO4PFQCINJL7", "com.yunos.tv.alitvasr")
# only start this module if exception log markers are configured
exceptionlog_list=["fatal exception","has died"]
if exceptionlog_list:
logcat_monitor.set_exception_list(exceptionlog_list)
logcat_monitor.add_log_handle(logcat_monitor.handle_exception)
start_time = TimeUtils.getCurrentTimeUnderline()
RuntimeData.package_save_path = os.path.join(FileUtils.get_top_dir(), 'results', "com.yunos.tv.alitvasr", start_time)
logcat_monitor.start(start_time)
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
from behave import then, when
from tests.ggrc.behave.utils import (
get_resource, get_service_endpoint_url_for_type, handle_get_resource_and_name_it,
check_for_resource_in_collection,
)
@when('Querying "{resource_type}" with "{querystring}"')
def query_resource_collection(context, resource_type, querystring):
url = '{0}?{1}'.format(
get_service_endpoint_url_for_type(context, resource_type),
querystring)
handle_get_resource_and_name_it(context, url, 'queryresultcollection')
@when('Querying "{resource_type}" with bad argument "{querystring}"')
def query_with_bad_argument(context, resource_type, querystring):
url = '{0}?{1}'.format(
get_service_endpoint_url_for_type(context, resource_type),
querystring)
context._response = get_resource(context, url)
@when('Querying "{resource_type}" with expression "{property_path}" equals literal "{value}"')
def query_resource_collection_with_literal(
context, resource_type, property_path, value):
value = eval(value)
query_resource_collection(
context, resource_type, '{0}={1}'.format(property_path, value))
@then('"{resource_name}" is in query result')
def check_resource_in_queryresult(context, resource_name):
check_for_resource_in_collection(
context, 'queryresultcollection', resource_name, True)
@then('"{resource_name}" is not in query result')
def check_resource_not_in_queryresult(context, resource_name):
check_for_resource_in_collection(
context, 'queryresultcollection', resource_name, False)
@then('query result selfLink query string is "{expected_querystring}"')
def check_query_selfLink(context, expected_querystring):
queryresult = context.queryresultcollection
root = list(queryresult.keys())[0]
selfLink = queryresult[root]['selfLink']
idx = selfLink.find('?')
assert selfLink[idx+1:] == expected_querystring, \
'Expected to find query string {0}, found {1}'.format(
expected_querystring, selfLink[idx+1:])
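# Example feature usage (illustrative; step names as defined above):
# When Querying "Program" with "title=My+Program"
# Then "example_program" is in query result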
|
# Copyright (c) 2018, CMCC Technologies Co., Ltd.
# Copyright 2019 ZTE Corporation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from lcm.ns.serializers.sol.pub_serializers import AffinityOrAntiAffinityRuleSerializer
from lcm.ns.serializers.sol.update_serializers import AddPnfDataSerializer, VnfInstanceDataSerializer, SapDataSerializer
class CivicAddressElementSerializer(serializers.Serializer):
caType = serializers.CharField(
help_text="Describe the content type of caValue.",
required=True)
caValue = serializers.CharField(
help_text="Content of civic address element corresponding to the caType.",
required=True)
class LocationConstraintsSerializer(serializers.Serializer):
countryCode = serializers.CharField(
help_text="The two-letter ISO 3166 [29] country code in capital letters.",
required=True)
civicAddressElement = CivicAddressElementSerializer(
help_text="Zero or more elements comprising the civic address.",
required=False,
many=True)
class VnfLocationConstraintSerializer(serializers.Serializer):
vnfProfileId = serializers.CharField(
help_text="Identifier (reference to) of a VnfProfile in the NSD used to manage the lifecycle of the VNF instance.",
required=True)
locationConstraints = LocationConstraintsSerializer(
help_text="Defines the location constraints for the VNF instance to be created based on the VNF profile.",
required=True)
class ParamsForVnfSerializer(serializers.Serializer):
vnfProfileId = serializers.CharField(
help_text="Identifier of (reference to) a vnfProfile to which the additional parameters apply.",
required=True)
additionalParams = serializers.DictField(
help_text="Additional parameters that are applied for the VNF instance to be created.",
required=False)
class NestedNsInstanceDataSerializer(serializers.Serializer):
nestedNsInstanceId = serializers.CharField(
help_text="Identifier of the existing nested NS instance to be used in the NS.",
required=True)
nsProfileId = serializers.CharField(
help_text="Identifier of an NsProfile defined in the NSD which the existing nested NS instance shall be matched with.",
required=True)
class InstantNsReqSerializer(serializers.Serializer):
nsFlavourId = serializers.CharField(
help_text="Identifier of the NS deployment flavour to be instantiated.",
required=True)
sapData = SapDataSerializer(
help_text="Create data concerning the SAPs of this NS",
required=False,
many=True)
addpnfData = AddPnfDataSerializer(
help_text="Information on the PNF(s) that are part of this NS.",
required=False,
many=True)
vnfInstanceData = VnfInstanceDataSerializer(
help_text="Specify an existing VNF instance to be used in the NS.",
required=False,
many=True)
nestedNsInstanceData = NestedNsInstanceDataSerializer(
help_text="Specify an existing NS instance to be used as a nested NS within the NS",
required=False,
many=True)
localizationLanguage = VnfLocationConstraintSerializer(
help_text="Defines the location constraints for the VNF to be instantiated as part of the NS instantiation.",
required=False,
many=True)
additionalParamForNs = serializers.DictField(
help_text="Allows the OSS/BSS to provide additional parameters at the NS level ",
required=False,
allow_null=True
)
additionalParamsForVnf = ParamsForVnfSerializer(
help_text="Allows the OSS/BSS to provide additional parameter(s)per VNF instance",
required=False,
many=True)
startTime = serializers.DateTimeField(
help_text="Timestamp indicating the earliest time to instantiate the NS.",
required=False)
nsInstantiationLevelId = serializers.CharField(
help_text="Identifies one of the NS instantiation levels declared in the DF applicable to this NS instance",
required=False)
additionalAffinityOrAntiAffiniityRule = AffinityOrAntiAffinityRuleSerializer(
help_text="Specifies additional affinity or anti-affinity constraint for the VNF instances to be"
" instantiated as part of the NS instantiation.",
required=False,
many=True)
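# Example request body accepted by InstantNsReqSerializer (illustrative values):
# {
#   "nsFlavourId": "flavour_1",
#   "nsInstantiationLevelId": "level_1",
#   "additionalParamForNs": {"key": "value"}
# }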
|
/*!
* Angular Material Design
* https://github.com/angular/material
* @license MIT
* v1.1.0-rc4-master-afa1e45
*/
(function( window, angular, undefined ){
"use strict";
(function() {
'use strict';
/**
* @ngdoc module
* @name material.components.fabActions
*/
angular
.module('material.components.fabActions', ['material.core'])
.directive('mdFabActions', MdFabActionsDirective);
/**
* @ngdoc directive
* @name mdFabActions
* @module material.components.fabActions
*
* @restrict E
*
* @description
* The `<md-fab-actions>` directive is used inside of a `<md-fab-speed-dial>` or
* `<md-fab-toolbar>` directive to mark an element (or elements) as the actions and setup the
* proper event listeners.
*
* @usage
* See the `<md-fab-speed-dial>` or `<md-fab-toolbar>` directives for example usage.
*/
function MdFabActionsDirective() {
return {
restrict: 'E',
require: ['^?mdFabSpeedDial', '^?mdFabToolbar'],
compile: function(element, attributes) {
var children = element.children();
var hasNgRepeat = false;
angular.forEach(['', 'data-', 'x-'], function(prefix) {
hasNgRepeat = hasNgRepeat || (children.attr(prefix + 'ng-repeat') ? true : false);
});
// Support both ng-repeat and static content
if (hasNgRepeat) {
children.addClass('md-fab-action-item');
} else {
// Wrap every child in a new div and add a class that we can scale/fling independently
children.wrap('<div class="md-fab-action-item">');
}
}
}
}
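// Markup sketch (illustrative; see the md-fab-speed-dial docs for full usage):
// <md-fab-speed-dial>
//   <md-fab-trigger><md-button>...</md-button></md-fab-trigger>
//   <md-fab-actions>
//     <md-button ng-repeat="action in actions">{{action.name}}</md-button>
//   </md-fab-actions>
// </md-fab-speed-dial>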
})();
})(window, window.angular);
|
#!/usr/bin/env python
import io
import os
from setuptools import find_packages, setup
setup(
name='Appium-Flutter-Finder',
version='0.1.3',
description='An extension of finder for Appium Flutter',
long_description=io.open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8').read(),
long_description_content_type='text/markdown',
keywords=[
'appium',
'flutter',
'python client',
'mobile automation'
],
author='Kazuaki Matsuo',
author_email='fly.49.89.over@gmail.com',
url='https://github.com/truongsinh/appium-flutter-driver',
packages=find_packages(include=['appium_flutter_finder*']),
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing'
],
install_requires=['Appium-Python-Client >= 0.4']
)
|
/**
* To show the users who else are working on the board
*/
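// Presence protocol sketch (inferred from the handlers below): every client
// emits 'hello' on 'joined' and replies 'hello' to unknown peers, so each
// side converges on the same friends list; 'bye' removes a departing peer.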
define(["matisse"],
function(matisse) {
function startGreeting(view) {
var friends = [];
var io = matisse.comm.socket;
function me() {
return matisse.userName;
}
io.on('hello', function(friend) {
if(friends.indexOf(friend) < 0) {
friends.push(friend);
view.showAdded(friend);
view.updateList(friends);
io.emit('hello', me());
}
});
io.on('bye', function(friend) {
if(friends.indexOf(friend) >= 0) {
friends.splice(friends.indexOf(friend), 1);
view.showRemoved(friend);
view.updateList(friends);
}
});
io.on("joined", function () {
io.emit('hello', me());
});
}
var friendsView = function() {
function setList(ul, friends) {
ul.empty();
$.each(friends, function(i, f) {
ul.append('<li>' + f + '</li>');
});
}
function notify(msg) {
setList($('#friendsicon .notify ul'), [msg]);
$('#friendsicon .notify').show().fadeOut(4000);
}
return {
showAdded: function(friend) {
notify(friend + ' joined');
},
showRemoved: function(friend) {
notify(friend + ' left');
},
updateList: function(friends) {
if(friends.length === 0) {
friends = ['<em>Home alone!</em>'];
}
setList($('#friendsicon .all ul'), friends);
}
};
};
return {
init: function() {
startGreeting(friendsView());
}
};
});
|
"use strict";
import _ from "lodash";
import uuidv4 from "uuid/v4";
import passwordValidator from "password-validator";
import { respCodeAndMsg, constants } from "../config";
const { STATUS_CODE } = respCodeAndMsg;
const createErrorObject = (httpStatusCode, message, data) => {
return {
details: {
message: message || respCodeAndMsg.ERROR_MESSAGES.SOMETHING_WRONG,
data: data || {}
},
httpStatusCode: httpStatusCode || STATUS_CODE.BAD_REQUEST
};
};
const createSuccessObject = (httpStatusCode, message, data) => {
return {
details: {
message: message || respCodeAndMsg.SUCCESS_MESSAGES.ACTION_COMPLETE,
data: data || {}
},
httpStatusCode: httpStatusCode || STATUS_CODE.OK
};
};
const getUUID = () => {
return uuidv4();
};
const isEmptyObject = obj => {
return _.isEmpty(obj);
};
const validatePassword = password => {
const passwordSchema = new passwordValidator();
const { PASSWORD_CONSTRAINTS } = constants;
passwordSchema
.is()
.min(PASSWORD_CONSTRAINTS.min) // Minimum length
.is()
.max(PASSWORD_CONSTRAINTS.max) // Maximum length
.has()
.uppercase() // Must have uppercase letters
.has()
.lowercase() // Must have lowercase letters
.has()
.digits() // Must have digits
.has()
.not()
.spaces();
return passwordSchema.validate(password);
};
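// Usage sketch (illustrative; the actual bounds come from constants.PASSWORD_CONSTRAINTS):
// validatePassword("Passw0rd") // => true when it meets min/max, cases, digits, no spaces
// validatePassword("short")    // => false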
export {
createErrorObject,
isEmptyObject,
createSuccessObject,
getUUID,
validatePassword
};
|
const { Command } = require('discord-akairo');
class ProgressionCommand extends Command {
constructor() {
super('progression', {
aliases: ['progression'],
category: 'Music',
description: {
content: 'The progression command shows the progress of the current track',
usage: "progression",
exemples: ['progression'],
},
channel: "guild",
});
}
async exec(message) {
await message.delete();
let guildQueue = this.client.musicPlayer.getQueue(message.guild.id);
const channel = await this.client.functions.checkMusicChannelExistence(message, message.guild, this.client.guildSettings, this.client);
if (channel.allowed) {
if (guildQueue) {
const ProgressBar = guildQueue.createProgressBar({size: 50});
await this.client.log.music.logCommand(message.guildId, message.author, this.id, {progressionBar: ProgressBar.prettier});
message.channel.send(`**Track progress:** \n${ProgressBar.prettier}`);
} else {
message.channel.send("No music is currently playing!");
}
}
}
}
module.exports = ProgressionCommand;
|
const router = require('express').Router();
const { Comment } = require('../../models');
const withAuth = require('../../utils/auth');
//find all
router.get('/', (req,res) => {
Comment.findAll({})
.then(commentData => res.json(commentData))
.catch(err => {
console.log(err);
res.status(500).json(err)
});
});
router.get('/:id', (req, res) => {
Comment.findAll({
where: {
id: req.params.id
}
})
.then(commentData => res.json(commentData))
.catch(err => {
console.log(err);
res.status(500).json(err);
})
});
//create comment
router.post('/', async (req, res) => {
try {
const newComment = await Comment.create({
...req.body,
userId: req.session.userId,
});
res.json(newComment);
} catch (err) {
res.status(500).json(err);
}
});
//delete comment
router.delete('/:id', withAuth, async (req, res) => {
try {
const commentData = await Comment.destroy({
where: {
id: req.params.id,
user_id: req.session.user_id,
},
});
if (!commentData) {
res.status(404).json({ message: 'No comment found with this id!' });
return;
}
res.status(200).json(commentData);
} catch (err) {
res.status(500).json(err);
}
});
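// Route summary (illustrative; the mount point depends on the parent router,
// e.g. /api/comments):
//   GET    /     -> list all comments
//   GET    /:id  -> comments matching id
//   POST   /     -> create a comment for the session user
//   DELETE /:id  -> delete own comment (requires auth)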
module.exports = router;
|